From: Francis Deslauriers
Date: Mon, 29 May 2017 19:32:04 +0000 (-0400)
Subject: Prevent re-entrancy in callstack-user context
X-Git-Tag: v2.11.0-rc1~24
X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=a2b0231a9ae996cc69e48d75c338ad7d081a0f85;p=lttng-modules.git

Prevent re-entrancy in callstack-user context

The userspace callstack context often triggers kernel page faults that
can be traced by the kernel tracer, which might then attempt to gather
the userspace callstack again, and so on. This recursion is stopped by
the RING_BUFFER_MAX_NESTING check, but it still pollutes the traces
with redundant information.

To prevent this, check whether the tracer is already gathering the
userspace callstack and, if so, do not record it again.

Signed-off-by: Francis Deslauriers
Signed-off-by: Mathieu Desnoyers
---

diff --git a/lttng-context-callstack.c b/lttng-context-callstack.c
index fd730bbc..2fa5f52d 100644
--- a/lttng-context-callstack.c
+++ b/lttng-context-callstack.c
@@ -120,14 +120,26 @@ int init_type(enum lttng_cs_ctx_modes mode)
 	return 0;
 }
 
+/* Keep track of nesting inside userspace callstack context code */
+DEFINE_PER_CPU(int, callstack_user_nesting);
+
 static
 struct stack_trace *stack_trace_context(struct lttng_ctx_field *field,
 					struct lib_ring_buffer_ctx *ctx)
 {
-	int nesting;
+	int buffer_nesting, cs_user_nesting;
 	struct lttng_cs *cs;
 	struct field_data *fdata = field->priv;
 
+	/*
+	 * Do not gather the userspace callstack context when the event was
+	 * triggered by the userspace callstack context saving mechanism.
+	 */
+	cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);
+
+	if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
+		return NULL;
+
 	/*
 	 * get_cpu() is not required, preemption is already
 	 * disabled while event is written.
@@ -136,11 +148,11 @@ struct stack_trace *stack_trace_context(struct lttng_ctx_field *field,
 	 * Check it again as a safety net.
 	 */
 	cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
-	nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
-	if (nesting >= RING_BUFFER_MAX_NESTING) {
+	buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
+	if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
 		return NULL;
-	}
-	return &cs->dispatch[nesting].stack_trace;
+
+	return &cs->dispatch[buffer_nesting].stack_trace;
 }
 
 /*
@@ -168,8 +180,15 @@ size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
 	/* reset stack trace, no need to clear memory */
 	trace->nr_entries = 0;
 
+	if (fdata->mode == CALLSTACK_USER)
+		++per_cpu(callstack_user_nesting, ctx->cpu);
+
 	/* do the real work and reserve space */
 	cs_types[fdata->mode].save_func(trace);
+
+	if (fdata->mode == CALLSTACK_USER)
+		per_cpu(callstack_user_nesting, ctx->cpu)--;
+
 	/*
 	 * Remove final ULONG_MAX delimiter. If we cannot find it, add
 	 * our own marker to show that the stack is incomplete. This is
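
For illustration only, not part of the patch: a minimal sketch of the guard
pattern the change applies. A nesting counter is incremented around the
callstack save, and any event fired while the counter is non-zero skips the
user callstack context. The real code uses a per-CPU counter
(DEFINE_PER_CPU/per_cpu) inside the kernel tracer; the sketch below stands in
with a thread-local counter, and every name in it is made up for the example.

/* Standalone sketch of the re-entrancy guard (illustrative names only). */
#include <stdio.h>

/* Stands in for the per-CPU callstack_user_nesting counter. */
static _Thread_local int cs_user_nesting;

static void save_user_callstack(void);

/* Stands in for an event traced while the callstack is being saved,
 * e.g. a page fault taken during the user stack walk. */
static void nested_event(void)
{
	/* Skip the user callstack context if one is already being saved. */
	if (cs_user_nesting >= 1) {
		printf("nested event: user callstack context skipped\n");
		return;
	}
	save_user_callstack();
}

static void save_user_callstack(void)
{
	++cs_user_nesting;
	printf("saving user callstack\n");
	nested_event();		/* would otherwise recurse here */
	cs_user_nesting--;
}

int main(void)
{
	save_user_callstack();
	return 0;
}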