},
};
+
+static
+const char *lttng_cs_ctx_mode_name(enum lttng_cs_ctx_modes mode)
+{
+	return cs_types[mode].name;
+}
+
static
int init_type(enum lttng_cs_ctx_modes mode)
{
	return 0;
}
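+
+/* Point each per-CPU, per-nesting-level stack_trace at its backing entries array. */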
+static
+void lttng_cs_set_init(struct lttng_cs __percpu *cs_set)
+{
+	int cpu, i;
+
+	for_each_possible_cpu(cpu) {
+		struct lttng_cs *cs;
+
+		cs = per_cpu_ptr(cs_set, cpu);
+		for (i = 0; i < RING_BUFFER_MAX_NESTING; i++) {
+			struct lttng_cs_dispatch *dispatch;
+
+			dispatch = &cs->dispatch[i];
+			dispatch->stack_trace.entries = dispatch->entries;
+			dispatch->stack_trace.max_entries = MAX_ENTRIES;
+		}
+	}
+}
+
/* Keep track of nesting inside userspace callstack context code */
DEFINE_PER_CPU(int, callstack_user_nesting);
static
struct field_data __percpu *field_data_create(enum lttng_cs_ctx_modes mode)
{
-	int cpu, i;
	struct lttng_cs __percpu *cs_set;
	struct field_data *fdata;
	cs_set = alloc_percpu(struct lttng_cs);
	if (!cs_set)
		goto error_alloc;
-
+	lttng_cs_set_init(cs_set);
	fdata->cs_percpu = cs_set;
-	for_each_possible_cpu(cpu) {
-		struct lttng_cs *cs;
-
-		cs = per_cpu_ptr(cs_set, cpu);
-		for (i = 0; i < RING_BUFFER_MAX_NESTING; i++) {
-			struct lttng_cs_dispatch *dispatch;
-
-			dispatch = &cs->dispatch[i];
-			dispatch->stack_trace.entries = dispatch->entries;
-			dispatch->stack_trace.max_entries = MAX_ENTRIES;
-		}
-	}
	fdata->mode = mode;
	return fdata;
int __lttng_add_callstack_generic(struct lttng_ctx **ctx,
		enum lttng_cs_ctx_modes mode)
{
-	const char *ctx_name = cs_types[mode].name;
+	const char *ctx_name = lttng_cs_ctx_mode_name(mode);
	struct lttng_ctx_field *field;
	struct field_data *fdata;
	int ret;