[lttng-modules.git] / lttng-probes.c
/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * lttng-probes.c
 *
 * Holds LTTng probes registry.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>

#include <lttng-events.h>

/*
 * probe list is protected by sessions lock.
 */
static LIST_HEAD(_probe_list);

/*
 * List of probes registered but not yet processed.
 */
static LIST_HEAD(lazy_probe_init);

/*
 * lazy_nesting counter ensures we don't trigger lazy probe registration
 * fixup while we are performing the fixup. It is protected by the
 * sessions lock.
 */
static int lazy_nesting;

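/*
 * Per-cpu stack used by the probes to keep track of the length of
 * dynamically-sized event fields while an event payload is recorded
 * (descriptive comment added here; not in the upstream file).
 */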
DEFINE_PER_CPU(struct lttng_dynamic_len_stack, lttng_dynamic_len_stack);

EXPORT_PER_CPU_SYMBOL_GPL(lttng_dynamic_len_stack);

/*
 * Called under sessions lock.
 */
static
int check_event_provider(struct lttng_probe_desc *desc)
{
	int i;
	size_t provider_name_len;

	provider_name_len = strnlen(desc->provider,
			LTTNG_KERNEL_SYM_NAME_LEN - 1);
	for (i = 0; i < desc->nr_events; i++) {
		if (strncmp(desc->event_desc[i]->name,
				desc->provider,
				provider_name_len))
			return 0;	/* provider mismatch */
		/*
		 * The event name needs to contain at least the provider
		 * name, a '_' separator, and one or more letters.
		 */
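		/* e.g. provider "sched" with event name "sched_switch". */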
		if (strlen(desc->event_desc[i]->name) <= provider_name_len + 1)
			return 0;	/* provider mismatch */
		if (desc->event_desc[i]->name[provider_name_len] != '_')
			return 0;	/* provider mismatch */
	}
	return 1;
}

/*
 * Called under sessions lock.
 */
static
void lttng_lazy_probe_register(struct lttng_probe_desc *desc)
{
	struct lttng_probe_desc *iter;
	struct list_head *probe_list;

	/*
	 * Each provider enforces that every event name begins with the
	 * provider name. Check this in an assertion for extra
	 * carefulness. This ensures we cannot have duplicate event
	 * names across providers.
	 */
	WARN_ON_ONCE(!check_event_provider(desc));

	/*
	 * The provider ensures there are no duplicate event names.
	 * Duplicated TRACEPOINT_EVENT event names would generate a
	 * compile-time error due to duplicated symbol names.
	 */

	/*
	 * We sort the providers by struct lttng_probe_desc pointer
	 * address.
	 */
	probe_list = &_probe_list;
	list_for_each_entry_reverse(iter, probe_list, head) {
		BUG_ON(iter == desc); /* Should never be in the list twice */
		if (iter < desc) {
			/* We belong to the location right after iter. */
			list_add(&desc->head, &iter->head);
			goto desc_added;
		}
	}
	/* We should be added at the head of the list */
	list_add(&desc->head, probe_list);
desc_added:
	pr_debug("LTTng: just registered probe %s containing %u events\n",
		desc->provider, desc->nr_events);
}

/*
 * Called under sessions lock.
 */
static
void fixup_lazy_probes(void)
{
	struct lttng_probe_desc *iter, *tmp;
	int ret;

	lazy_nesting++;
	list_for_each_entry_safe(iter, tmp,
			&lazy_probe_init, lazy_init_head) {
		lttng_lazy_probe_register(iter);
		iter->lazy = 0;
		list_del(&iter->lazy_init_head);
	}
	ret = lttng_fix_pending_events();
	WARN_ON_ONCE(ret);
	lazy_nesting--;
}

/*
 * Called under sessions lock.
 */
struct list_head *lttng_get_probe_list_head(void)
{
	if (!lazy_nesting && !list_empty(&lazy_probe_init))
		fixup_lazy_probes();
	return &_probe_list;
}

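/*
 * Called with sessions lock held. Returns the registered probe
 * descriptor matching the given provider name, or NULL if none.
 */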
static
const struct lttng_probe_desc *find_provider(const char *provider)
{
	struct lttng_probe_desc *iter;
	struct list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	list_for_each_entry(iter, probe_list, head) {
		if (!strcmp(iter->provider, provider))
			return iter;
	}
	return NULL;
}

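/*
 * Register a probe descriptor. The descriptor is put on the lazy
 * registration list and only merged into the main probe list when the
 * probe list is needed (or immediately when a session is active).
 * Returns -EEXIST if the provider is already registered.
 */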
int lttng_probe_register(struct lttng_probe_desc *desc)
{
	int ret = 0;

	lttng_lock_sessions();

	/*
	 * Check if the provider has already been registered.
	 */
	if (find_provider(desc->provider)) {
		ret = -EEXIST;
		goto end;
	}
	list_add(&desc->lazy_init_head, &lazy_probe_init);
	desc->lazy = 1;
	pr_debug("LTTng: adding probe %s containing %u events to lazy registration list\n",
		desc->provider, desc->nr_events);
	/*
	 * If there is at least one active session, register the probe
	 * immediately: event registration cannot be delayed, since the
	 * events are needed right away.
	 */
	if (lttng_session_active())
		fixup_lazy_probes();
end:
	lttng_unlock_sessions();
	return ret;
}
EXPORT_SYMBOL_GPL(lttng_probe_register);

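/*
 * Unregister a probe descriptor, removing it from whichever list it is
 * currently on (lazy registration list or main probe list).
 */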
void lttng_probe_unregister(struct lttng_probe_desc *desc)
{
	lttng_lock_sessions();
	if (!desc->lazy)
		list_del(&desc->head);
	else
		list_del(&desc->lazy_init_head);
	pr_debug("LTTng: just unregistered probe %s\n", desc->provider);
	lttng_unlock_sessions();
}
EXPORT_SYMBOL_GPL(lttng_probe_unregister);

/*
 * TODO: this is O(nr_probes * nb_events), could be faster.
 * Called with sessions lock held.
 */
static
const struct lttng_event_desc *find_event(const char *name)
{
	struct lttng_probe_desc *probe_desc;
	int i;

	list_for_each_entry(probe_desc, &_probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			if (!strcmp(probe_desc->event_desc[i]->name, name))
				return probe_desc->event_desc[i];
		}
	}
	return NULL;
}

/*
 * Called with sessions lock held.
 */
const struct lttng_event_desc *lttng_event_get(const char *name)
{
	const struct lttng_event_desc *event;
	int ret;

	event = find_event(name);
	if (!event)
		return NULL;
	ret = try_module_get(event->owner);
	WARN_ON_ONCE(!ret);
	return event;
}
EXPORT_SYMBOL_GPL(lttng_event_get);

/*
 * Called with sessions lock held.
 */
void lttng_event_put(const struct lttng_event_desc *event)
{
	module_put(event->owner);
}
EXPORT_SYMBOL_GPL(lttng_event_put);

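/*
 * seq_file iterator over every event name of every registered probe.
 * The sessions lock is taken in tp_list_start() and released in
 * tp_list_stop(), so the probe list stays stable while the file is
 * being read.
 */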
static
void *tp_list_start(struct seq_file *m, loff_t *pos)
{
	struct lttng_probe_desc *probe_desc;
	struct list_head *probe_list;
	int iter = 0, i;

	lttng_lock_sessions();
	probe_list = lttng_get_probe_list_head();
	list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			if (iter++ >= *pos)
				return (void *) probe_desc->event_desc[i];
		}
	}
	/* End of list */
	return NULL;
}

static
void *tp_list_next(struct seq_file *m, void *p, loff_t *ppos)
{
	struct lttng_probe_desc *probe_desc;
	struct list_head *probe_list;
	int iter = 0, i;

	(*ppos)++;
	probe_list = lttng_get_probe_list_head();
	list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			if (iter++ >= *ppos)
				return (void *) probe_desc->event_desc[i];
		}
	}
	/* End of list */
	return NULL;
}

static
void tp_list_stop(struct seq_file *m, void *p)
{
	lttng_unlock_sessions();
}

static
int tp_list_show(struct seq_file *m, void *p)
{
	const struct lttng_event_desc *probe_desc = p;

	seq_printf(m, "event { name = %s; };\n",
		probe_desc->name);
	return 0;
}

static
const struct seq_operations lttng_tracepoint_list_seq_ops = {
	.start = tp_list_start,
	.next = tp_list_next,
	.stop = tp_list_stop,
	.show = tp_list_show,
};

static
int lttng_tracepoint_list_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &lttng_tracepoint_list_seq_ops);
}

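/*
 * File operations for the file listing the available tracepoint
 * events. Reading it yields one "event { name = ...; };" line per
 * known event (see tp_list_show()).
 */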
const struct file_operations lttng_tracepoint_list_fops = {
	.owner = THIS_MODULE,
	.open = lttng_tracepoint_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

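/*
 * Reset the per-cpu dynamic length stacks to an empty state.
 */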
int lttng_probes_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu_ptr(&lttng_dynamic_len_stack, cpu)->offset = 0;
	return 0;
}
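
/*
 * Illustrative sketch (not part of the upstream file): how a probe
 * provider module could hand a descriptor to this registry through the
 * lttng_probe_register()/lttng_probe_unregister() API above. The
 * "myprov" names are hypothetical; real providers generate their probe
 * and event descriptors through the LTTng probe macros rather than
 * writing them by hand.
 */
#if 0	/* example only, not compiled */
static const struct lttng_event_desc *myprov_event_desc[] = {
	/* Event descriptors for the "myprov_*" events would go here. */
};

static struct lttng_probe_desc myprov_probe_desc = {
	.provider = "myprov",
	.event_desc = myprov_event_desc,
	.nr_events = ARRAY_SIZE(myprov_event_desc),
};

static int __init myprov_probe_init(void)
{
	/* Returns -EEXIST if a "myprov" provider is already registered. */
	return lttng_probe_register(&myprov_probe_desc);
}

static void __exit myprov_probe_exit(void)
{
	lttng_probe_unregister(&myprov_probe_desc);
}

module_init(myprov_probe_init);
module_exit(myprov_probe_exit);
#endif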