Tests: add missing kernel test cases to make check target
[lttng-tools.git] / src / bin / lttng-sessiond / notification-thread.h
1 /*
2 * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
3 *
4 * SPDX-License-Identifier: GPL-2.0-only
5 *
6 */
7
8 #ifndef NOTIFICATION_THREAD_H
9 #define NOTIFICATION_THREAD_H
10
#include "action-executor.h"
#include "thread.h"

#include <common/compat/poll.h>
#include <common/hashtable/hashtable.h>
#include <common/pipe.h>

#include <lttng/domain.h>
#include <lttng/trigger/trigger.h>

#include <pthread.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stdint.h>
#include <urcu.h>
#include <urcu/list.h>
#include <urcu/rculfhash.h>
23
24 typedef uint64_t notification_client_id;
25
26 /*
27 * The notification thread holds no ownership of the tracer event source pipe
28 * file descriptor. The tracer management logic must remove the event source
29 * from the notification thread (see external commands) before releasing
30 * this file descriptor.
31 */
32 struct notification_event_tracer_event_source_element {
33 int fd;
34 /*
35 * A tracer event source can be removed from the notification thread's
36 * poll set before the end of its lifetime (for instance, when an error
37 * or hang-up is detected on its file descriptor). This is done to
38 * allow the notification thread to ignore follow-up events on this
39 * file descriptors.
40 *
41 * Under such circumstances, the notification thread still expects
42 * the normal clean-up to occur through the 'REMOVE_TRACER_EVENT_SOURCE'
43 * command.
44 */
45 bool is_fd_in_poll_set;
46 enum lttng_domain_type domain;
47 struct cds_list_head node;
48 };
49
50 struct notification_trigger_tokens_ht_element {
51 uint64_t token;
52 /* Weak reference to the trigger. */
53 struct lttng_trigger *trigger;
54 struct cds_lfht_node node;
55 /* call_rcu delayed reclaim. */
56 struct rcu_head rcu_node;
57 };
58
59 struct notification_thread_handle {
60 /*
61 * Queue of struct notification command.
62 * event_pipe must be WRITE(2) to signal that a new command
63 * has been enqueued.
64 */
65 struct {
66 struct lttng_pipe *event_pipe;
67 struct cds_list_head list;
68 pthread_mutex_t lock;
69 } cmd_queue;
70 /*
71 * Read side of pipes used to receive channel status info collected
72 * by the various consumer daemons.
73 */
74 struct {
75 int ust32_consumer;
76 int ust64_consumer;
77 int kernel_consumer;
78 } channel_monitoring_pipes;
79 /* Used to wait for the launch of the notification thread. */
80 sem_t ready;
81 };
82
83 /**
84 * This thread maintains an internal state associating clients and triggers.
85 *
86 * In order to speed-up and simplify queries, hash tables providing the
87 * following associations are maintained:
88 *
89 * - client_socket_ht: associate a client's socket (fd) to its
90 * "struct notification_client".
91 * This hash table owns the "struct notification_client" which must
92 * thus be disposed-of on removal from the hash table.
93 *
94 * - client_id_ht: associate a client's id to its "struct notification_client"
95 * This hash table holds a _weak_ reference to the
96 * "struct notification_client".
97 *
98 * - channel_triggers_ht:
99 * associates a channel key to a list of
100 * struct lttng_trigger_list_nodes. The triggers in this list are
101 * those that have conditions that apply to a particular channel.
102 * A channel entry is only created when a channel is added; the
103 * list of triggers applying to such a channel is built at that
104 * moment.
105 * This hash table owns the list, but not the triggers themselves.
106 *
107 * - session_triggers_ht:
108 * associates a session name to a list of
109 * struct lttng_trigger_list_nodes. The triggers in this list are
110 * those that have conditions that apply to a particular session.
111 * A session entry is only created when a session is created; the
112 * list of triggers applying to this new session is built at that
113 * moment. This happens at the time of creation of a session_info.
114 * Likewise, the list is destroyed at the time of the session_info's
115 * destruction.
116 *
117 * - channel_state_ht:
118 * associates a pair (channel key, channel domain) to its last
119 * sampled state received from the consumer daemon
120 * (struct channel_state).
121 * This previous sample is kept to implement edge-triggered
122 * conditions as we need to detect the state transitions.
123 * This hash table owns the channel state.
124 *
125 * - notification_trigger_clients_ht:
126 * associates notification-emitting triggers to clients
127 * (struct notification_client_list) subscribed to those
128 * conditions.
129 * The condition's hash and match functions are used directly since
130 * all triggers in this hash table have the "notify" action.
131 * This hash table holds no ownership.
132 *
133 * - channels_ht:
134 * associates a channel_key to a struct channel_info. The hash table
135 * holds the ownership of the struct channel_info.
136 *
137 * - sessions_ht:
138 * associates a session_name (hash) to a struct session_info. The
139 * hash table holds no ownership of the struct session_info;
140 * the session_info structure is owned by the session's various
141 * channels through their struct channel_info (ref-counting is used).
142 *
143 * - triggers_ht:
144 * associates a trigger to a struct lttng_trigger_ht_element.
145 * The hash table holds the ownership of the
146 * lttng_trigger_ht_elements along with the triggers themselves.
147 * - triggers_by_name_uid_ht:
148 * associates a trigger (name, uid) tuple to
149 * a struct lttng_trigger_ht_element.
150 * The hash table does not hold any ownership and is used strictly
151 * for lookup on registration.
152 * - tracer_event_sources_list:
153 * A list of tracer event source (read side fd) of type
154 * struct notification_event_tracer_event_source_element.
155 *
156 *
157 * The thread reacts to the following internal events:
158 * 1) creation of a tracing channel,
159 * 2) destruction of a tracing channel,
160 * 3) registration of a trigger,
161 * 4) unregistration of a trigger,
162 * 5) reception of a channel monitor sample from the consumer daemon,
163 * 6) Session rotation ongoing,
164 * 7) Session rotation completed,
165 * 8) registration of a tracer event source,
166 * 9) unregistration of a tracer event source,
167 *
 * Events specific to notification-emitting triggers:
 * 10) connection of a notification client,
 * 11) disconnection of a notification client,
 * 12) subscription of a client to a condition's notifications,
 * 13) unsubscription of a client from a condition's notifications,
173 *
174 *
175 * 1) Creation of a tracing channel
176 * - notification_trigger_clients_ht is traversed to identify
177 * triggers which apply to this new channel,
178 * - triggers identified are added to the channel_triggers_ht.
179 * - add channel to channels_ht
180 * - if it is the first channel of a session, a session_info is created and
181 * added to the sessions_ht. A list of the triggers associated with that
182 * session is built, and it is added to session_triggers_ht.
183 *
184 * 2) Destruction of a tracing channel
185 * - remove entry from channel_triggers_ht, releasing the list wrapper and
186 * elements,
187 * - remove entry from the channel_state_ht.
188 * - remove channel from channels_ht
 * - if it was the last known channel of a session, the session_info
 *   structure is torn down, which in turn destroys the list of triggers
 *   applying to that session.
192 *
193 * 3) Registration of a trigger
194 * - if the trigger's action is of type "notify",
195 * - traverse the list of conditions of every client to build a list of
196 * clients which have to be notified when this trigger's condition is met,
197 * - add list of clients (even if it is empty) to the
198 * notification_trigger_clients_ht,
199 * - add trigger to channel_triggers_ht (if applicable),
200 * - add trigger to session_triggers_ht (if applicable),
201 * - add trigger to triggers_by_name_uid_ht
202 * - add trigger to triggers_ht
203 * - evaluate the trigger's condition right away to react if that condition
204 * is true from the beginning.
205 *
206 * 4) Unregistration of a trigger
207 * - if the trigger's action is of type "notify",
208 * - remove the trigger from the notification_trigger_clients_ht,
209 * - remove trigger from channel_triggers_ht (if applicable),
210 * - remove trigger from session_triggers_ht (if applicable),
211 * - remove trigger from triggers_by_name_uid_ht
212 * - remove trigger from triggers_ht
213 *
214 * 5) Reception of a channel monitor sample from the consumer daemon
215 * - evaluate the conditions associated with the triggers found in
216 * the channel_triggers_ht,
217 * - if a condition evaluates to "true" and the condition is of type
218 * "notify", query the notification_trigger_clients_ht and send
219 * a notification to the clients.
220 *
221 * 6) Session rotation ongoing
222 *
223 * 7) Session rotation completed
224 *
225 * 8) Registration of a tracer event source
226 * - Add the tracer event source of the application to
227 * tracer_event_sources_list,
 *   - Add the tracer event source to the pollset.
229 *
 * 9) Unregistration of a tracer event source
 *   - Remove the tracer event source of the application from
 *     tracer_event_sources_list,
 *   - Remove the tracer event source from the pollset.
234 *
235 * 10) Connection of a client
236 * - add client socket to the client_socket_ht,
237 * - add client socket to the client_id_ht.
238 *
239 * 11) Disconnection of a client
240 * - remove client socket from the client_id_ht,
241 * - remove client socket from the client_socket_ht,
242 * - traverse all conditions to which the client is subscribed and remove
243 * the client from the notification_trigger_clients_ht.
244 *
245 * 12) Subscription of a client to a condition's notifications
246 * - Add the condition to the client's list of subscribed conditions,
247 * - Look-up notification_trigger_clients_ht and add the client to
248 * list of clients.
249 * - Evaluate the condition for the client that subscribed if the trigger
250 * was already registered.
251 *
252 * 13) Unsubscription of a client to a condition's notifications
253 * - Remove the condition from the client's list of subscribed conditions,
254 * - Look-up notification_trigger_clients_ht and remove the client
255 * from the list of clients.
256 */
257 struct notification_thread_state {
258 int notification_channel_socket;
259 struct lttng_poll_event events;
260 struct cds_lfht *client_socket_ht;
261 struct cds_lfht *client_id_ht;
262 struct cds_lfht *channel_triggers_ht;
263 struct cds_lfht *session_triggers_ht;
264 struct cds_lfht *channel_state_ht;
265 struct cds_lfht *notification_trigger_clients_ht;
266 struct cds_lfht *channels_ht;
267 struct cds_lfht *sessions_ht;
268 struct cds_lfht *triggers_ht;
269 struct cds_lfht *triggers_by_name_uid_ht;
270 struct cds_lfht *trigger_tokens_ht;
271 struct {
272 uint64_t next_tracer_token;
273 uint64_t name_offset;
274 } trigger_id;
275 /*
276 * Read side of the pipes used to receive tracer events. As their name
277 * implies, tracer event source activity originate from either
278 * registered applications (user space tracer) or from the kernel
279 * tracer.
280 *
281 * The list is not protected by a lock since add and remove operations
282 * are currently done only by the notification thread through in
283 * response to blocking commands.
284 */
285 struct cds_list_head tracer_event_sources_list;
286 notification_client_id next_notification_client_id;
287 struct action_executor *executor;
288
289 /*
290 * Indicates the thread to break for the poll event processing loop and
291 * call _poll_wait() again.
292 *
293 * This is necessary because some events on one fd might trigger the
294 * consumption of another fd.
295 * For example, a single _poll_wait() call can return notification
296 * thread commands and events from the tracer event source (event
297 * notifier).
298 * Picture a scenario where we receive two events:
299 * the first one is a _REMOVE_TRACER_EVENT_SOURCE command, and
300 * the second is an POLLIN on the tracer event source fd.
301 *
302 * The _REMOVE_TRACER_EVENT_SOURCE will read all the data of the
303 * removed tracer event source.
304 *
305 * The second event is now invalid has we consumed all the data for
306 * which we received the POLLIN.
307 *
308 * For this reason, we need to break for the event processing loop and
309 * call _poll_wait() again to get a clean view of the activity on the
310 * fds.
311 */
312 bool restart_poll;
313 };
314
315 /* notification_thread_data takes ownership of the channel monitor pipes. */
316 struct notification_thread_handle *notification_thread_handle_create(
317 struct lttng_pipe *ust32_channel_monitor_pipe,
318 struct lttng_pipe *ust64_channel_monitor_pipe,
319 struct lttng_pipe *kernel_channel_monitor_pipe);
320 void notification_thread_handle_destroy(
321 struct notification_thread_handle *handle);
322 struct lttng_thread *launch_notification_thread(
323 struct notification_thread_handle *handle);
324
325 #endif /* NOTIFICATION_THREAD_H */
This page took 0.036479 seconds and 4 git commands to generate.