/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; only version 2
 * of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */

#define _GNU_SOURCE
#include <errno.h>	/* errno, used by the error paths below */
#include <fcntl.h>
#include <limits.h>	/* PATH_MAX */
#include <poll.h>
#include <pthread.h>
#include <stdio.h>	/* perror() */
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include <urcu/list.h>

#include "libkernelctl.h"
#include "liblttkconsumerd.h"
#include "lttngerr.h"

static struct kconsumerd_global_data {
	/*
	 * kconsumerd_data.lock protects kconsumerd_data.fd_list,
	 * kconsumerd_data.fds_count, and kconsumerd_data.need_update. It
	 * ensures the count matches the number of items in the fd_list.
	 * It also ensures that a list update *always* triggers an fd_array
	 * update: the list update and the need_update flag update must be
	 * atomic with respect to each other, as must the flag read, fd array
	 * rebuild and flag clear.
	 */
	pthread_mutex_t lock;
	/*
	 * Number of elements in the list below. Protected by
	 * kconsumerd_data.lock.
	 */
	unsigned int fds_count;
	/*
	 * List of FDs. Protected by kconsumerd_data.lock.
	 */
	struct kconsumerd_fd_list fd_list;
	/*
	 * Flag specifying if the local array of FDs needs update in the
	 * poll function. Protected by kconsumerd_data.lock.
	 */
	unsigned int need_update;
} kconsumerd_data = {
	.fd_list.head = CDS_LIST_HEAD_INIT(kconsumerd_data.fd_list.head),
};

/* communication with splice */
static int kconsumerd_thread_pipe[2];

/* pipe to wake the poll thread when necessary */
static int kconsumerd_poll_pipe[2];

/* to let the signal handler wake up the fd receiver thread */
static int kconsumerd_should_quit[2];

/* timeout parameter, to control the polling thread grace period */
static int kconsumerd_poll_timeout = -1;

/* socket to communicate errors with sessiond */
static int kconsumerd_error_socket;

/* socket to exchange commands with sessiond */
static char *kconsumerd_command_sock_path;

/*
 * Flag to inform the polling thread to quit once all fds have hung up.
 * Updated by kconsumerd_thread_receive_fds when it notices that all
 * fds have hung up. Also updated by the signal handler
 * (kconsumerd_should_exit()). Read by the polling threads.
 */
static volatile int kconsumerd_quit = 0;

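/*
 * Thread layout (see the thread functions below): kconsumerd_thread_receive_fds
 * receives stream fds from the session daemon over the command socket, while
 * kconsumerd_thread_poll_fds polls those fds and writes their data out to the
 * trace files. kconsumerd_poll_pipe lets the receiving thread wake the polling
 * thread whenever the fd list changes.
 */
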
/*
 * kconsumerd_set_error_socket
 *
 * Set the error socket
 */
void kconsumerd_set_error_socket(int sock)
{
	kconsumerd_error_socket = sock;
}

/*
 * kconsumerd_set_command_socket_path
 *
 * Set the command socket path
 */
void kconsumerd_set_command_socket_path(char *sock)
{
	kconsumerd_command_sock_path = sock;
}

/*
 * kconsumerd_find_session_fd
 *
 * Find a session fd in the global list.
 * The kconsumerd_data.lock must be held by the caller during this call.
 *
 * Return 1 if found else 0
 */
static int kconsumerd_find_session_fd(int fd)
{
	struct kconsumerd_fd *iter;

	cds_list_for_each_entry(iter, &kconsumerd_data.fd_list.head, list) {
		if (iter->sessiond_fd == fd) {
			DBG("Duplicate session fd %d", fd);
			/* the caller is responsible for releasing the lock */
			return 1;
		}
	}

	return 0;
}

/*
 * kconsumerd_del_fd
 *
 * Remove a fd from the global list protected by a mutex
 */
static void kconsumerd_del_fd(struct kconsumerd_fd *lcf)
{
	pthread_mutex_lock(&kconsumerd_data.lock);
	cds_list_del(&lcf->list);
	if (kconsumerd_data.fds_count > 0) {
		kconsumerd_data.fds_count--;
		if (lcf != NULL) {
			close(lcf->out_fd);
			close(lcf->consumerd_fd);
			free(lcf);
			lcf = NULL;
		}
	}
	kconsumerd_data.need_update = 1;
	pthread_mutex_unlock(&kconsumerd_data.lock);
}

/*
 * kconsumerd_add_fd
 *
 * Add a fd to the global list protected by a mutex
 */
static int kconsumerd_add_fd(struct lttcomm_kconsumerd_msg *buf, int consumerd_fd)
{
	int ret;
	struct kconsumerd_fd *tmp_fd;

	pthread_mutex_lock(&kconsumerd_data.lock);
	/* Check if it already exists */
	ret = kconsumerd_find_session_fd(buf->fd);
	if (ret == 1) {
		goto end;
	}

	tmp_fd = malloc(sizeof(struct kconsumerd_fd));
	if (tmp_fd == NULL) {
		perror("malloc kconsumerd_fd");
		ret = -1;
		goto end;
	}
	tmp_fd->sessiond_fd = buf->fd;
	tmp_fd->consumerd_fd = consumerd_fd;
	tmp_fd->state = buf->state;
	tmp_fd->max_sb_size = buf->max_sb_size;
	strncpy(tmp_fd->path_name, buf->path_name, PATH_MAX);

	/* Opening the tracefile in write mode */
	ret = open(tmp_fd->path_name,
			O_WRONLY|O_CREAT|O_TRUNC, S_IRWXU|S_IRWXG|S_IRWXO);
	if (ret < 0) {
		ERR("Opening %s", tmp_fd->path_name);
		perror("open");
		goto end;
	}
	tmp_fd->out_fd = ret;
	tmp_fd->out_fd_offset = 0;

	DBG("Adding %s (%d, %d, %d)", tmp_fd->path_name,
			tmp_fd->sessiond_fd, tmp_fd->consumerd_fd, tmp_fd->out_fd);

	cds_list_add(&tmp_fd->list, &kconsumerd_data.fd_list.head);
	kconsumerd_data.fds_count++;
	kconsumerd_data.need_update = 1;
end:
	pthread_mutex_unlock(&kconsumerd_data.lock);
	return ret;
}

/*
 * kconsumerd_change_fd_state
 *
 * Update a fd according to what we just received
 */
static void kconsumerd_change_fd_state(int sessiond_fd,
		enum kconsumerd_fd_state state)
{
	struct kconsumerd_fd *iter;

	pthread_mutex_lock(&kconsumerd_data.lock);
	cds_list_for_each_entry(iter, &kconsumerd_data.fd_list.head, list) {
		if (iter->sessiond_fd == sessiond_fd) {
			iter->state = state;
			break;
		}
	}
	kconsumerd_data.need_update = 1;
	pthread_mutex_unlock(&kconsumerd_data.lock);
}

/*
 * kconsumerd_update_poll_array
 *
 * Fill the pollfd structure and the local view of the out fds, to avoid
 * doing a lookup in the linked list and to avoid concurrency issues when
 * writing is needed.
 * Returns the number of fds in the structures.
 * Called with kconsumerd_data.lock held.
 */
static int kconsumerd_update_poll_array(struct pollfd **pollfd,
		struct kconsumerd_fd **local_kconsumerd_fd)
{
	struct kconsumerd_fd *iter;
	int i = 0;

	DBG("Updating poll fd array");

	cds_list_for_each_entry(iter, &kconsumerd_data.fd_list.head, list) {
		DBG("Inside for each");
		if (iter->state == ACTIVE_FD) {
			DBG("Active FD %d", iter->consumerd_fd);
			(*pollfd)[i].fd = iter->consumerd_fd;
			(*pollfd)[i].events = POLLIN | POLLPRI;
			local_kconsumerd_fd[i] = iter;
			i++;
		}
	}

	/*
	 * insert the kconsumerd_poll_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FDs
	 */
	(*pollfd)[i].fd = kconsumerd_poll_pipe[0];
	(*pollfd)[i].events = POLLIN;
	return i;
}

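/*
 * Note: two data paths exist for getting a sub-buffer to disk. The mmap path
 * below maps the ring buffer and write()s it out; the splice path further down
 * moves the data through kconsumerd_thread_pipe without copying it to user
 * space. kconsumerd_read_subbuffer() selects one of them based on
 * DEFAULT_KERNEL_CHANNEL_OUTPUT (LTTNG_EVENT_MMAP or LTTNG_EVENT_SPLICE).
 */
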
/*
 * kconsumerd_on_read_subbuffer_mmap
 *
 * mmap the ring buffer, read it and write the data to the tracefile.
 * Returns the number of bytes written
 */
static int kconsumerd_on_read_subbuffer_mmap(
		struct kconsumerd_fd *kconsumerd_fd, unsigned long len)
{
	unsigned long mmap_len, mmap_offset, padded_len, padding_len;
	char *mmap_base;
	char *padding = NULL;
	long ret = 0;
	off_t orig_offset = kconsumerd_fd->out_fd_offset;
	int fd = kconsumerd_fd->consumerd_fd;
	int outfd = kconsumerd_fd->out_fd;

	/* get the padded subbuffer size to know the padding required */
	ret = kernctl_get_padded_subbuf_size(fd, &padded_len);
	if (ret != 0) {
		ret = errno;
		perror("kernctl_get_padded_subbuf_size");
		goto end;
	}
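	/*
	 * The sub-buffer is written to disk at its padded size: the data
	 * itself first, then (padded_len - len) bytes of zero padding, so the
	 * on-disk layout presumably matches what the trace reader expects.
	 */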
	padding_len = padded_len - len;
	padding = malloc(padding_len * sizeof(char));
	memset(padding, '\0', padding_len);

	/* get the len of the mmap region */
	ret = kernctl_get_mmap_len(fd, &mmap_len);
	if (ret != 0) {
		ret = errno;
		perror("kernctl_get_mmap_len");
		goto end;
	}

	/* get the offset inside the fd to mmap */
	ret = kernctl_get_mmap_read_offset(fd, &mmap_offset);
	if (ret != 0) {
		ret = errno;
		perror("kernctl_get_mmap_read_offset");
		goto end;
	}

	mmap_base = mmap(NULL, mmap_len, PROT_READ, MAP_PRIVATE, fd, mmap_offset);
	if (mmap_base == MAP_FAILED) {
		perror("Error mmapping");
		ret = -1;
		goto end;
	}

	while (len > 0) {
		ret = write(outfd, mmap_base, len);
		if (ret < 0) {
			ret = errno;
			perror("Error in file write");
			goto end;
		} else if (ret >= len) {
			len = 0;
		} else {
			/* partial write: skip the bytes already written */
			len -= ret;
			mmap_base += ret;
		}
		/* This won't block, but will start writeout asynchronously */
		sync_file_range(outfd, kconsumerd_fd->out_fd_offset, ret,
				SYNC_FILE_RANGE_WRITE);
		kconsumerd_fd->out_fd_offset += ret;
	}

	/* once all the data is written, write the padding to disk */
	ret = write(outfd, padding, padding_len);
	if (ret < 0) {
		ret = errno;
		perror("Error writing padding to file");
		goto end;
	}

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset >= kconsumerd_fd->max_sb_size) {
		sync_file_range(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size,
				SYNC_FILE_RANGE_WAIT_BEFORE
				| SYNC_FILE_RANGE_WRITE
				| SYNC_FILE_RANGE_WAIT_AFTER);

		/*
		 * Give hints to the kernel about how we access the file:
		 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
		 * we write it.
		 *
		 * We need to call fadvise again after the file grows because the
		 * kernel does not seem to apply fadvise to non-existing parts of the
		 * file.
		 *
		 * Call fadvise _after_ having waited for the page writeback to
		 * complete because the dirty page writeback semantic is not well
		 * defined. So it can be expected to lead to lower throughput in
		 * streaming.
		 */
		posix_fadvise(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size, POSIX_FADV_DONTNEED);
	}
	goto end;

end:
	if (padding != NULL) {
		free(padding);
	}
	return ret;
}

/*
 * kconsumerd_on_read_subbuffer
 *
 * Splice the data from the ring buffer to the tracefile.
 * Returns the number of bytes spliced
 */
static int kconsumerd_on_read_subbuffer(
		struct kconsumerd_fd *kconsumerd_fd, unsigned long len)
{
	long ret = 0;
	loff_t offset = 0;
	off_t orig_offset = kconsumerd_fd->out_fd_offset;
	int fd = kconsumerd_fd->consumerd_fd;
	int outfd = kconsumerd_fd->out_fd;

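	/*
	 * splice() cannot move data directly from the channel fd to the trace
	 * file: one side of each splice call has to be a pipe. The data
	 * therefore takes two hops, channel -> kconsumerd_thread_pipe ->
	 * trace file.
	 */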
	while (len > 0) {
		DBG("splice chan to pipe offset %lu (fd : %d)",
				(unsigned long)offset, fd);
		ret = splice(fd, &offset, kconsumerd_thread_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe ret %ld", ret);
		if (ret < 0) {
			ret = errno;
			perror("Error in relay splice");
			goto splice_error;
		}

		ret = splice(kconsumerd_thread_pipe[0], NULL, outfd, NULL, ret,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice pipe to file %ld", ret);
		if (ret < 0) {
			ret = errno;
			perror("Error in file splice");
			goto splice_error;
		}
		if (ret >= len) {
			len = 0;
		} else {
			/* partial splice: account for the bytes already written */
			len -= ret;
		}
		/* This won't block, but will start writeout asynchronously */
		sync_file_range(outfd, kconsumerd_fd->out_fd_offset, ret,
				SYNC_FILE_RANGE_WRITE);
		kconsumerd_fd->out_fd_offset += ret;
	}

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset >= kconsumerd_fd->max_sb_size) {
		sync_file_range(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size,
				SYNC_FILE_RANGE_WAIT_BEFORE
				| SYNC_FILE_RANGE_WRITE
				| SYNC_FILE_RANGE_WAIT_AFTER);
		/*
		 * Give hints to the kernel about how we access the file:
		 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
		 * we write it.
		 *
		 * We need to call fadvise again after the file grows because the
		 * kernel does not seem to apply fadvise to non-existing parts of the
		 * file.
		 *
		 * Call fadvise _after_ having waited for the page writeback to
		 * complete because the dirty page writeback semantic is not well
		 * defined. So it can be expected to lead to lower throughput in
		 * streaming.
		 */
		posix_fadvise(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size, POSIX_FADV_DONTNEED);
	}
	goto end;

splice_error:
	/* send the appropriate error description to sessiond */
	switch (ret) {
	case EBADF:
		kconsumerd_send_error(KCONSUMERD_SPLICE_EBADF);
		break;
	case EINVAL:
		kconsumerd_send_error(KCONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		kconsumerd_send_error(KCONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		kconsumerd_send_error(KCONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	return ret;
}

/*
 * kconsumerd_read_subbuffer
 *
 * Consume data on a file descriptor and write it on a trace file
 */
static int kconsumerd_read_subbuffer(struct kconsumerd_fd *kconsumerd_fd)
{
	unsigned long len;
	int err;
	long ret = 0;
	int infd = kconsumerd_fd->consumerd_fd;

	DBG("In kconsumerd_read_subbuffer (infd : %d)", infd);
	/* Get the next subbuffer */
	err = kernctl_get_next_subbuf(infd);
	if (err != 0) {
		ret = errno;
		perror("Reserving sub buffer failed (everything is normal, "
				"it is due to concurrency)");
		goto end;
	}

	switch (DEFAULT_KERNEL_CHANNEL_OUTPUT) {
	case LTTNG_EVENT_SPLICE:
		/* read the whole subbuffer */
		err = kernctl_get_padded_subbuf_size(infd, &len);
		if (err != 0) {
			ret = errno;
			perror("Getting sub-buffer len failed.");
			goto end;
		}

		/* splice the subbuffer to the tracefile */
		ret = kconsumerd_on_read_subbuffer(kconsumerd_fd, len);
		if (ret < 0) {
			/*
			 * display the error but continue processing to try
			 * to release the subbuffer
			 */
			ERR("Error splicing to tracefile");
		}
		break;
	case LTTNG_EVENT_MMAP:
		/* read the used subbuffer size */
		err = kernctl_get_subbuf_size(infd, &len);
		if (err != 0) {
			ret = errno;
			perror("Getting sub-buffer len failed.");
			goto end;
		}
		/* write the subbuffer to the tracefile */
		ret = kconsumerd_on_read_subbuffer_mmap(kconsumerd_fd, len);
		if (ret < 0) {
			/*
			 * display the error but continue processing to try
			 * to release the subbuffer
			 */
			ERR("Error writing to tracefile");
		}
		break;
	default:
		ERR("Unknown output method");
		ret = -1;
	}

	err = kernctl_put_next_subbuf(infd);
	if (err != 0) {
		ret = errno;
		if (errno == EFAULT) {
			perror("Error in unreserving sub buffer\n");
		} else if (errno == EIO) {
			/* Should never happen with newer LTTng versions */
			perror("Reader has been pushed by the writer, last sub-buffer corrupted.");
		}
		goto end;
	}

end:
	return ret;
}

/*
 * kconsumerd_poll_socket
 *
 * Poll on the should_quit pipe and the command socket.
 * Returns -1 on error or if the thread should exit, 0 if data is
 * available on the command socket.
 */
int kconsumerd_poll_socket(struct pollfd *kconsumerd_sockpoll)
{
	int num_rdy;

	num_rdy = poll(kconsumerd_sockpoll, 2, -1);
	if (num_rdy == -1) {
		perror("Poll error");
		goto exit;
	}
	if (kconsumerd_sockpoll[0].revents == POLLIN) {
		DBG("kconsumerd_should_quit wake up");
		goto exit;
	}
	return 0;

exit:
	return -1;
}

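/*
 * Polling the should_quit pipe together with the command socket is what lets
 * kconsumerd_should_exit(), called from a signal handler, unblock the fd
 * receiver thread even while it is waiting for traffic from the session
 * daemon.
 */
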
/*
 * kconsumerd_consumerd_recv_fd
 *
 * Receives an array of file descriptors and the associated
 * structures describing each fd (path name).
 * Returns the size of received data
 */
static int kconsumerd_consumerd_recv_fd(int sfd,
		struct pollfd *kconsumerd_sockpoll, int size,
		enum kconsumerd_command cmd_type)
{
	struct msghdr msg;
	struct iovec iov[1];
	int ret = 0, i, tmp2;
	struct cmsghdr *cmsg;
	int nb_fd;
	char recv_fd[CMSG_SPACE(sizeof(int))];
	struct lttcomm_kconsumerd_msg lkm;

	/* the number of fds we are about to receive */
	nb_fd = size / sizeof(struct lttcomm_kconsumerd_msg);

	for (i = 0; i < nb_fd; i++) {
		memset(&msg, 0, sizeof(msg));

		/* Prepare to receive the structures */
		iov[0].iov_base = &lkm;
		iov[0].iov_len = sizeof(lkm);
		msg.msg_iov = iov;
		msg.msg_iovlen = 1;

		msg.msg_control = recv_fd;
		msg.msg_controllen = sizeof(recv_fd);

		DBG("Waiting to receive fd");
		if (kconsumerd_poll_socket(kconsumerd_sockpoll) < 0) {
			goto end;
		}

		if ((ret = recvmsg(sfd, &msg, 0)) < 0) {
			perror("recvmsg");
			continue;
		}

		if (ret != (size / nb_fd)) {
			ERR("Received only %d, expected %d", ret, size / nb_fd);
			kconsumerd_send_error(KCONSUMERD_ERROR_RECV_FD);
			goto end;
		}

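		/*
		 * The stream fd itself travels as SCM_RIGHTS ancillary data in
		 * the control buffer; the iovec payload (lkm) carries the
		 * matching description (path name, state, max sub-buffer size).
		 */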
		cmsg = CMSG_FIRSTHDR(&msg);
		if (!cmsg) {
			ERR("Invalid control message header");
			ret = -1;
			kconsumerd_send_error(KCONSUMERD_ERROR_RECV_FD);
			goto end;
		}
		/* if we received fds */
		if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
			switch (cmd_type) {
			case ADD_STREAM:
				DBG("kconsumerd_add_fd %s (%d)", lkm.path_name, (CMSG_DATA(cmsg)[0]));
				ret = kconsumerd_add_fd(&lkm, (CMSG_DATA(cmsg)[0]));
				if (ret < 0) {
					kconsumerd_send_error(KCONSUMERD_OUTFD_ERROR);
					goto end;
				}
				break;
			case UPDATE_STREAM:
				kconsumerd_change_fd_state(lkm.fd, lkm.state);
				break;
			default:
				break;
			}
			/* signal the poll thread */
			tmp2 = write(kconsumerd_poll_pipe[1], "4", 1);
		} else {
			ERR("Didn't receive any fd");
			kconsumerd_send_error(KCONSUMERD_ERROR_RECV_FD);
			ret = -1;
			goto end;
		}
	}

end:
	return ret;
}

/*
 * kconsumerd_thread_poll_fds
 *
 * This thread polls the fds in the fd_list to consume the data
 * and write it to the tracefile if necessary.
 */
void *kconsumerd_thread_poll_fds(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i;
	struct pollfd *pollfd = NULL;
	/* local view of the fds */
	struct kconsumerd_fd **local_kconsumerd_fd = NULL;
	/* local view of kconsumerd_data.fds_count */
	int nb_fd = 0;
	char tmp;
	int tmp2;

	ret = pipe(kconsumerd_thread_pipe);
	if (ret < 0) {
		perror("Error creating pipe");
		goto end;
	}

	local_kconsumerd_fd = malloc(sizeof(struct kconsumerd_fd));

	while (1) {
		high_prio = 0;
		num_hup = 0;

		/*
		 * the fd_list has been updated, we need to update our
		 * local array as well
		 */
		pthread_mutex_lock(&kconsumerd_data.lock);
		if (kconsumerd_data.need_update) {
			if (pollfd != NULL) {
				free(pollfd);
				pollfd = NULL;
			}
			if (local_kconsumerd_fd != NULL) {
				free(local_kconsumerd_fd);
				local_kconsumerd_fd = NULL;
			}

			/* allocate for all fds + 1 for the kconsumerd_poll_pipe */
			pollfd = malloc((kconsumerd_data.fds_count + 1) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				perror("pollfd malloc");
				pthread_mutex_unlock(&kconsumerd_data.lock);
				goto end;
			}

			/* allocate for all fds + 1 for the kconsumerd_poll_pipe */
			local_kconsumerd_fd = malloc((kconsumerd_data.fds_count + 1) *
					sizeof(struct kconsumerd_fd));
			if (local_kconsumerd_fd == NULL) {
				perror("local_kconsumerd_fd malloc");
				pthread_mutex_unlock(&kconsumerd_data.lock);
				goto end;
			}
			ret = kconsumerd_update_poll_array(&pollfd, local_kconsumerd_fd);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				kconsumerd_send_error(KCONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&kconsumerd_data.lock);
				goto end;
			}
			nb_fd = ret;
			kconsumerd_data.need_update = 0;
		}
		pthread_mutex_unlock(&kconsumerd_data.lock);

		/* poll on the array of fds */
		DBG("polling on %d fd", nb_fd + 1);
		num_rdy = poll(pollfd, nb_fd + 1, kconsumerd_poll_timeout);
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			perror("Poll error");
			kconsumerd_send_error(KCONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		/* No FDs left and kconsumerd_quit is set: clean up and exit the thread */
		if (nb_fd == 0 && kconsumerd_quit == 1) {
			goto end;
		}

		/*
		 * If the kconsumerd_poll_pipe triggered the poll, go directly
		 * back to the beginning of the loop to update the array. We
		 * want to prioritize array updates over low-priority reads.
		 */
		if (pollfd[nb_fd].revents == POLLIN) {
			DBG("kconsumerd_poll_pipe wake up");
			tmp2 = read(kconsumerd_poll_pipe[0], &tmp, 1);
			continue;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			switch (pollfd[i].revents) {
			case POLLERR:
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				kconsumerd_del_fd(local_kconsumerd_fd[i]);
				num_hup++;
				break;
			case POLLHUP:
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				kconsumerd_del_fd(local_kconsumerd_fd[i]);
				num_hup++;
				break;
			case POLLNVAL:
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				kconsumerd_del_fd(local_kconsumerd_fd[i]);
				num_hup++;
				break;
			case POLLPRI:
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				ret = kconsumerd_read_subbuffer(local_kconsumerd_fd[i]);
				/* it's ok to have an unavailable sub-buffer */
				if (ret == EAGAIN) {
					ret = 0;
				}
				break;
			}
		}

		/* If every buffer FD has hung up, we end the read loop here */
		if (nb_fd > 0 && num_hup == nb_fd) {
			DBG("every buffer FD has hung up\n");
			if (kconsumerd_quit == 1) {
				goto end;
			}
			continue;
		}

		/* Take care of low priority channels. */
		if (high_prio == 0) {
			for (i = 0; i < nb_fd; i++) {
				if (pollfd[i].revents == POLLIN) {
					DBG("Normal read on fd %d", pollfd[i].fd);
					ret = kconsumerd_read_subbuffer(local_kconsumerd_fd[i]);
					/* it's ok to have an unavailable subbuffer */
					if (ret == EAGAIN) {
						ret = 0;
					}
				}
			}
		}
	}
end:
	DBG("polling thread exiting");
	if (pollfd != NULL) {
		free(pollfd);
		pollfd = NULL;
	}
	if (local_kconsumerd_fd != NULL) {
		free(local_kconsumerd_fd);
		local_kconsumerd_fd = NULL;
	}
	return NULL;
}

/*
 * kconsumerd_init(void)
 *
 * Initialise the necessary environment:
 * - inform the polling thread to update the polling array
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 */
int kconsumerd_init(void)
{
	int ret;

	/* need to update the polling array at init time */
	kconsumerd_data.need_update = 1;

	ret = pipe(kconsumerd_poll_pipe);
	if (ret < 0) {
		perror("Error creating poll pipe");
		goto end;
	}

	ret = pipe(kconsumerd_should_quit);
	if (ret < 0) {
		perror("Error creating recv pipe");
		goto end;
	}

end:
	return ret;
}
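
/*
 * A plausible startup sequence for the daemon embedding this library (an
 * assumption based on the functions exported here, not something this file
 * enforces): set the command socket path and the error socket with the
 * setters above, call kconsumerd_init(), then spawn
 * kconsumerd_thread_receive_fds and kconsumerd_thread_poll_fds, and call
 * kconsumerd_cleanup() after joining both threads.
 */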

/*
 * kconsumerd_thread_receive_fds
 *
 * This thread listens on the consumerd socket and
 * receives the file descriptors from ltt-sessiond
 */
void *kconsumerd_thread_receive_fds(void *data)
{
	int sock, client_socket, ret;
	struct lttcomm_kconsumerd_header tmp;
	/*
	 * structure used to poll for incoming data on the communication
	 * socket, so that the socket does not have to be blocking
	 */
	struct pollfd kconsumerd_sockpoll[2];

	DBG("Creating command socket %s", kconsumerd_command_sock_path);
	unlink(kconsumerd_command_sock_path);
	client_socket = lttcomm_create_unix_sock(kconsumerd_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to ltt-sessiond");
	ret = kconsumerd_send_error(KCONSUMERD_COMMAND_SOCK_READY);
	if (ret < 0) {
		ERR("Error sending ready command to ltt-sessiond");
		goto end;
	}

	ret = fcntl(client_socket, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto end;
	}

	/* prepare the FDs to poll: the should_quit pipe and the client socket */
	kconsumerd_sockpoll[0].fd = kconsumerd_should_quit[0];
	kconsumerd_sockpoll[0].events = POLLIN | POLLPRI;
	kconsumerd_sockpoll[1].fd = client_socket;
	kconsumerd_sockpoll[1].events = POLLIN | POLLPRI;

	if (kconsumerd_poll_socket(kconsumerd_sockpoll) < 0) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock <= 0) {
		WARN("On accept");
		goto end;
	}
	ret = fcntl(sock, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto end;
	}

	/* update the polling structure to poll on the established socket */
	kconsumerd_sockpoll[1].fd = sock;
	kconsumerd_sockpoll[1].events = POLLIN | POLLPRI;

	while (1) {
		if (kconsumerd_poll_socket(kconsumerd_sockpoll) < 0) {
			goto end;
		}
		DBG("Incoming fds on sock");

		/* We first get the number of fds we are about to receive */
		ret = lttcomm_recv_unix_sock(sock, &tmp,
				sizeof(struct lttcomm_kconsumerd_header));
		if (ret <= 0) {
			ERR("Communication interrupted on command socket");
			goto end;
		}
		if (tmp.cmd_type == STOP) {
			DBG("Received STOP command");
			goto end;
		}
		if (kconsumerd_quit) {
			DBG("kconsumerd_thread_receive_fds received quit from signal");
			goto end;
		}

		/* we received a command to add or update fds */
		ret = kconsumerd_consumerd_recv_fd(sock, kconsumerd_sockpoll,
				tmp.payload_size, tmp.cmd_type);
		if (ret <= 0) {
			ERR("Receiving the FD, exiting");
			goto end;
		}
		DBG("received fds on sock");
	}

end:
	DBG("kconsumerd_thread_receive_fds exiting");

	/*
	 * when all fds have hung up, the polling thread
	 * can exit cleanly
	 */
	kconsumerd_quit = 1;

	/*
	 * 2s of grace period, if no polling events occur during
	 * this period, the polling thread will exit even if there
	 * are still open FDs (should not happen, but safety mechanism).
	 */
	kconsumerd_poll_timeout = KCONSUMERD_POLL_GRACE_PERIOD;

	/* wake up the polling thread */
	ret = write(kconsumerd_poll_pipe[1], "4", 1);
	if (ret < 0) {
		perror("poll pipe write");
	}
	return NULL;
}

/*
 * kconsumerd_cleanup
 *
 * Cleanup the daemon's socket and open fds on exit
 */
void kconsumerd_cleanup(void)
{
	struct kconsumerd_fd *iter, *tmp;

	/* remove the socket file */
	unlink(kconsumerd_command_sock_path);

	/*
	 * close all outfd. Called when there are no more threads
	 * running (after joining on the threads), no need to protect
	 * list iteration with mutex. The _safe variant is needed because
	 * kconsumerd_del_fd() frees the entry being iterated on.
	 */
	cds_list_for_each_entry_safe(iter, tmp, &kconsumerd_data.fd_list.head, list) {
		kconsumerd_del_fd(iter);
	}
}

/*
 * kconsumerd_should_exit
 *
 * Called from signal handler.
 */
void kconsumerd_should_exit(void)
{
	int ret;
	kconsumerd_quit = 1;
	ret = write(kconsumerd_should_quit[1], "4", 1);
}

/*
 * kconsumerd_send_error
 *
 * send return code to ltt-sessiond
 */
int kconsumerd_send_error(enum lttcomm_return_code cmd)
{
	if (kconsumerd_error_socket > 0) {
		return lttcomm_send_unix_sock(kconsumerd_error_socket, &cmd,
				sizeof(enum lttcomm_return_code));
	}

	return 0;
}