summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1:
cb040cc)
Only one mmap call is made per buffer: once the first mmap is done, we use the
offset to know where to start consuming the data.
When we receive the FD and its output mode is mmap, we mmap the buffer;
when it is closing, we munmap it.
Removed the manual padding handling because the kernel already exposes a
padded subbuffer.
Also updated the check of the return code of
kconsumerd_consumerd_recv_fd to be consistent with the rest of the code:
we are in error when it's < 0, not <= 0.
Acked-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Julien Desfossez <julien.desfossez@polymtl.ca>
int fd;
uint32_t state; /* enum lttcomm_kconsumerd_fd_state */
unsigned long max_sb_size; /* the subbuffer size for this channel */
int fd;
uint32_t state; /* enum lttcomm_kconsumerd_fd_state */
unsigned long max_sb_size; /* the subbuffer size for this channel */
+ enum lttng_event_output output; /* use splice or mmap to consume this fd */
};
extern int lttcomm_create_unix_sock(const char *pathname);
};
extern int lttcomm_create_unix_sock(const char *pathname);
*/
static void kconsumerd_del_fd(struct kconsumerd_fd *lcf)
{
*/
static void kconsumerd_del_fd(struct kconsumerd_fd *lcf)
{
pthread_mutex_lock(&kconsumerd_data.lock);
cds_list_del(&lcf->list);
if (kconsumerd_data.fds_count > 0) {
kconsumerd_data.fds_count--;
if (lcf != NULL) {
pthread_mutex_lock(&kconsumerd_data.lock);
cds_list_del(&lcf->list);
if (kconsumerd_data.fds_count > 0) {
kconsumerd_data.fds_count--;
if (lcf != NULL) {
+ if (lcf->mmap_base != NULL) {
+ ret = munmap(lcf->mmap_base, lcf->mmap_len);
+ if (ret != 0) {
+ perror("munmap");
+ }
+ }
if (lcf->out_fd != 0) {
close(lcf->out_fd);
}
if (lcf->out_fd != 0) {
close(lcf->out_fd);
}
tmp_fd->max_sb_size = buf->max_sb_size;
tmp_fd->out_fd = 0;
tmp_fd->out_fd_offset = 0;
tmp_fd->max_sb_size = buf->max_sb_size;
tmp_fd->out_fd = 0;
tmp_fd->out_fd_offset = 0;
+ tmp_fd->mmap_len = 0;
+ tmp_fd->mmap_base = NULL;
+ tmp_fd->output = buf->output;
strncpy(tmp_fd->path_name, buf->path_name, PATH_MAX);
tmp_fd->path_name[PATH_MAX - 1] = '\0';
strncpy(tmp_fd->path_name, buf->path_name, PATH_MAX);
tmp_fd->path_name[PATH_MAX - 1] = '\0';
tmp_fd->sessiond_fd, tmp_fd->consumerd_fd, tmp_fd->out_fd);
}
tmp_fd->sessiond_fd, tmp_fd->consumerd_fd, tmp_fd->out_fd);
}
+ if (tmp_fd->output == LTTNG_EVENT_MMAP) {
+ /* get the len of the mmap region */
+ ret = kernctl_get_mmap_len(tmp_fd->consumerd_fd, &tmp_fd->mmap_len);
+ if (ret != 0) {
+ ret = errno;
+ perror("kernctl_get_mmap_len");
+ goto end;
+ }
+
+ tmp_fd->mmap_base = mmap(NULL, tmp_fd->mmap_len,
+ PROT_READ, MAP_PRIVATE, tmp_fd->consumerd_fd, 0);
+ if (tmp_fd->mmap_base == MAP_FAILED) {
+ perror("Error mmaping");
+ ret = -1;
+ goto end;
+ }
+ }
+
cds_list_add(&tmp_fd->list, &kconsumerd_data.fd_list.head);
kconsumerd_data.fds_count++;
kconsumerd_data.need_update = 1;
cds_list_add(&tmp_fd->list, &kconsumerd_data.fd_list.head);
kconsumerd_data.fds_count++;
kconsumerd_data.need_update = 1;
int kconsumerd_on_read_subbuffer_mmap(struct kconsumerd_local_data *ctx,
struct kconsumerd_fd *kconsumerd_fd, unsigned long len)
{
int kconsumerd_on_read_subbuffer_mmap(struct kconsumerd_local_data *ctx,
struct kconsumerd_fd *kconsumerd_fd, unsigned long len)
{
- unsigned long mmap_len, mmap_offset, padded_len, padding_len;
- char *mmap_base;
+ unsigned long mmap_offset;
char *padding = NULL;
long ret = 0;
off_t orig_offset = kconsumerd_fd->out_fd_offset;
int fd = kconsumerd_fd->consumerd_fd;
int outfd = kconsumerd_fd->out_fd;
char *padding = NULL;
long ret = 0;
off_t orig_offset = kconsumerd_fd->out_fd_offset;
int fd = kconsumerd_fd->consumerd_fd;
int outfd = kconsumerd_fd->out_fd;
- /* get the padded subbuffer size to know the padding required */
- ret = kernctl_get_padded_subbuf_size(fd, &padded_len);
- if (ret != 0) {
- ret = errno;
- perror("kernctl_get_padded_subbuf_size");
- goto end;
- }
- padding_len = padded_len - len;
- padding = malloc(padding_len * sizeof(char));
- memset(padding, '\0', padding_len);
-
- /* get the len of the mmap region */
- ret = kernctl_get_mmap_len(fd, &mmap_len);
- if (ret != 0) {
- ret = errno;
- perror("kernctl_get_mmap_len");
- goto end;
- }
-
/* get the offset inside the fd to mmap */
ret = kernctl_get_mmap_read_offset(fd, &mmap_offset);
if (ret != 0) {
/* get the offset inside the fd to mmap */
ret = kernctl_get_mmap_read_offset(fd, &mmap_offset);
if (ret != 0) {
- mmap_base = mmap(NULL, mmap_len, PROT_READ, MAP_PRIVATE, fd, mmap_offset);
- if (mmap_base == MAP_FAILED) {
- perror("Error mmaping");
- ret = -1;
- goto end;
- }
-
- ret = write(outfd, mmap_base, len);
+ ret = write(outfd, kconsumerd_fd->mmap_base + mmap_offset, len);
if (ret >= len) {
len = 0;
} else if (ret < 0) {
if (ret >= len) {
len = 0;
} else if (ret < 0) {
kconsumerd_fd->out_fd_offset += ret;
}
kconsumerd_fd->out_fd_offset += ret;
}
- /* once all the data is written, write the padding to disk */
- ret = write(outfd, padding, padding_len);
- if (ret < 0) {
- ret = errno;
- perror("Error writing padding to file");
- goto end;
- }
-
/*
* This does a blocking write-and-wait on any page that belongs to the
* subbuffer prior to the one we just wrote.
/*
* This does a blocking write-and-wait on any page that belongs to the
* subbuffer prior to the one we just wrote.
/* we received a command to add or update fds */
ret = kconsumerd_consumerd_recv_fd(ctx, sock, kconsumerd_sockpoll,
tmp.payload_size, tmp.cmd_type);
/* we received a command to add or update fds */
ret = kconsumerd_consumerd_recv_fd(ctx, sock, kconsumerd_sockpoll,
tmp.payload_size, tmp.cmd_type);
ERR("Receiving the FD, exiting");
goto end;
}
ERR("Receiving the FD, exiting");
goto end;
}
char path_name[PATH_MAX]; /* tracefile name */
enum kconsumerd_fd_state state;
unsigned long max_sb_size; /* the subbuffer size for this channel */
char path_name[PATH_MAX]; /* tracefile name */
enum kconsumerd_fd_state state;
unsigned long max_sb_size; /* the subbuffer size for this channel */
+ void *mmap_base;
+ size_t mmap_len;
+ enum lttng_event_output output; /* splice or mmap */
};
struct kconsumerd_local_data {
};
struct kconsumerd_local_data {
break;
case LTTNG_EVENT_MMAP:
/* read the used subbuffer size */
break;
case LTTNG_EVENT_MMAP:
/* read the used subbuffer size */
- err = kernctl_get_subbuf_size(infd, &len);
+ err = kernctl_get_padded_subbuf_size(infd, &len);
if (err != 0) {
ret = errno;
perror("Getting sub-buffer len failed.");
if (err != 0) {
ret = errno;
perror("Getting sub-buffer len failed.");
lkm.fd = stream->fd;
lkm.state = stream->state;
lkm.max_sb_size = channel->channel->attr.subbuf_size;
lkm.fd = stream->fd;
lkm.state = stream->state;
lkm.max_sb_size = channel->channel->attr.subbuf_size;
+ lkm.output = DEFAULT_KERNEL_CHANNEL_OUTPUT;
strncpy(lkm.path_name, stream->pathname, PATH_MAX);
lkm.path_name[PATH_MAX - 1] = '\0';
strncpy(lkm.path_name, stream->pathname, PATH_MAX);
lkm.path_name[PATH_MAX - 1] = '\0';
lkm.fd = session->metadata_stream_fd;
lkm.state = ACTIVE_FD;
lkm.max_sb_size = session->metadata->conf->attr.subbuf_size;
lkm.fd = session->metadata_stream_fd;
lkm.state = ACTIVE_FD;
lkm.max_sb_size = session->metadata->conf->attr.subbuf_size;
+ lkm.output = DEFAULT_KERNEL_CHANNEL_OUTPUT;
strncpy(lkm.path_name, session->metadata->pathname, PATH_MAX);
lkm.path_name[PATH_MAX - 1] = '\0';
strncpy(lkm.path_name, session->metadata->pathname, PATH_MAX);
lkm.path_name[PATH_MAX - 1] = '\0';