size_t offset, size_t *header_len,
size_t *payload_len, uint64_t *timestamp,
struct lttng_ust_shm_handle *handle);
+ /*
+ * Offset and size of content size field in client.
+ */
+ void (*content_size_field) (const struct lttng_ust_lib_ring_buffer_config *config,
+ size_t *offset, size_t *length);
+ void (*packet_size_field) (const struct lttng_ust_lib_ring_buffer_config *config,
+ size_t *offset, size_t *length);
};
/*
};
enum lttng_ust_lib_ring_buffer_mode_types {
- RING_BUFFER_OVERWRITE, /* Overwrite when buffer full */
- RING_BUFFER_DISCARD, /* Discard when buffer full */
+ RING_BUFFER_OVERWRITE = 0, /* Overwrite when buffer full */
+ RING_BUFFER_DISCARD = 1, /* Discard when buffer full */
};
enum lttng_ust_lib_ring_buffer_output_types {
#include <lttng/ust-abi.h>
#include <sys/types.h>
+#include <limits.h>
#ifndef LTTNG_UST_UUID_LEN
#define LTTNG_UST_UUID_LEN 16
enum lttng_ust_output output; /* splice, mmap */
uint32_t chan_id; /* channel ID */
unsigned char uuid[LTTNG_UST_UUID_LEN]; /* Trace session unique ID */
+ char shm_path[PATH_MAX]; /* Shared memory path */
} LTTNG_PACKED;
/*
unsigned int switch_timer_interval,
unsigned int read_timer_interval,
unsigned char *uuid,
- uint32_t chan_id);
+ uint32_t chan_id,
+ const char *shm_path);
void (*channel_destroy)(struct lttng_channel *chan);
union {
void *_deprecated1;
attr->subbuf_size, attr->num_subbuf,
attr->switch_timer_interval,
attr->read_timer_interval,
- attr->uuid, attr->chan_id);
+ attr->uuid, attr->chan_id,
+ attr->shm_path[0] == '\0' ? NULL : attr->shm_path);
if (!chan->chan) {
goto chan_error;
}
{
}
+/*
+ * Report the byte offset and length, within struct packet_header, of
+ * the packet "content size" field, so the crash-dump ABI can record
+ * where to find it in each sub-buffer header.
+ */
+static void client_content_size_field(const struct lttng_ust_lib_ring_buffer_config *config,
+		size_t *offset, size_t *length)
+{
+	*offset = offsetof(struct packet_header, ctx.content_size);
+	*length = sizeof(((struct packet_header *) NULL)->ctx.content_size);
+}
+
+/*
+ * Report the byte offset and length, within struct packet_header, of
+ * the "packet size" field, for the crash-dump ABI.
+ */
+static void client_packet_size_field(const struct lttng_ust_lib_ring_buffer_config *config,
+		size_t *offset, size_t *length)
+{
+	*offset = offsetof(struct packet_header, ctx.packet_size);
+	*length = sizeof(((struct packet_header *) NULL)->ctx.packet_size);
+}
+
static struct packet_header *client_packet_header(struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
.buffer_end = client_buffer_end,
.buffer_create = client_buffer_create,
.buffer_finalize = client_buffer_finalize,
+ .content_size_field = client_content_size_field,
+ .packet_size_field = client_packet_size_field,
},
.timestamp_begin = client_timestamp_begin,
.timestamp_end = client_timestamp_end,
.cb.buffer_end = client_buffer_end,
.cb.buffer_create = client_buffer_create,
.cb.buffer_finalize = client_buffer_finalize,
+ .cb.content_size_field = client_content_size_field,
+ .cb.packet_size_field = client_packet_size_field,
.tsc_bits = LTTNG_COMPACT_TSC_BITS,
.alloc = RING_BUFFER_ALLOC_PER_CPU,
unsigned int switch_timer_interval,
unsigned int read_timer_interval,
unsigned char *uuid,
- uint32_t chan_id)
+ uint32_t chan_id,
+ const char *shm_path)
{
struct lttng_channel chan_priv_init;
struct lttng_ust_shm_handle *handle;
sizeof(struct lttng_channel),
&chan_priv_init,
buf_addr, subbuf_size, num_subbuf,
- switch_timer_interval, read_timer_interval);
+ switch_timer_interval, read_timer_interval,
+ shm_path);
if (!handle)
return NULL;
lttng_chan = priv;
unsigned int switch_timer_interval,
unsigned int read_timer_interval,
unsigned char *uuid,
- uint32_t chan_id)
+ uint32_t chan_id,
+ const char *shm_path)
{
struct lttng_channel chan_priv_init;
struct lttng_ust_shm_handle *handle;
sizeof(struct lttng_channel),
&chan_priv_init,
buf_addr, subbuf_size, num_subbuf,
- switch_timer_interval, read_timer_interval);
+ switch_timer_interval, read_timer_interval,
+ shm_path);
if (!handle)
return NULL;
lttng_chan = priv;
const char *name,
const struct lttng_ust_lib_ring_buffer_config *config,
size_t subbuf_size,
- size_t num_subbuf, struct lttng_ust_shm_handle *handle);
+ size_t num_subbuf, struct lttng_ust_shm_handle *handle,
+ const char *shm_path);
void channel_backend_free(struct channel_backend *chanb,
struct lttng_ust_shm_handle *handle);
void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
unsigned int switch_timer_interval,
- unsigned int read_timer_interval);
+ unsigned int read_timer_interval,
+ const char *shm_path);
/*
* channel_destroy finalizes all channel's buffers, waits for readers to
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
/* ring buffer state */
-#define RB_RING_BUFFER_PADDING 64
+#define RB_CRASH_DUMP_ABI_LEN 256
+#define RB_RING_BUFFER_PADDING 60
+
+#define RB_CRASH_DUMP_ABI_MAGIC_LEN 16
+
+/*
+ * The 128-bit magic number is xor'd in the process data so it does not
+ * cause a false positive when searching for buffers by scanning memory.
+ * The actual magic number is:
+ * 0x17, 0x7B, 0xF1, 0x77, 0xBF, 0x17, 0x7B, 0xF1,
+ * 0x77, 0xBF, 0x17, 0x7B, 0xF1, 0x77, 0xBF, 0x17,
+ */
+#define RB_CRASH_DUMP_ABI_MAGIC_XOR \
+ { \
+ 0x17 ^ 0xFF, 0x7B ^ 0xFF, 0xF1 ^ 0xFF, 0x77 ^ 0xFF, \
+ 0xBF ^ 0xFF, 0x17 ^ 0xFF, 0x7B ^ 0xFF, 0xF1 ^ 0xFF, \
+ 0x77 ^ 0xFF, 0xBF ^ 0xFF, 0x17 ^ 0xFF, 0x7B ^ 0xFF, \
+ 0xF1 ^ 0xFF, 0x77 ^ 0xFF, 0xBF ^ 0xFF, 0x17 ^ 0xFF, \
+ }
+
+#define RB_CRASH_ENDIAN 0x1234
+
+#define RB_CRASH_DUMP_ABI_MAJOR 0
+#define RB_CRASH_DUMP_ABI_MINOR 0
+
+/* Origin of a crash-dump buffer layout record. */
+enum lttng_crash_type {
+	LTTNG_CRASH_TYPE_UST = 0,
+	LTTNG_CRASH_TYPE_KERNEL = 1,
+};
+
+/*
+ * Layout-description record placed at the start of each ring buffer
+ * (see struct lttng_ust_lib_ring_buffer). A crash-dump tool locates
+ * buffers by scanning memory for the (xor-decoded) magic number, then
+ * uses the recorded offsets, field lengths and array strides to decode
+ * the buffer without access to the producer's binaries.
+ */
+struct lttng_crash_abi {
+	uint8_t magic[RB_CRASH_DUMP_ABI_MAGIC_LEN];
+	uint64_t mmap_length;	/* Overall length of crash record */
+	uint16_t endian;	/*
+				 * { 0x12, 0x34 }: big endian
+				 * { 0x34, 0x12 }: little endian
+				 */
+	uint16_t major;		/* Major number. */
+	uint16_t minor;		/* Minor number. */
+	uint8_t word_size;	/* Word size (bytes). */
+	uint8_t layout_type;	/* enum lttng_crash_type */
+
+	/* Byte offsets of key fields, relative to the buffer start. */
+	struct {
+		uint32_t prod_offset;
+		uint32_t consumed_offset;
+		uint32_t commit_hot_array;
+		uint32_t commit_hot_seq;
+		uint32_t buf_wsb_array;
+		uint32_t buf_wsb_id;
+		uint32_t sb_array;
+		uint32_t sb_array_shmp_offset;
+		uint32_t sb_backend_p_offset;
+		uint32_t content_size;
+		uint32_t packet_size;
+	} __attribute__((packed)) offset;
+	/* Lengths (bytes) of the fields listed above. */
+	struct {
+		uint8_t prod_offset;
+		uint8_t consumed_offset;
+		uint8_t commit_hot_seq;
+		uint8_t buf_wsb_id;
+		uint8_t sb_array_shmp_offset;
+		uint8_t sb_backend_p_offset;
+		uint8_t content_size;
+		uint8_t packet_size;
+	} __attribute__((packed)) length;
+	/* Element strides (bytes) of the per-sub-buffer arrays. */
+	struct {
+		uint32_t commit_hot_array;
+		uint32_t buf_wsb_array;
+		uint32_t sb_array;
+	} __attribute__((packed)) stride;
+
+	uint64_t buf_size;	/* Size of the buffer */
+	uint64_t subbuf_size;	/* Sub-buffer size */
+	uint64_t num_subbuf;	/* Number of sub-buffers for writer */
+	uint32_t mode;		/* Buffer mode: 0: overwrite, 1: discard */
+} __attribute__((packed));
+
struct lttng_ust_lib_ring_buffer {
- /* First 32 bytes cache-hot cacheline */
- union v_atomic offset; /* Current offset in the buffer */
+ /* First 32 bytes are for the buffer crash dump ABI */
+ struct lttng_crash_abi crash_abi;
+
+ /* 32 bytes cache-hot cacheline */
+ union v_atomic __attribute__((aligned(32))) offset;
+ /* Current offset in the buffer */
DECLARE_SHMP(struct commit_counters_hot, commit_hot);
/* Commit count per sub-buffer */
long consumed; /*
* standard atomic access (shared)
*/
int record_disabled;
- /* End of first 32 bytes cacheline */
+ /* End of cache-hot 32 bytes cacheline */
+
union v_atomic last_tsc; /*
* Last timestamp written in the buffer.
*/
- struct lttng_ust_lib_ring_buffer_backend backend; /* Associated backend */
+ struct lttng_ust_lib_ring_buffer_backend backend;
+ /* Associated backend */
DECLARE_SHMP(struct commit_counters_cold, commit_cold);
/* Commit count per sub-buffer */
#include "smp.h"
#include "shm.h"
+#define UINT_MAX_STR_LEN 11 /* includes \0 */
+
/**
* lib_ring_buffer_backend_allocate - allocate a channel buffer
* @config: ring buffer instance configuration
mmap_offset += subbuf_size;
}
}
-
return 0;
free_array:
* @subbuf_size: size of sub-buffers (> PAGE_SIZE, power of 2)
* @num_subbuf: number of sub-buffers (power of 2)
* @lttng_ust_shm_handle: shared memory handle
+ * @shm_path: shared memory files path
*
* Returns channel pointer if successful, %NULL otherwise.
*
const char *name,
const struct lttng_ust_lib_ring_buffer_config *config,
size_t subbuf_size, size_t num_subbuf,
- struct lttng_ust_shm_handle *handle)
+ struct lttng_ust_shm_handle *handle,
+ const char *shm_path)
{
struct channel *chan = caa_container_of(chanb, struct channel, backend);
unsigned int i;
*/
for_each_possible_cpu(i) {
struct shm_object *shmobj;
-
+	char shm_buf_path[PATH_MAX];
+
+	if (shm_path) {
+		/*
+		 * Suffix the stream path with the cpu number. A single
+		 * bounded snprintf both builds the path and guarantees
+		 * NUL-termination; checking ret against PATH_MAX also
+		 * fixes the previous "ret != 1" test, which wrongly
+		 * failed for any cpu number of more than one digit
+		 * (snprintf returns the number of characters written).
+		 */
+		ret = snprintf(shm_buf_path, PATH_MAX, "%s%u",
+				shm_path, i);
+		if (ret < 0 || ret >= PATH_MAX)
+			goto end;
+	}
shmobj = shm_object_table_alloc(handle->table, shmsize,
- SHM_OBJECT_SHM);
+ SHM_OBJECT_SHM, shm_path ? shm_buf_path : NULL);
if (!shmobj)
goto end;
align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
struct lttng_ust_lib_ring_buffer *buf;
shmobj = shm_object_table_alloc(handle->table, shmsize,
- SHM_OBJECT_SHM);
+ SHM_OBJECT_SHM, shm_path);
if (!shmobj)
goto end;
align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
#define LTTNG_UST_RING_BUFFER_GET_RETRY 10
#define LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS 10
+/*
+ * Non-static to ensure the compiler does not optimize away the xor.
+ */
+uint8_t lttng_crash_magic_xor[] = RB_CRASH_DUMP_ABI_MAGIC_XOR;
+
/*
* Use POSIX SHM: shm_open(3) and shm_unlink(3).
* close(2) to close the fd returned by shm_open.
/* Don't reset reader reference count */
}
+/*
+ * Populate the crash-dump ABI record embedded at the start of a ring
+ * buffer: decoded magic number, ABI version, endianness marker, word
+ * size, and the offsets, field lengths and array strides a crash-dump
+ * tool needs to walk this buffer's data structures.
+ */
+static
+void init_crash_abi(const struct lttng_ust_lib_ring_buffer_config *config,
+		struct lttng_crash_abi *crash_abi,
+		struct lttng_ust_lib_ring_buffer *buf,
+		struct channel_backend *chanb,
+		struct shm_object *shmobj,
+		struct lttng_ust_shm_handle *handle)
+{
+	int i;
+
+	/* Undo the 0xFF xor to store the actual magic number. */
+	for (i = 0; i < RB_CRASH_DUMP_ABI_MAGIC_LEN; i++)
+		crash_abi->magic[i] = lttng_crash_magic_xor[i] ^ 0xFF;
+	crash_abi->mmap_length = shmobj->memory_map_size;
+	crash_abi->endian = RB_CRASH_ENDIAN;
+	crash_abi->major = RB_CRASH_DUMP_ABI_MAJOR;
+	crash_abi->minor = RB_CRASH_DUMP_ABI_MINOR;
+	crash_abi->word_size = sizeof(unsigned long);
+	crash_abi->layout_type = LTTNG_CRASH_TYPE_UST;
+
+	/* Offset of fields, relative to the start of the buffer. */
+	crash_abi->offset.prod_offset =
+		(uint32_t) ((char *) &buf->offset - (char *) buf);
+	crash_abi->offset.consumed_offset =
+		(uint32_t) ((char *) &buf->consumed - (char *) buf);
+	crash_abi->offset.commit_hot_array =
+		(uint32_t) ((char *) shmp(handle, buf->commit_hot) - (char *) buf);
+	crash_abi->offset.commit_hot_seq =
+		offsetof(struct commit_counters_hot, seq);
+	crash_abi->offset.buf_wsb_array =
+		(uint32_t) ((char *) shmp(handle, buf->backend.buf_wsb) - (char *) buf);
+	crash_abi->offset.buf_wsb_id =
+		offsetof(struct lttng_ust_lib_ring_buffer_backend_subbuffer, id);
+	crash_abi->offset.sb_array =
+		(uint32_t) ((char *) shmp(handle, buf->backend.array) - (char *) buf);
+	crash_abi->offset.sb_array_shmp_offset =
+		offsetof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp,
+			shmp._ref.offset);
+	crash_abi->offset.sb_backend_p_offset =
+		offsetof(struct lttng_ust_lib_ring_buffer_backend_pages,
+			p._ref.offset);
+
+	/* Field length */
+	crash_abi->length.prod_offset = sizeof(buf->offset);
+	crash_abi->length.consumed_offset = sizeof(buf->consumed);
+	crash_abi->length.commit_hot_seq =
+		sizeof(((struct commit_counters_hot *) NULL)->seq);
+	crash_abi->length.buf_wsb_id =
+		sizeof(((struct lttng_ust_lib_ring_buffer_backend_subbuffer *) NULL)->id);
+	crash_abi->length.sb_array_shmp_offset =
+		sizeof(((struct lttng_ust_lib_ring_buffer_backend_pages_shmp *) NULL)->shmp._ref.offset);
+	crash_abi->length.sb_backend_p_offset =
+		sizeof(((struct lttng_ust_lib_ring_buffer_backend_pages *) NULL)->p._ref.offset);
+
+	/* Array stride */
+	crash_abi->stride.commit_hot_array =
+		sizeof(struct commit_counters_hot);
+	crash_abi->stride.buf_wsb_array =
+		sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer);
+	crash_abi->stride.sb_array =
+		sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp);
+
+	/* Buffer constants */
+	crash_abi->buf_size = chanb->buf_size;
+	crash_abi->subbuf_size = chanb->subbuf_size;
+	crash_abi->num_subbuf = chanb->num_subbuf;
+	crash_abi->mode = (uint32_t) chanb->config.mode;
+
+	/*
+	 * Packet header field locations are client-specific; offset and
+	 * length are recorded as 0 when the client provides no callback.
+	 */
+	if (config->cb.content_size_field) {
+		size_t offset, length;
+
+		config->cb.content_size_field(config, &offset, &length);
+		crash_abi->offset.content_size = offset;
+		crash_abi->length.content_size = length;
+	} else {
+		crash_abi->offset.content_size = 0;
+		crash_abi->length.content_size = 0;
+	}
+	if (config->cb.packet_size_field) {
+		size_t offset, length;
+
+		config->cb.packet_size_field(config, &offset, &length);
+		crash_abi->offset.packet_size = offset;
+		crash_abi->length.packet_size = length;
+	} else {
+		crash_abi->offset.packet_size = 0;
+		crash_abi->length.packet_size = 0;
+	}
+}
+
/*
* Must be called under cpu hotplug protection.
*/
if (buf->backend.allocated)
return 0;
- ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend,
- cpu, handle, shmobj);
- if (ret)
- return ret;
-
align_shm(shmobj, __alignof__(struct commit_counters_hot));
set_shmp(buf->commit_hot,
zalloc_shm(shmobj,
sizeof(struct commit_counters_hot) * chan->backend.num_subbuf));
if (!shmp(handle, buf->commit_hot)) {
- ret = -ENOMEM;
- goto free_chanbuf;
+ return -ENOMEM;
}
align_shm(shmobj, __alignof__(struct commit_counters_cold));
goto free_commit;
}
+ ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend,
+ cpu, handle, shmobj);
+ if (ret) {
+ goto free_init;
+ }
+
/*
* Write the subbuffer header for first subbuffer so we know the total
* duration of data gathering.
tsc = config->cb.ring_buffer_clock_read(shmp(handle, buf->backend.chan));
config->cb.buffer_begin(buf, tsc, 0, handle);
v_add(config, subbuf_header_size, &shmp_index(handle, buf->commit_hot, 0)->cc);
+ v_add(config, subbuf_header_size, &shmp_index(handle, buf->commit_hot, 0)->seq);
if (config->cb.buffer_create) {
ret = config->cb.buffer_create(buf, priv, cpu, chanb->name, handle);
if (ret)
- goto free_init;
+ goto free_chanbuf;
}
+
+ init_crash_abi(config, &buf->crash_abi, buf, chanb, shmobj, handle);
+
buf->backend.allocated = 1;
return 0;
* padding to let readers get those sub-buffers.
* Used for live streaming.
* @read_timer_interval: Time interval (in us) to wake up pending readers.
+ * @shm_path: Shared memory files path.
*
* Holds cpu hotplug.
* Returns NULL on failure.
void *priv_data_init,
void *buf_addr, size_t subbuf_size,
size_t num_subbuf, unsigned int switch_timer_interval,
- unsigned int read_timer_interval)
+ unsigned int read_timer_interval,
+ const char *shm_path)
{
int ret;
size_t shmsize, chansize;
shmsize += priv_data_size;
/* Allocate normal memory for channel (not shared) */
- shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM);
+ shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM,
+ NULL);
if (!shmobj)
goto error_append;
/* struct channel is at object 0, offset 0 (hardcoded) */
}
ret = channel_backend_init(&chan->backend, name, config,
- subbuf_size, num_subbuf, handle);
+ subbuf_size, num_subbuf, handle,
+ shm_path);
if (ret)
goto error_backend_init;
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
+#include <sys/types.h>
#include <sys/stat.h> /* For mode constants */
#include <fcntl.h> /* For O_* constants */
#include <assert.h>
#include <signal.h>
#include <dirent.h>
#include <lttng/align.h>
-#include <helper.h>
#include <limits.h>
#include <helper.h>
return table;
}
+/*
+ * Create an anonymous POSIX shm object: shm_open a uniquely-named
+ * object, then immediately shm_unlink it so only the returned file
+ * descriptor references it. Returns the fd, or -1 on error.
+ */
+static
+int create_posix_shm(void)
+{
+	char tmp_name[NAME_MAX] = "/ust-shm-tmp-XXXXXX";
+	int shmfd, ret;
+
+	/*
+	 * Allocate shm, and immediately unlink its shm object, keeping
+	 * only the file descriptor as a reference to the object. If it
+	 * already exists (caused by short race window during which the
+	 * global object exists in a concurrent shm_open), simply retry.
+	 * We specifically do _not_ use the / at the beginning of the
+	 * pathname so that some OS implementations can keep it local to
+	 * the process (POSIX leaves this implementation-defined).
+	 * NOTE(review): tmp_name above does start with '/', which
+	 * contradicts the previous sentence -- confirm which is
+	 * intended.
+	 */
+	do {
+		/*
+		 * Using mktemp filename with O_CREAT | O_EXCL open
+		 * flags.
+		 */
+		(void) mktemp(tmp_name);
+		if (tmp_name[0] == '\0') {
+			PERROR("mktemp");
+			goto error_shm_open;
+		}
+		shmfd = shm_open(tmp_name,
+			O_CREAT | O_EXCL | O_RDWR, 0700);
+	} while (shmfd < 0 && (errno == EEXIST || errno == EACCES));
+	if (shmfd < 0) {
+		PERROR("shm_open");
+		goto error_shm_open;
+	}
+	ret = shm_unlink(tmp_name);
+	if (ret < 0 && errno != ENOENT) {
+		PERROR("shm_unlink");
+		goto error_shm_release;
+	}
+	return shmfd;
+
+error_shm_release:
+	ret = close(shmfd);
+	if (ret) {
+		PERROR("close");
+		assert(0);
+	}
+error_shm_open:
+	return -1;
+}
+
+/*
+ * Create the named backing file at shm_path, owner read/write only,
+ * failing (EEXIST) if it already exists due to O_EXCL. Returns an open
+ * read-write fd, or -1 with errno set on error.
+ */
+static
+int create_shared_file(const char *shm_path)
+{
+	return open(shm_path, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
+}
+
static
struct shm_object *_shm_object_table_alloc_shm(struct shm_object_table *table,
- size_t memory_map_size)
+ size_t memory_map_size,
+ const char *shm_path)
{
int shmfd, waitfd[2], ret, i, sigblocked = 0;
struct shm_object *obj;
char *memory_map;
- char tmp_name[NAME_MAX] = "/ust-shm-tmp-XXXXXX";
sigset_t all_sigs, orig_sigs;
if (table->allocated_len >= table->size)
}
sigblocked = 1;
- /*
- * Allocate shm, and immediately unlink its shm oject, keeping
- * only the file descriptor as a reference to the object. If it
- * already exists (caused by short race window during which the
- * global object exists in a concurrent shm_open), simply retry.
- * We specifically do _not_ use the / at the beginning of the
- * pathname so that some OS implementations can keep it local to
- * the process (POSIX leaves this implementation-defined).
- */
- do {
- /*
- * Using mktemp filename with O_CREAT | O_EXCL open
- * flags.
- */
- (void) mktemp(tmp_name);
- if (tmp_name[0] == '\0') {
- PERROR("mktemp");
- goto error_shm_open;
- }
- shmfd = shm_open(tmp_name,
- O_CREAT | O_EXCL | O_RDWR, 0700);
- } while (shmfd < 0 && (errno == EEXIST || errno == EACCES));
- if (shmfd < 0) {
- PERROR("shm_open");
- goto error_shm_open;
- }
- ret = shm_unlink(tmp_name);
- if (ret < 0 && errno != ENOENT) {
- PERROR("shm_unlink");
- goto error_shm_release;
+
+ if (!shm_path) {
+ obj->shm_path[0] = '\0';
+ shmfd = create_posix_shm();
+ } else {
+ strncpy(obj->shm_path, shm_path,
+ sizeof(obj->shm_path));
+ obj->shm_path[sizeof(obj->shm_path) - 1] = '\0';
+
+ /* Path should already exist, but could fail. */
+ shmfd = create_shared_file(shm_path);
}
+ if (shmfd < 0)
+ goto error_shm_open;
+
sigblocked = 0;
ret = pthread_sigmask(SIG_SETMASK, &orig_sigs, NULL);
if (ret == -1) {
error_mmap:
error_ftruncate:
-error_shm_release:
error_zero_file:
error_sigmask_release:
ret = close(shmfd);
PERROR("close");
assert(0);
}
+	if (shm_path) {
+		/* Best-effort removal of the backing file on error. */
+		ret = unlink(shm_path);
+		if (ret) {
+			/* Fix: name the failing call, not "ret". */
+			PERROR("unlink");
+		}
+	}
error_shm_open:
if (sigblocked) {
ret = pthread_sigmask(SIG_SETMASK, &orig_sigs, NULL);
struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
size_t memory_map_size,
- enum shm_object_type type)
+ enum shm_object_type type,
+ const char *shm_path)
{
switch (type) {
case SHM_OBJECT_SHM:
- return _shm_object_table_alloc_shm(table, memory_map_size);
+ return _shm_object_table_alloc_shm(table, memory_map_size,
+ shm_path);
case SHM_OBJECT_MEM:
return _shm_object_table_alloc_mem(table, memory_map_size);
default:
PERROR("close");
assert(0);
}
+	if (obj->shm_path[0]) {
+		/* Named (shm_path-backed) object: remove its file. */
+		ret = unlink(obj->shm_path);
+		if (ret) {
+			/* Fix: name the failing call, not "ret". */
+			PERROR("unlink");
+		}
+	}
for (i = 0; i < 2; i++) {
if (obj->wait_fd[i] < 0)
continue;
struct shm_object_table *shm_object_table_create(size_t max_nb_obj);
struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
size_t memory_map_size,
- enum shm_object_type type);
+ enum shm_object_type type,
+ const char *shm_path);
struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
int shm_fd, int wakeup_fd, uint32_t stream_nr,
size_t memory_map_size);
*/
#include <stdint.h>
+#include <limits.h>
#include "shm_internal.h"
struct channel;
char *memory_map;
size_t memory_map_size;
uint64_t allocated_len;
+ char shm_path[PATH_MAX];
};
struct shm_object_table {