Remove lttng_ust_synchronize_trace public symbol
diff --git a/libringbuffer/frontend_api.h b/libringbuffer/frontend_api.h
index 0868a4bb6dc6a3bdcd5191c7b24ea2cf2476fc2c..f746c98ec88028008779033e8d558c2693fa72fb 100644
--- a/libringbuffer/frontend_api.h
+++ b/libringbuffer/frontend_api.h
@@ -1,34 +1,29 @@
-#ifndef _LINUX_RING_BUFFER_FRONTEND_API_H
-#define _LINUX_RING_BUFFER_FRONTEND_API_H
-
 /*
- * linux/ringbuffer/frontend_api.h
- *
- * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Ring Buffer Library Synchronization Header (buffer write API).
+ * SPDX-License-Identifier: LGPL-2.1-only
  *
- * Author:
- *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  *
- * See ring_buffer_frontend.c for more information on wait-free algorithms.
- * See linux/ringbuffer/frontend.h for channel allocation and read-side API.
- *
- * Dual LGPL v2.1/GPL v2 license.
+ * See ring_buffer_frontend.c for more information on wait-free
+ * algorithms.
+ * See frontend.h for channel allocation and read-side API.
  */
 
-#include "frontend.h"
-#include "ust/core.h"
-#include <urcu-bp.h>
+#ifndef _LTTNG_RING_BUFFER_FRONTEND_API_H
+#define _LTTNG_RING_BUFFER_FRONTEND_API_H
+
+#include <stddef.h>
+
 #include <urcu/compiler.h>
+#include "frontend.h"
+
 
 /**
  * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
  *
- * Grabs RCU read-side lock and keeps a ring buffer nesting count as
- * supplementary safety net to ensure tracer client code will never
- * trigger an endless recursion. Returns the processor ID on success,
- * -EPERM on failure (nesting count too high).
+ * Keeps a ring buffer nesting count as supplementary safety net to
+ * ensure tracer client code will never trigger an endless recursion.
+ * Returns the processor ID on success, -EPERM on failure (nesting count
+ * too high).
  *
  * asm volatile and "memory" clobber prevent the compiler from moving
  * instructions out of the ring buffer nesting count. This is required to ensure
@@ -41,15 +36,13 @@ int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *confi
 {
 	int cpu, nesting;
 
-	rcu_read_lock();
-	cpu = ust_get_cpu();
-	nesting = ++lib_ring_buffer_nesting;	/* TLS */
+	cpu = lttng_ust_get_cpu();
+	nesting = ++URCU_TLS(lib_ring_buffer_nesting);
 	cmm_barrier();
 
 	if (caa_unlikely(nesting > 4)) {
 		WARN_ON_ONCE(1);
-		lib_ring_buffer_nesting--;	/* TLS */
-		rcu_read_unlock();
+		URCU_TLS(lib_ring_buffer_nesting)--;
 		return -EPERM;
 	} else
 		return cpu;
@@ -62,8 +55,7 @@ static inline
 void lib_ring_buffer_put_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
 {
 	cmm_barrier();
-	lib_ring_buffer_nesting--;	/* TLS */
-	rcu_read_unlock();
+	URCU_TLS(lib_ring_buffer_nesting)--;	/* TLS */
 }
 
 /*
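
Example (not part of the diff): the two hunks above replace the RCU read-side critical section with a purely thread-local nesting counter. URCU_TLS() is liburcu's thread-local-storage accessor, which compiles down to a plain __thread variable on toolchains that support it, so entering the tracing fast path no longer takes the RCU lock. Below is a minimal standalone sketch of the same guard pattern; ring_buffer_nesting, record_begin() and record_end() are invented names, not LTTng-UST API, and the cmm_barrier() compiler barriers are omitted.

#include <stdio.h>

static __thread int ring_buffer_nesting;	/* one counter per thread */

static int record_begin(void)
{
	/* Mirror the diff: refuse to nest more than 4 levels deep. */
	if (++ring_buffer_nesting > 4) {
		ring_buffer_nesting--;
		return -1;	/* stands in for -EPERM */
	}
	return 0;
}

static void record_end(void)
{
	ring_buffer_nesting--;
}

int main(void)
{
	if (!record_begin()) {
		printf("tracing at nesting level %d\n", ring_buffer_nesting);
		record_end();
	}
	return 0;
}
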
@@ -75,6 +67,7 @@ void lib_ring_buffer_put_cpu(const struct lttng_ust_lib_ring_buffer_config *conf
 static inline
 int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
 				struct lttng_ust_lib_ring_buffer_ctx *ctx,
+				void *client_ctx,
 				unsigned long *o_begin, unsigned long *o_end,
 				unsigned long *o_old, size_t *before_hdr_pad)
 {
@@ -101,7 +94,7 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *c
 		return 1;
 
 	ctx->slot_size = record_header_size(config, chan, *o_begin,
-					    before_hdr_pad, ctx);
+					    before_hdr_pad, ctx, client_ctx);
 	ctx->slot_size +=
 		lib_ring_buffer_align(*o_begin + ctx->slot_size,
 				      ctx->largest_align) + ctx->data_size;
@@ -143,7 +136,8 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *c
 
 static inline
 int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
-			    struct lttng_ust_lib_ring_buffer_ctx *ctx)
+			    struct lttng_ust_lib_ring_buffer_ctx *ctx,
+			    void *client_ctx)
 {
 	struct channel *chan = ctx->chan;
 	struct lttng_ust_shm_handle *handle = ctx->handle;
@@ -151,21 +145,23 @@ int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *confi
 	unsigned long o_begin, o_end, o_old;
 	size_t before_hdr_pad = 0;
 
-	if (uatomic_read(&chan->record_disabled))
+	if (caa_unlikely(uatomic_read(&chan->record_disabled)))
 		return -EAGAIN;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
 		buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
 	else
 		buf = shmp(handle, chan->backend.buf[0].shmp);
-	if (uatomic_read(&buf->record_disabled))
+	if (caa_unlikely(!buf))
+		return -EIO;
+	if (caa_unlikely(uatomic_read(&buf->record_disabled)))
 		return -EAGAIN;
 	ctx->buf = buf;
 
 	/*
 	 * Perform retryable operations.
 	 */
-	if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
+	if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, client_ctx, &o_begin,
 						     &o_end, &o_old, &before_hdr_pad)))
 		goto slow_path;
 
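
Example (not part of the diff): the hunk above lands two changes — caa_unlikely() branch hints (liburcu's wrapper around __builtin_expect) on the error paths, and a NULL check on the pointer returned by shmp(). shmp() translates a shared-memory reference into a process-local pointer, and that translation can fail; returning -EIO before the first dereference turns a corrupt or truncated shared-memory map into a recoverable error rather than a crash. Below is a standalone sketch of this validate-before-dereference pattern; resolve(), struct shm_ref and the table are invented for illustration and are not the real shmp() implementation.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

#define NBUF 4

struct buffer { int record_disabled; };

struct shm_ref { size_t index; };	/* offset-style reference, not a raw pointer */

static struct buffer bufs[NBUF];

/* Translate a reference into a local pointer; NULL if out of range. */
static struct buffer *resolve(const struct shm_ref *ref)
{
	if (ref->index >= NBUF)
		return NULL;
	return &bufs[ref->index];
}

static int try_record(const struct shm_ref *ref)
{
	struct buffer *buf = resolve(ref);

	if (!buf)
		return -EIO;	/* bad mapping: fail, don't dereference */
	if (buf->record_disabled)
		return -EAGAIN;
	return 0;
}

int main(void)
{
	struct shm_ref good = { 1 }, bad = { 99 };

	printf("good: %d, bad: %d\n", try_record(&good), try_record(&bad));
	return 0;
}
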
@@ -196,7 +192,7 @@ int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *confi
 	ctx->buf_offset = o_begin + before_hdr_pad;
 	return 0;
 slow_path:
-	return lib_ring_buffer_reserve_slow(ctx);
+	return lib_ring_buffer_reserve_slow(ctx, client_ctx);
 }
 
 /**
@@ -241,11 +237,16 @@ void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *confi
 	unsigned long offset_end = ctx->buf_offset;
 	unsigned long endidx = subbuf_index(offset_end - 1, chan);
 	unsigned long commit_count;
+	struct commit_counters_hot *cc_hot = shmp_index(handle,
+						buf->commit_hot, endidx);
+
+	if (caa_unlikely(!cc_hot))
+		return;
 
 	/*
 	 * Must count record before incrementing the commit count.
 	 */
-	subbuffer_count_record(config, &buf->backend, endidx, handle);
+	subbuffer_count_record(config, ctx, &buf->backend, endidx, handle);
 
 	/*
 	 * Order all writes to buffer before the commit count update that will
@@ -253,7 +254,7 @@ void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *confi
 	 */
 	cmm_smp_wmb();
 
-	v_add(config, ctx->slot_size, &shmp_index(handle, buf->commit_hot, endidx)->cc);
+	v_add(config, ctx->slot_size, &cc_hot->cc);
 
 	/*
 	 * commit count read can race with concurrent OOO commit count updates.
@@ -273,17 +274,16 @@ void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *confi
 	 * count reaches back the reserve offset for a specific sub-buffer,
 	 * which is completely independent of the order.
 	 */
-	commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
+	commit_count = v_read(config, &cc_hot->cc);
 	lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
-				      commit_count, endidx, handle);
+				      commit_count, endidx, handle, ctx->tsc);
 
 	/*
 	 * Update used size at each commit. It's needed only for extracting
 	 * ring_buffer buffers from vmcore, after crash.
 	 */
-	lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
-					     ctx->buf_offset, commit_count,
-					     ctx->slot_size, handle);
+	lib_ring_buffer_write_commit_counter(config, buf, chan,
+					     offset_end, commit_count, handle, cc_hot);
 }
 
 /**
@@ -352,4 +352,4 @@ void lib_ring_buffer_record_enable(const struct lttng_ust_lib_ring_buffer_config
 	uatomic_dec(&buf->record_disabled);
 }
 
-#endif /* _LINUX_RING_BUFFER_FRONTEND_API_H */
+#endif /* _LTTNG_RING_BUFFER_FRONTEND_API_H */
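
Example (not part of the diff): taken together, the reserve-path hunks thread an opaque client_ctx pointer from lib_ring_buffer_reserve() down into record_header_size() and lib_ring_buffer_reserve_slow(), so client code can hand private data to its header-size callback without global state. Below is a hedged sketch of how a tracer client might drive the updated fast path; the record_event() wrapper and the elided ctx initialization are assumptions for illustration, while the get_cpu/reserve/commit/put_cpu calls follow the signatures visible in the header as patched above.

#include "frontend_api.h"	/* assumes compilation within libringbuffer */

static
void record_event(const struct lttng_ust_lib_ring_buffer_config *config,
		  struct lttng_ust_lib_ring_buffer_ctx *ctx,
		  void *client_ctx)
{
	int ret;

	if (lib_ring_buffer_get_cpu(config) < 0)
		return;		/* nesting count too high: drop the event */
	/* ... ctx is assumed initialized (channel, cpu, sizes, shm handle) ... */
	ret = lib_ring_buffer_reserve(config, ctx, client_ctx);
	if (ret)
		goto put;	/* -EAGAIN (tracing disabled) or -EIO (bad buffer) */
	/* ... write the record payload at ctx->buf_offset via the backend ... */
	lib_ring_buffer_commit(config, ctx);
put:
	lib_ring_buffer_put_cpu(config);
}
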