See upstream commit:
commit
76ebbe78f7390aee075a7f3768af197ded1bdfbb
Author: Will Deacon <will.deacon@arm.com>
Date: Tue Oct 24 11:22:47 2017 +0100
locking/barriers: Add implicit smp_read_barrier_depends() to READ_ONCE()
In preparation for the removal of lockless_dereference(), which is the
same as READ_ONCE() on all architectures other than Alpha, add an
implicit smp_read_barrier_depends() to READ_ONCE() so that it can be
used to head dependency chains on all architectures.
Change-Id: Ife8880bd9378dca2972da8838f40fc35ccdfaaac
Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
TP_code_pre(
tp_locvar->extract_sensitive_payload =
- READ_ONCE(extract_sensitive_payload);
+ LTTNG_READ_ONCE(extract_sensitive_payload);
),
TP_FIELDS(
TP_code_pre(
tp_locvar->extract_sensitive_payload =
- READ_ONCE(extract_sensitive_payload);
+ LTTNG_READ_ONCE(extract_sensitive_payload);
),
TP_FIELDS(
* Only read source character once, in case it is
* modified concurrently.
*/
- c = READ_ONCE(src[count]);
+ c = LTTNG_READ_ONCE(src[count]);
if (!c)
break;
lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
* Performing a volatile access to read the sb_pages, because we want to
* read a coherent version of the pointer and the associated noref flag.
*/
- id = READ_ONCE(bufb->buf_wsb[idx].id);
+ id = LTTNG_READ_ONCE(bufb->buf_wsb[idx].id);
for (;;) {
/* This check is called on the fast path for each record. */
if (likely(!subbuffer_id_is_noref(config, id))) {
#define for_each_channel_cpu(cpu, chan) \
for ((cpu) = -1; \
({ (cpu) = cpumask_next(cpu, (chan)->backend.cpumask); \
- smp_read_barrier_depends(); (cpu) < nr_cpu_ids; });)
+ smp_rmb(); (cpu) < nr_cpu_ids; });)
extern struct lib_ring_buffer *channel_get_ring_buffer(
const struct lib_ring_buffer_config *config,
int lib_ring_buffer_is_finalized(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf)
{
- int finalized = READ_ONCE(buf->finalized);
+ int finalized = LTTNG_READ_ONCE(buf->finalized);
/*
* Read finalized before counters.
*/
int finalized;
retry:
- finalized = READ_ONCE(buf->finalized);
+ finalized = LTTNG_READ_ONCE(buf->finalized);
/*
* Read finalized before counters.
*/
return -EBUSY;
}
retry:
- finalized = READ_ONCE(buf->finalized);
+ finalized = LTTNG_READ_ONCE(buf->finalized);
/*
* Read finalized before counters.
*/
switch (iter->state) {
case ITER_GET_SUBBUF:
ret = lib_ring_buffer_get_next_subbuf(buf);
- if (ret && !READ_ONCE(buf->finalized)
+ if (ret && !LTTNG_READ_ONCE(buf->finalized)
&& config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
/*
* Use "pull" scheme for global buffers. The reader
size_t len;
va_list ap;
- WARN_ON_ONCE(!READ_ONCE(session->active));
+ WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));
va_start(ap, fmt);
str = kvasprintf(GFP_KERNEL, fmt, ap);
{
int ret = 0;
- if (event->metadata_dumped || !READ_ONCE(session->active))
+ if (event->metadata_dumped || !LTTNG_READ_ONCE(session->active))
return 0;
if (chan->channel_type == METADATA_CHANNEL)
return 0;
{
int ret = 0;
- if (chan->metadata_dumped || !READ_ONCE(session->active))
+ if (chan->metadata_dumped || !LTTNG_READ_ONCE(session->active))
return 0;
if (chan->channel_type == METADATA_CHANNEL)
struct lttng_event *event;
int ret = 0;
- if (!READ_ONCE(session->active))
+ if (!LTTNG_READ_ONCE(session->active))
return 0;
lttng_metadata_begin(session);
int ret;
unsigned long data = (unsigned long) p->addr;
- if (unlikely(!READ_ONCE(chan->session->active)))
+ if (unlikely(!LTTNG_READ_ONCE(chan->session->active)))
return 0;
- if (unlikely(!READ_ONCE(chan->enabled)))
+ if (unlikely(!LTTNG_READ_ONCE(chan->enabled)))
return 0;
- if (unlikely(!READ_ONCE(event->enabled)))
+ if (unlikely(!LTTNG_READ_ONCE(event->enabled)))
return 0;
lib_ring_buffer_ctx_init(&ctx, chan->chan, &lttng_probe_ctx, sizeof(data),
unsigned long parent_ip;
} payload;
- if (unlikely(!READ_ONCE(chan->session->active)))
+ if (unlikely(!LTTNG_READ_ONCE(chan->session->active)))
return 0;
- if (unlikely(!READ_ONCE(chan->enabled)))
+ if (unlikely(!LTTNG_READ_ONCE(chan->enabled)))
return 0;
- if (unlikely(!READ_ONCE(event->enabled)))
+ if (unlikely(!LTTNG_READ_ONCE(event->enabled)))
return 0;
payload.ip = (unsigned long) krpi->rp->kp.addr;
\
if (!_TP_SESSION_CHECK(session, __session)) \
return; \
- if (unlikely(!READ_ONCE(__session->active))) \
+ if (unlikely(!LTTNG_READ_ONCE(__session->active))) \
return; \
- if (unlikely(!READ_ONCE(__chan->enabled))) \
+ if (unlikely(!LTTNG_READ_ONCE(__chan->enabled))) \
return; \
- if (unlikely(!READ_ONCE(__event->enabled))) \
+ if (unlikely(!LTTNG_READ_ONCE(__event->enabled))) \
return; \
__lf = lttng_rcu_dereference(__session->pid_tracker.p); \
if (__lf && likely(!lttng_id_tracker_lookup(__lf, current->tgid))) \
\
if (!_TP_SESSION_CHECK(session, __session)) \
return; \
- if (unlikely(!READ_ONCE(__session->active))) \
+ if (unlikely(!LTTNG_READ_ONCE(__session->active))) \
return; \
- if (unlikely(!READ_ONCE(__chan->enabled))) \
+ if (unlikely(!LTTNG_READ_ONCE(__chan->enabled))) \
return; \
- if (unlikely(!READ_ONCE(__event->enabled))) \
+ if (unlikely(!LTTNG_READ_ONCE(__event->enabled))) \
return; \
__lf = lttng_rcu_dereference(__session->pid_tracker.p); \
if (__lf && likely(!lttng_id_tracker_lookup(__lf, current->tgid))) \
unsigned long ip;
} payload;
- if (unlikely(!READ_ONCE(chan->session->active)))
+ if (unlikely(!LTTNG_READ_ONCE(chan->session->active)))
return 0;
- if (unlikely(!READ_ONCE(chan->enabled)))
+ if (unlikely(!LTTNG_READ_ONCE(chan->enabled)))
return 0;
- if (unlikely(!READ_ONCE(event->enabled)))
+ if (unlikely(!LTTNG_READ_ONCE(event->enabled)))
return 0;
lib_ring_buffer_ctx_init(&ctx, chan->chan, &lttng_probe_ctx,
#define _LTTNG_WRAPPER_COMPILER_H
#include <linux/compiler.h>
+#include <linux/version.h>
/*
* Don't allow compiling with buggy compiler.
# define WRITE_ONCE(x, val) ({ ACCESS_ONCE(x) = val; })
#endif
+/*
+ * In v4.15 an implicit smp read barrier was added to READ_ONCE to
+ * replace lockless_dereference(). Replicate this behavior on prior
+ * kernels, and remove calls to smp_read_barrier_depends, which was
+ * dropped in v5.9.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0))
+#define LTTNG_READ_ONCE(x) READ_ONCE(x)
+#else
+#define LTTNG_READ_ONCE(x) \
+({ \
+ typeof(x) __val = READ_ONCE(x); \
+ smp_read_barrier_depends(); \
+ __val; \
+})
+#endif
+
#endif /* _LTTNG_WRAPPER_COMPILER_H */
static inline u64 trace_clock_read64(void)
{
- struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
+ struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);
if (likely(!ltc)) {
return trace_clock_read64_monotonic();
} else {
- read_barrier_depends(); /* load ltc before content */
return ltc->read64();
}
}
static inline u64 trace_clock_freq(void)
{
- struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
+ struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);
if (!ltc) {
return trace_clock_freq_monotonic();
} else {
- read_barrier_depends(); /* load ltc before content */
return ltc->freq();
}
}
static inline int trace_clock_uuid(char *uuid)
{
- struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
+ struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);
- read_barrier_depends(); /* load ltc before content */
/* Use default UUID cb when NULL */
if (!ltc || !ltc->uuid) {
return trace_clock_uuid_monotonic(uuid);
static inline const char *trace_clock_name(void)
{
- struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
+ struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);
if (!ltc) {
return trace_clock_name_monotonic();
} else {
- read_barrier_depends(); /* load ltc before content */
return ltc->name();
}
}
static inline const char *trace_clock_description(void)
{
- struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
+ struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);
if (!ltc) {
return trace_clock_description_monotonic();
} else {
- read_barrier_depends(); /* load ltc before content */
return ltc->description();
}
}