The ACCESS_ONCE() macro was removed in kernel 4.15 and should be
replaced by READ_ONCE and WRITE_ONCE which were introduced in kernel
3.19.
This commit replaces all calls to ACCESS_ONCE() with the appropriate
READ_ONCE or WRITE_ONCE and adds compatibility macros for kernels that
do not have them.
See this upstream commit:
commit
b03a0fe0c5e4b46dcd400d27395b124499554a71
Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Date: Mon Oct 23 14:07:25 2017 -0700
locking/atomics, mm: Convert ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE()
For several reasons, it is desirable to use {READ,WRITE}_ONCE() in
preference to ACCESS_ONCE(), and new code is expected to use one of the
former. So far, there's been no reason to change most existing uses of
ACCESS_ONCE(), as these aren't currently harmful.
However, for some features it is necessary to instrument reads and
writes separately, which is not possible with ACCESS_ONCE(). This
distinction is critical to correct operation.
It's possible to transform the bulk of kernel code using the Coccinelle
script below. However, this doesn't handle comments, leaving references
to ACCESS_ONCE() instances which have been removed. As a preparatory
step, this patch converts the mm code and comments to use
{READ,WRITE}_ONCE() consistently.
----
virtual patch
@ depends on patch @
expression E1, E2;
@@
- ACCESS_ONCE(E1) = E2
+ WRITE_ONCE(E1, E2)
@ depends on patch @
expression E;
@@
- ACCESS_ONCE(E)
+ READ_ONCE(E)
----
Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
TP_code_pre(
tp_locvar->extract_sensitive_payload =
- ACCESS_ONCE(extract_sensitive_payload);
+ READ_ONCE(extract_sensitive_payload);
),
TP_FIELDS(
TP_code_pre(
tp_locvar->extract_sensitive_payload =
- ACCESS_ONCE(extract_sensitive_payload);
+ READ_ONCE(extract_sensitive_payload);
),
TP_FIELDS(
* Only read source character once, in case it is
* modified concurrently.
*/
- c = ACCESS_ONCE(src[count]);
+ c = READ_ONCE(src[count]);
if (!c)
break;
lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <wrapper/compiler.h>
#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend_types.h>
#include <wrapper/ringbuffer/frontend_types.h>
tmp |= offset << SB_ID_OFFSET_SHIFT;
tmp |= SB_ID_NOREF_MASK;
/* Volatile store, read concurrently by readers. */
- ACCESS_ONCE(*id) = tmp;
+ WRITE_ONCE(*id, tmp);
}
}
* Performing a volatile access to read the sb_pages, because we want to
* read a coherent version of the pointer and the associated noref flag.
*/
- id = ACCESS_ONCE(bufb->buf_wsb[idx].id);
+ id = READ_ONCE(bufb->buf_wsb[idx].id);
for (;;) {
/* This check is called on the fast path for each record. */
if (likely(!subbuffer_id_is_noref(config, id))) {
if (config->mode == RING_BUFFER_OVERWRITE) {
/*
* Exchange the target writer subbuffer with our own unused
- * subbuffer. No need to use ACCESS_ONCE() here to read the
+ * subbuffer. No need to use READ_ONCE() here to read the
* old_wpage, because the value read will be confirmed by the
* following cmpxchg().
*/
int lib_ring_buffer_is_finalized(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf)
{
- int finalized = ACCESS_ONCE(buf->finalized);
+ int finalized = READ_ONCE(buf->finalized);
/*
* Read finalized before counters.
*/
* Perform flush before writing to finalized.
*/
smp_wmb();
- ACCESS_ONCE(buf->finalized) = 1;
+ WRITE_ONCE(buf->finalized, 1);
wake_up_interruptible(&buf->read_wait);
}
} else {
* Perform flush before writing to finalized.
*/
smp_wmb();
- ACCESS_ONCE(buf->finalized) = 1;
+ WRITE_ONCE(buf->finalized, 1);
wake_up_interruptible(&buf->read_wait);
}
- ACCESS_ONCE(chan->finalized) = 1;
+ WRITE_ONCE(chan->finalized, 1);
wake_up_interruptible(&chan->hp_wait);
wake_up_interruptible(&chan->read_wait);
priv = chan->backend.priv;
int finalized;
retry:
- finalized = ACCESS_ONCE(buf->finalized);
+ finalized = READ_ONCE(buf->finalized);
/*
* Read finalized before counters.
*/
return -EBUSY;
}
retry:
- finalized = ACCESS_ONCE(buf->finalized);
+ finalized = READ_ONCE(buf->finalized);
/*
* Read finalized before counters.
*/
switch (iter->state) {
case ITER_GET_SUBBUF:
ret = lib_ring_buffer_get_next_subbuf(buf);
- if (ret && !ACCESS_ONCE(buf->finalized)
+ if (ret && !READ_ONCE(buf->finalized)
&& config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
/*
* Use "pull" scheme for global buffers. The reader
goto end;
}
/* set clock */
- ACCESS_ONCE(lttng_trace_clock) = ltc;
+ WRITE_ONCE(lttng_trace_clock, ltc);
lttng_trace_clock_mod = mod;
end:
mutex_unlock(&clock_mutex);
}
WARN_ON_ONCE(lttng_trace_clock_mod != mod);
- ACCESS_ONCE(lttng_trace_clock) = NULL;
+ WRITE_ONCE(lttng_trace_clock, NULL);
lttng_trace_clock_mod = NULL;
end:
mutex_unlock(&clock_mutex);
ret = try_module_get(lttng_trace_clock_mod);
if (!ret) {
printk(KERN_ERR "LTTng-clock cannot get clock plugin module\n");
- ACCESS_ONCE(lttng_trace_clock) = NULL;
+ WRITE_ONCE(lttng_trace_clock, NULL);
lttng_trace_clock_mod = NULL;
}
}
int ret;
mutex_lock(&sessions_mutex);
- ACCESS_ONCE(session->active) = 0;
+ WRITE_ONCE(session->active, 0);
list_for_each_entry(chan, &session->chan, list) {
ret = lttng_syscalls_unregister(chan);
WARN_ON(ret);
lib_ring_buffer_clear_quiescent_channel(chan->chan);
}
- ACCESS_ONCE(session->active) = 1;
- ACCESS_ONCE(session->been_active) = 1;
+ WRITE_ONCE(session->active, 1);
+ WRITE_ONCE(session->been_active, 1);
ret = _lttng_session_metadata_statedump(session);
if (ret) {
- ACCESS_ONCE(session->active) = 0;
+ WRITE_ONCE(session->active, 0);
goto end;
}
ret = lttng_statedump_start(session);
if (ret)
- ACCESS_ONCE(session->active) = 0;
+ WRITE_ONCE(session->active, 0);
end:
mutex_unlock(&sessions_mutex);
return ret;
ret = -EBUSY;
goto end;
}
- ACCESS_ONCE(session->active) = 0;
+ WRITE_ONCE(session->active, 0);
/* Set transient enabler state to "disabled" */
session->tstate = 0;
channel->tstate = 1;
lttng_session_sync_enablers(channel->session);
/* Set atomically the state to "enabled" */
- ACCESS_ONCE(channel->enabled) = 1;
+ WRITE_ONCE(channel->enabled, 1);
end:
mutex_unlock(&sessions_mutex);
return ret;
goto end;
}
/* Set atomically the state to "disabled" */
- ACCESS_ONCE(channel->enabled) = 0;
+ WRITE_ONCE(channel->enabled, 0);
/* Set transient enabler state to "enabled" */
channel->tstate = 0;
lttng_session_sync_enablers(channel->session);
case LTTNG_KERNEL_KPROBE:
case LTTNG_KERNEL_FUNCTION:
case LTTNG_KERNEL_NOOP:
- ACCESS_ONCE(event->enabled) = 1;
+ WRITE_ONCE(event->enabled, 1);
break;
case LTTNG_KERNEL_KRETPROBE:
ret = lttng_kretprobes_event_enable_state(event, 1);
case LTTNG_KERNEL_KPROBE:
case LTTNG_KERNEL_FUNCTION:
case LTTNG_KERNEL_NOOP:
- ACCESS_ONCE(event->enabled) = 0;
+ WRITE_ONCE(event->enabled, 0);
break;
case LTTNG_KERNEL_KRETPROBE:
ret = lttng_kretprobes_event_enable_state(event, 0);
*/
enabled = enabled && session->tstate && event->chan->tstate;
- ACCESS_ONCE(event->enabled) = enabled;
+ WRITE_ONCE(event->enabled, enabled);
/*
* Sync tracepoint registration with event enabled
* state.
va_list ap;
struct lttng_metadata_stream *stream;
- WARN_ON_ONCE(!ACCESS_ONCE(session->active));
+ WARN_ON_ONCE(!READ_ONCE(session->active));
va_start(ap, fmt);
str = kvasprintf(GFP_KERNEL, fmt, ap);
{
int ret = 0;
- if (event->metadata_dumped || !ACCESS_ONCE(session->active))
+ if (event->metadata_dumped || !READ_ONCE(session->active))
return 0;
if (chan->channel_type == METADATA_CHANNEL)
return 0;
{
int ret = 0;
- if (chan->metadata_dumped || !ACCESS_ONCE(session->active))
+ if (chan->metadata_dumped || !READ_ONCE(session->active))
return 0;
if (chan->channel_type == METADATA_CHANNEL)
struct lttng_event *event;
int ret = 0;
- if (!ACCESS_ONCE(session->active))
+ if (!READ_ONCE(session->active))
return 0;
if (session->metadata_dumped)
goto skip_session;
} payload;
int ret;
- if (unlikely(!ACCESS_ONCE(chan->session->active)))
+ if (unlikely(!READ_ONCE(chan->session->active)))
return;
- if (unlikely(!ACCESS_ONCE(chan->enabled)))
+ if (unlikely(!READ_ONCE(chan->enabled)))
return;
- if (unlikely(!ACCESS_ONCE(event->enabled)))
+ if (unlikely(!READ_ONCE(event->enabled)))
return;
	lib_ring_buffer_ctx_init(&ctx, chan->chan, &lttng_probe_ctx,
} payload;
int ret;
- if (unlikely(!ACCESS_ONCE(chan->session->active)))
+ if (unlikely(!READ_ONCE(chan->session->active)))
return;
- if (unlikely(!ACCESS_ONCE(chan->enabled)))
+ if (unlikely(!READ_ONCE(chan->enabled)))
return;
- if (unlikely(!ACCESS_ONCE(event->enabled)))
+ if (unlikely(!READ_ONCE(event->enabled)))
return;
	lib_ring_buffer_ctx_init(&ctx, chan->chan, &lttng_probe_ctx,
int ret;
unsigned long data = (unsigned long) p->addr;
- if (unlikely(!ACCESS_ONCE(chan->session->active)))
+ if (unlikely(!READ_ONCE(chan->session->active)))
return 0;
- if (unlikely(!ACCESS_ONCE(chan->enabled)))
+ if (unlikely(!READ_ONCE(chan->enabled)))
return 0;
- if (unlikely(!ACCESS_ONCE(event->enabled)))
+ if (unlikely(!READ_ONCE(event->enabled)))
return 0;
	lib_ring_buffer_ctx_init(&ctx, chan->chan, &lttng_probe_ctx, sizeof(data),
unsigned long parent_ip;
} payload;
- if (unlikely(!ACCESS_ONCE(chan->session->active)))
+ if (unlikely(!READ_ONCE(chan->session->active)))
return 0;
- if (unlikely(!ACCESS_ONCE(chan->enabled)))
+ if (unlikely(!READ_ONCE(chan->enabled)))
return 0;
- if (unlikely(!ACCESS_ONCE(event->enabled)))
+ if (unlikely(!READ_ONCE(event->enabled)))
return 0;
payload.ip = (unsigned long) krpi->rp->kp.addr;
}
lttng_krp = event->u.kretprobe.lttng_krp;
event_return = lttng_krp->event[EVENT_RETURN];
- ACCESS_ONCE(event->enabled) = enable;
- ACCESS_ONCE(event_return->enabled) = enable;
+ WRITE_ONCE(event->enabled, enable);
+ WRITE_ONCE(event_return->enabled, enable);
return 0;
}
EXPORT_SYMBOL_GPL(lttng_kretprobes_event_enable_state);
\
if (!_TP_SESSION_CHECK(session, __session)) \
return; \
- if (unlikely(!ACCESS_ONCE(__session->active))) \
+ if (unlikely(!READ_ONCE(__session->active))) \
return; \
- if (unlikely(!ACCESS_ONCE(__chan->enabled))) \
+ if (unlikely(!READ_ONCE(__chan->enabled))) \
return; \
- if (unlikely(!ACCESS_ONCE(__event->enabled))) \
+ if (unlikely(!READ_ONCE(__event->enabled))) \
return; \
__lpf = lttng_rcu_dereference(__session->pid_tracker); \
if (__lpf && likely(!lttng_pid_tracker_lookup(__lpf, current->tgid))) \
\
if (!_TP_SESSION_CHECK(session, __session)) \
return; \
- if (unlikely(!ACCESS_ONCE(__session->active))) \
+ if (unlikely(!READ_ONCE(__session->active))) \
return; \
- if (unlikely(!ACCESS_ONCE(__chan->enabled))) \
+ if (unlikely(!READ_ONCE(__chan->enabled))) \
return; \
- if (unlikely(!ACCESS_ONCE(__event->enabled))) \
+ if (unlikely(!READ_ONCE(__event->enabled))) \
return; \
__lpf = lttng_rcu_dereference(__session->pid_tracker); \
if (__lpf && likely(!lttng_pid_tracker_lookup(__lpf, current->pid))) \
# endif
#endif
+/*
+ * READ_ONCE() and WRITE_ONCE() were introduced in kernel 3.19, and
+ * ACCESS_ONCE() was removed in kernel 4.15. Use READ_ONCE()/WRITE_ONCE()
+ * when available, and fall back to ACCESS_ONCE() on older kernels that
+ * do not provide them.
+ */
+#ifndef READ_ONCE
+# define READ_ONCE(x) ACCESS_ONCE(x)
+#endif
+
+#ifndef WRITE_ONCE
+/* Parenthesize 'val' so compound expressions expand safely. */
+# define WRITE_ONCE(x, val) ({ ACCESS_ONCE(x) = (val); })
+#endif
+
#endif /* _LTTNG_WRAPPER_COMPILER_H */
#include <asm/local.h>
#include <lttng-kernel-version.h>
#include <lttng-clock.h>
+#include <wrapper/compiler.h>
#include <wrapper/percpu-defs.h>
#include <wrapper/random.h>
static inline u64 trace_clock_read64(void)
{
- struct lttng_trace_clock *ltc = ACCESS_ONCE(lttng_trace_clock);
+ struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
if (likely(!ltc)) {
return trace_clock_read64_monotonic();
static inline u64 trace_clock_freq(void)
{
- struct lttng_trace_clock *ltc = ACCESS_ONCE(lttng_trace_clock);
+ struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
if (!ltc) {
return trace_clock_freq_monotonic();
static inline int trace_clock_uuid(char *uuid)
{
- struct lttng_trace_clock *ltc = ACCESS_ONCE(lttng_trace_clock);
+ struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
read_barrier_depends(); /* load ltc before content */
/* Use default UUID cb when NULL */
static inline const char *trace_clock_name(void)
{
- struct lttng_trace_clock *ltc = ACCESS_ONCE(lttng_trace_clock);
+ struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
if (!ltc) {
return trace_clock_name_monotonic();
static inline const char *trace_clock_description(void)
{
- struct lttng_trace_clock *ltc = ACCESS_ONCE(lttng_trace_clock);
+ struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
if (!ltc) {
return trace_clock_description_monotonic();