* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <wrapper/compiler.h>
#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend_types.h>
#include <wrapper/ringbuffer/frontend_types.h>
tmp |= offset << SB_ID_OFFSET_SHIFT;
tmp |= SB_ID_NOREF_MASK;
/* Volatile store, read concurrently by readers. */
- ACCESS_ONCE(*id) = tmp;
+ WRITE_ONCE(*id, tmp);
}
}
* Performing a volatile access to read the sb_pages, because we want to
* read a coherent version of the pointer and the associated noref flag.
*/
- id = ACCESS_ONCE(bufb->buf_wsb[idx].id);
+ id = READ_ONCE(bufb->buf_wsb[idx].id);
for (;;) {
/* This check is called on the fast path for each record. */
if (likely(!subbuffer_id_is_noref(config, id))) {
if (config->mode == RING_BUFFER_OVERWRITE) {
/*
* Exchange the target writer subbuffer with our own unused
- * subbuffer. No need to use ACCESS_ONCE() here to read the
+ * subbuffer. No need to use READ_ONCE() here to read the
* old_wpage, because the value read will be confirmed by the
* following cmpxchg().
*/
return 0;
}
+/*
+ * Copy @len bytes from @src to @dest.
+ *
+ * Sizes of 1, 2, 4 and 8 bytes are specialized into a single
+ * fixed-width load/store; every other length falls back to the
+ * architecture-provided inline_memcpy().
+ *
+ * NOTE(review): the fixed-width cases dereference @dest/@src through
+ * uintN_t pointers — this presumes the pointers are suitably aligned
+ * for the access width (and that the build disables strict-aliasing
+ * optimizations, as the Linux kernel does); confirm for all callers.
+ */
+static inline __attribute__((always_inline))
+void lttng_inline_memcpy(void *dest, const void *src,
+		unsigned long len)
+{
+	switch (len) {
+	case 1:
+		*(uint8_t *) dest = *(const uint8_t *) src;
+		break;
+	case 2:
+		*(uint16_t *) dest = *(const uint16_t *) src;
+		break;
+	case 4:
+		*(uint32_t *) dest = *(const uint32_t *) src;
+		break;
+	case 8:
+		*(uint64_t *) dest = *(const uint64_t *) src;
+		break;
+	default:
+		/* Non-specialized length: use the arch inline memcpy. */
+		inline_memcpy(dest, src, len);
+	}
+}
+
/*
* Use the architecture-specific memcpy implementation for constant-sized
* inputs, but rely on an inline memcpy for length statically unknown.
if (__builtin_constant_p(len)) \
memcpy(dest, src, __len); \
else \
- inline_memcpy(dest, src, __len); \
+ lttng_inline_memcpy(dest, src, __len); \
} while (0)
/*