+#if (__BYTE_ORDER == __LITTLE_ENDIAN)
+/*
+ * _ctf_sequence_bitfield, little-endian host variant: serialize a
+ * dynamic-length sequence of bitfield elements into the ring buffer.
+ *
+ * First emits a length header: the element count previously recorded in
+ * __stackvar.__dynamic_len[__dynamic_len_idx], converted to a bit count
+ * (count * sizeof(_type) * CHAR_BIT), aligned and written as a
+ * _length_type value. Then aligns for _type and writes the payload in one
+ * bulk copy — event_write_from_user() when _src is a userspace pointer
+ * (_user non-zero), event_write() otherwise. The payload size reuses the
+ * recorded element count via __get_dynamic_len(dest).
+ *
+ * NOTE(review): _item, _src_length and _nowrite are unused here —
+ * presumably consumed by other expansion phases of the same tracepoint
+ * macro stack (field declaration / size computation); confirm against the
+ * surrounding probe headers. Host byte order already matches the CTF
+ * little-endian wire format, so no per-element byteswap is needed.
+ */
+#undef _ctf_sequence_bitfield
+#define _ctf_sequence_bitfield(_type, _item, _src, \
+ _length_type, _src_length, \
+ _user, _nowrite) \
+ { \
+ _length_type __tmpl = __stackvar.__dynamic_len[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \
+ lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
+ __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
+ } \
+ lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
+ if (_user) { \
+ __chan->ops->event_write_from_user(&__ctx, _src, \
+ sizeof(_type) * __get_dynamic_len(dest)); \
+ } else { \
+ __chan->ops->event_write(&__ctx, _src, \
+ sizeof(_type) * __get_dynamic_len(dest)); \
+ }
+#else /* #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
+/*
+ * _ctf_sequence_bitfield, big-endian host variant: the CTF bitfield wire
+ * format here is little-endian, so each element must be byteswapped into
+ * little endian before being written.
+ *
+ * Emits the same length header as the little-endian variant (element
+ * count recorded in __stackvar.__dynamic_len[__dynamic_len_idx], scaled
+ * to bits), then — instead of one bulk copy — loops over the
+ * __get_dynamic_len(dest) elements, fetching each one (get_user() for
+ * userspace sources; a faulting read substitutes 0 rather than aborting
+ * the event), swapping it with cpu_to_le16/32/64 according to
+ * sizeof(_type) (1-byte elements need no swap; any other size is a
+ * programming error caught by BUG_ON), and writing it individually.
+ *
+ * NOTE(review): _item, _src_length and _nowrite are unused here —
+ * presumably consumed by other expansion phases of the tracepoint macro
+ * stack; confirm against the surrounding probe headers. The _user branch
+ * relies on _user being a compile-time constant so the dead branch is
+ * eliminated.
+ */
+#undef _ctf_sequence_bitfield
+#define _ctf_sequence_bitfield(_type, _item, _src, \
+ _length_type, _src_length, \
+ _user, _nowrite) \
+ { \
+ _length_type __tmpl = __stackvar.__dynamic_len[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \
+ lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
+ __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
+ } \
+ lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
+ { \
+ size_t _i, _length; \
+ \
+ _length = __get_dynamic_len(dest); \
+ for (_i = 0; _i < _length; _i++) { \
+ _type _tmp; \
+ \
+ if (_user) { \
+ if (get_user(_tmp, (_type *) _src + _i)) \
+ _tmp = 0; \
+ } else { \
+ _tmp = ((_type *) _src)[_i]; \
+ } \
+ switch (sizeof(_type)) { \
+ case 1: \
+ break; \
+ case 2: \
+ _tmp = cpu_to_le16(_tmp); \
+ break; \
+ case 4: \
+ _tmp = cpu_to_le32(_tmp); \
+ break; \
+ case 8: \
+ _tmp = cpu_to_le64(_tmp); \
+ break; \
+ default: \
+ BUG_ON(1); \
+ } \
+ __chan->ops->event_write(&__ctx, &_tmp, sizeof(_type)); \
+ } \
+ }
+#endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
+