* lttng_perf_lock - Protect lttng-ust perf counter data structures
*
* Nests within the ust_lock, and therefore within the libc dl lock.
- * Therefore, we need to fixup the TLS before nesting into this lock.
+ * Therefore, we need to allocate the TLS before nesting into this lock.
* Nests inside RCU bp read-side lock. Protects against concurrent
* fork.
*/
static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);
/*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
+ * Force a read (imply TLS allocation for dlopen) of TLS variables.
*/
-void lttng_ust_fixup_perf_counter_tls(void)
+void lttng_ust_perf_counter_alloc_tls(void)
{
/*
 * The "m" input operand forces the compiler to materialize an access to
 * the TLS variable, so its TLS block is allocated eagerly (needed before
 * nesting into the perf lock, per the comment above). The empty volatile
 * asm itself emits no instructions and cannot be optimized away.
 */
asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
}
static
size_t perf_counter_get_size(void *priv __attribute__((unused)),
+ struct lttng_ust_probe_ctx *probe_ctx __attribute__((unused)),
size_t offset)
{
size_t size = 0;
static
void perf_counter_record(void *priv,
- struct lttng_ust_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
+ struct lttng_ust_probe_ctx *probe_ctx __attribute__((unused)),
+ struct lttng_ust_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
{
uint64_t value;
/*
 * Context "get value" callback: reads the perf counter bound to this
 * context instance (priv) and stores the reading into the generic
 * context-value union. The hunk below switches the union member from
 * signed (u.s64) to unsigned (u.u64) to match the counter's domain.
 */
static
void perf_counter_get_value(void *priv,
+ struct lttng_ust_probe_ctx *probe_ctx __attribute__((unused)),
struct lttng_ust_ctx_value *value)
{
/*
 * NOTE(review): wrapper_perf_counter_read() is not visible in this
 * hunk — presumably a raw 64-bit counter read; confirm its return
 * type agrees with the u.u64 store below.
 */
- value->u.s64 = wrapper_perf_counter_read(priv);
+ value->u.u64 = wrapper_perf_counter_read(priv);
}
/* Called with perf lock held */
lttng_ust_static_type_integer(sizeof(uint64_t) * CHAR_BIT,
lttng_ust_rb_alignof(uint64_t) * CHAR_BIT,
lttng_ust_is_signed_type(uint64_t),
- BYTE_ORDER, 10);
+ LTTNG_UST_BYTE_ORDER, 10);
/* Called with UST lock held */
int lttng_add_perf_counter_to_ctx(uint32_t type,