struct lib_counter_config *config = &counter->config;
struct lib_counter_layout *layout;
- if (cpu < 0 || cpu >= num_possible_cpus())
+ if (cpu < 0 || cpu >= get_possible_cpus_array_len())
return -EINVAL;
if (!(config->alloc & COUNTER_ALLOC_PER_CPU))
int nr_counter_cpu_fds,
const int *counter_cpu_fds)
{
- int nr_cpus = num_possible_cpus();
+ int nr_cpus = get_possible_cpus_array_len();
if (CAA_BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
WARN_ON_ONCE(1);
size_t dimension, nr_elem = 1;
int cpu, ret;
int nr_handles = 0;
- int nr_cpus = num_possible_cpus();
+ int nr_cpus = get_possible_cpus_array_len();
if (validate_args(config, nr_dimensions, max_nr_elem,
global_sum_step, global_counter_fd, nr_counter_cpu_fds,
struct lib_counter_layout *layout;
int shm_fd;
- if (cpu >= num_possible_cpus())
+ if (cpu >= get_possible_cpus_array_len())
return -1;
layout = &counter->percpu_counters[cpu];
shm_fd = layout->shm_fd;
switch (config->alloc) {
case COUNTER_ALLOC_PER_CPU:
- if (cpu < 0 || cpu >= num_possible_cpus())
+ if (cpu < 0 || cpu >= get_possible_cpus_array_len())
return -EINVAL;
layout = &counter->percpu_counters[cpu];
break;
case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
if (cpu >= 0) {
- if (cpu >= num_possible_cpus())
+ if (cpu >= get_possible_cpus_array_len())
return -EINVAL;
layout = &counter->percpu_counters[cpu];
} else {
switch (config->alloc) {
case COUNTER_ALLOC_PER_CPU:
- if (cpu < 0 || cpu >= num_possible_cpus())
+ if (cpu < 0 || cpu >= get_possible_cpus_array_len())
return -EINVAL;
layout = &counter->percpu_counters[cpu];
break;
case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
if (cpu >= 0) {
- if (cpu >= num_possible_cpus())
+ if (cpu >= get_possible_cpus_array_len())
return -EINVAL;
layout = &counter->percpu_counters[cpu];
} else {
int64_t blocking_timeout_ms;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- nr_streams = num_possible_cpus();
+ nr_streams = get_possible_cpus_array_len();
else
nr_streams = 1;
return NULL;
/* Allocate table for channel + per-cpu buffers */
- handle->table = shm_object_table_create(1 + num_possible_cpus());
+ handle->table = shm_object_table_create(1 + get_possible_cpus_array_len());
if (!handle->table)
goto error_table_alloc;
return NULL;
/* Allocate table for channel + per-cpu buffers */
- handle->table = shm_object_table_create(1 + num_possible_cpus());
+ handle->table = shm_object_table_create(1 + get_possible_cpus_array_len());
if (!handle->table)
goto error_table_alloc;
/* Add channel object */
if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
cpu = 0;
} else {
- if (cpu >= num_possible_cpus())
+ if (cpu >= get_possible_cpus_array_len())
return NULL;
}
ref = &chan->backend.buf[cpu].shmp._ref;
if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
cpu = 0;
} else {
- if (cpu >= num_possible_cpus())
+ if (cpu >= get_possible_cpus_array_len())
return -EINVAL;
}
ref = &chan->backend.buf[cpu].shmp._ref;
if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
cpu = 0;
} else {
- if (cpu >= num_possible_cpus())
+ if (cpu >= get_possible_cpus_array_len())
return -EINVAL;
}
ref = &chan->backend.buf[cpu].shmp._ref;
#define __max(a,b) ((a)>(b)?(a):(b))
-static int num_possible_cpus_cache;
+static int possible_cpus_array_len_cache;
/*
* As a fallback to parsing the CPU mask in "/sys/devices/system/cpu/possible",
}
/*
- * Get the number of CPUs from the possible cpu mask.
+ * Get the highest CPU id from a CPU mask.
*
* pmask: the mask to parse.
* len: the length of the mask excluding '\0'.
*
- * Returns the number of possible CPUs from the mask or 0 on error.
+ * Returns the highest CPU id from the mask or -1 on error.
*/
-int get_num_possible_cpus_from_mask(const char *pmask, size_t len)
+int get_max_cpuid_from_mask(const char *pmask, size_t len)
{
ssize_t i;
unsigned long cpu_index;
* CPUs.
*/
if ((&pmask[i] != endptr) && (cpu_index < INT_MAX))
- return (int) cpu_index + 1;
+ return (int) cpu_index;
error:
- return 0;
+ return -1;
}
-static void _get_num_possible_cpus(void)
+static void update_possible_cpus_array_len_cache(void)
{
int ret;
char buf[LTTNG_UST_CPUMASK_SIZE];
goto fallback;
/* Parse the possible cpu mask, on failure fallback to sysconf. */
- ret = get_num_possible_cpus_from_mask((char *) &buf, ret);
- if (ret > 0)
+ ret = get_max_cpuid_from_mask((char *) &buf, ret);
+ if (ret >= 0) {
+ /* Add 1 to convert from max cpuid to an array len. */
+ ret++;
goto end;
+ }
fallback:
/* Fallback to sysconf. */
if (ret < 1)
return;
- num_possible_cpus_cache = ret;
+ possible_cpus_array_len_cache = ret;
}
/*
- * Returns the total number of CPUs in the system. If the cache is not yet
- * initialized, get the value from "/sys/devices/system/cpu/possible" or
- * fallback to sysconf and cache it.
+ * Returns the length of an array that could contain a per-CPU element for each
+ * possible CPU id for the lifetime of the process.
+ *
+ * We currently assume CPU ids are contiguous up to the maximum CPU id.
+ *
+ * If the cache is not yet initialized, get the value from
+ * "/sys/devices/system/cpu/possible" or fallback to sysconf and cache it.
*
* If all methods fail, don't populate the cache and return 0.
*/
-int num_possible_cpus(void)
+int get_possible_cpus_array_len(void)
{
- if (caa_unlikely(!num_possible_cpus_cache))
- _get_num_possible_cpus();
+ if (caa_unlikely(!possible_cpus_array_len_cache))
+ update_possible_cpus_array_len_cache();
- return num_possible_cpus_cache;
+ return possible_cpus_array_len_cache;
}
__attribute__((visibility("hidden")));
/*
- * Get the number of CPUs from the possible cpu mask.
+ * Get the highest CPU id from a CPU mask.
*
* pmask: the mask to parse.
* len: the length of the mask excluding '\0'.
*
- * Returns the number of possible CPUs from the mask or 0 on error.
+ * Returns the highest CPU id from the mask or -1 on error.
*/
-int get_num_possible_cpus_from_mask(const char *pmask, size_t len)
+int get_max_cpuid_from_mask(const char *pmask, size_t len)
__attribute__((visibility("hidden")));
/*
- * Returns the total number of CPUs in the system. If the cache is not yet
- * initialized, get the value from "/sys/devices/system/cpu/possible" or
- * fallback to sysconf and cache it.
+ * Returns the length of an array that could contain a per-CPU element for each
+ * possible CPU id for the lifetime of the process.
+ *
+ * We currently assume CPU ids are contiguous up to the maximum CPU id.
+ *
+ * If the cache is not yet initialized, get the value from
+ * "/sys/devices/system/cpu/possible" or fallback to sysconf and cache it.
*
* If all methods fail, don't populate the cache and return 0.
*/
-int num_possible_cpus(void)
+int get_possible_cpus_array_len(void)
__attribute__((visibility("hidden")));
#define for_each_possible_cpu(cpu) \
- for ((cpu) = 0; (cpu) < num_possible_cpus(); (cpu)++)
+ for ((cpu) = 0; (cpu) < get_possible_cpus_array_len(); (cpu)++)
#endif /* _UST_COMMON_SMP_H */
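As a quick usage sketch of the renamed helpers (illustration only, not part of this change; the alloc_per_cpu_counters() caller and the "common/smp.h" include path are assumptions for the example), a per-CPU array is sized with get_possible_cpus_array_len() and indexed directly by CPU id, which relies on the contiguity assumption documented above:

#include <stdint.h>
#include <stdlib.h>

#include "common/smp.h"	/* assumed include path for the helpers above */

/* Hypothetical caller: one 64-bit counter slot per possible CPU id. */
static uint64_t *alloc_per_cpu_counters(void)
{
	int cpu, nr_cpus = get_possible_cpus_array_len();
	uint64_t *counters;

	if (nr_cpus <= 0)
		return NULL;	/* All detection methods failed. */
	counters = calloc(nr_cpus, sizeof(*counters));
	if (!counters)
		return NULL;
	/* Ids run from 0 to nr_cpus - 1, so indexing by CPU id stays in bounds. */
	for_each_possible_cpu(cpu)
		counters[cpu] = 0;
	return counters;
}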
int lttng_ust_ctl_get_nr_stream_per_channel(void)
{
- return num_possible_cpus();
+ return get_possible_cpus_array_len();
}
struct lttng_ust_ctl_consumer_channel *
int lttng_ust_ctl_get_nr_cpu_per_counter(void)
{
- return num_possible_cpus();
+ return get_possible_cpus_array_len();
}
struct lttng_ust_ctl_daemon_counter *
};
static struct parse_test_data parse_test_data[] = {
- { "", 0 },
- { "abc", 0 },
- { ",,,", 0 },
- { "--", 0 },
- { ",", 0 },
- { "-", 0 },
- { "2147483647", 0 },
- { "18446744073709551615", 0 },
- { "0-2147483647", 0 },
- { "0-18446744073709551615", 0 },
- { "0", 1 },
- { "1", 2 },
- { "0-1", 2 },
- { "1-3", 4 },
- { "0,2", 3 },
- { "1,2", 3 },
- { "0,4-6,127", 128 },
- { "0-4095", 4096 },
+ { "", -1 },
+ { "abc", -1 },
+ { ",,,", -1 },
+ { "--", -1 },
+ { ",", -1 },
+ { "-", -1 },
+ { "2147483647", -1 },
+ { "18446744073709551615", -1 },
+ { "0-2147483647", -1 },
+ { "0-18446744073709551615", -1 },
+ { "0", 0 },
+ { "1", 1 },
+ { "0-1", 1 },
+ { "1-3", 3 },
+ { "0,2", 2 },
+ { "1,2", 2 },
+ { "0,4-6,127", 127 },
+ { "0-4095", 4095 },
- { "\n", 0 },
- { "abc\n", 0 },
- { ",,,\n", 0 },
- { "--\n", 0 },
- { ",\n", 0 },
- { "-\n", 0 },
- { "2147483647\n", 0 },
- { "18446744073709551615\n", 0 },
- { "0-2147483647\n", 0 },
- { "0-18446744073709551615\n", 0 },
- { "0\n", 1 },
- { "1\n", 2 },
- { "0-1\n", 2 },
- { "1-3\n", 4 },
- { "0,2\n", 3 },
- { "1,2\n", 3 },
- { "0,4-6,127\n", 128 },
- { "0-4095\n", 4096 },
+ { "\n", -1 },
+ { "abc\n", -1 },
+ { ",,,\n", -1 },
+ { "--\n", -1 },
+ { ",\n", -1 },
+ { "-\n", -1 },
+ { "2147483647\n", -1 },
+ { "18446744073709551615\n", -1 },
+ { "0-2147483647\n", -1 },
+ { "0-18446744073709551615\n", -1 },
+ { "0\n", 0 },
+ { "1\n", 1 },
+ { "0-1\n", 1 },
+ { "1-3\n", 3 },
+ { "0,2\n", 2 },
+ { "1,2\n", 2 },
+ { "0,4-6,127\n", 127 },
+ { "0-4095\n", 4095 },
};
static int parse_test_data_len = sizeof(parse_test_data) / sizeof(parse_test_data[0]);
diag("Testing smp helpers");
for (i = 0; i < parse_test_data_len; i++) {
- ret = get_num_possible_cpus_from_mask(parse_test_data[i].buf,
+ ret = get_max_cpuid_from_mask(parse_test_data[i].buf,
strlen(parse_test_data[i].buf));
ok(ret == parse_test_data[i].expected,
- "get_num_possible_cpus_from_mask '%s', expected: '%d', result: '%d'",
+ "get_max_cpuid_from_mask '%s', expected: '%d', result: '%d'",
parse_test_data[i].buf, parse_test_data[i].expected, ret);
}
- ok(num_possible_cpus() > 0, "num_possible_cpus (%d > 0)", num_possible_cpus());
+ ok(get_possible_cpus_array_len() > 0, "get_possible_cpus_array_len (%d > 0)", get_possible_cpus_array_len());
return exit_status();
}