int a;
};
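+/* Set by the -n option: disable the writer delay between updates. */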
+static int no_writer_delay;
+
static struct test_array *test_rcu_pointer;
static unsigned long duration;
}
}
+/*
+ * malloc/free reuse memory areas too quickly, which does not let us test
+ * races appropriately. Use a large circular array for allocations instead.
+ * ARRAY_SIZE is larger than NR_WRITE, which ensures we never overrun our tail.
+ */
+#define ARRAY_SIZE (1048576 * NR_WRITE)
+#define ARRAY_POISON 0xDEADBEEF
+static int array_index;
+static struct test_array test_array[ARRAY_SIZE];
+
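+/*
+ * Hand out array slots round-robin under rcu_copy_mutex.  A slot handed out
+ * must either be unused (0) or have been poisoned by test_array_free(),
+ * otherwise we are recycling memory that may still be in use.
+ */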
+static struct test_array *test_array_alloc(void)
+{
+ struct test_array *ret;
+ int index;
+
+ rcu_copy_mutex_lock();
+ index = array_index % ARRAY_SIZE;
+ assert(test_array[index].a == ARRAY_POISON ||
+ test_array[index].a == 0);
+ ret = &test_array[index];
+ array_index++;
+ if (array_index == ARRAY_SIZE)
+ array_index = 0;
+ rcu_copy_mutex_unlock();
+ return ret;
+}
+
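+/*
+ * Rather than returning memory to an allocator, poison the entry.  A reader
+ * still holding a stale pointer will then fail its a == 8 assertion instead
+ * of silently reading reused memory.
+ */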
+static void test_array_free(struct test_array *ptr)
+{
+ if (!ptr)
+ return;
+ rcu_copy_mutex_lock();
+ ptr->a = ARRAY_POISON;
+ rcu_copy_mutex_unlock();
+}
+
void *thr_reader(void *arg)
{
struct test_array *local_ptr;
for (;;) {
rcu_read_lock();
local_ptr = rcu_dereference(test_rcu_pointer);
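+		/* With -r, sleep between dereference and use to widen the race window. */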
+ debug_yield_read();
if (local_ptr)
assert(local_ptr->a == 8);
rcu_read_unlock();
"writer", pthread_self(), (unsigned long)gettid());
for (;;) {
- new = malloc(sizeof(struct test_array));
+ new = test_array_alloc();
rcu_copy_mutex_lock();
old = test_rcu_pointer;
if (old)
/* can be done after unlock */
if (old)
old->a = 0;
- free(old);
+ test_array_free(old);
if (!test_duration())
break;
- usleep(1);
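+		/* The -n option removes this delay to maximize update pressure. */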
+ if (!no_writer_delay)
+ usleep(1);
}
printf("thread_end %s, thread id : %lx, tid %lu\n",
#ifdef DEBUG_YIELD
printf(" [-r] [-w] (yield reader and/or writer)");
#endif
+ printf(" [-n] (disable writer delay)");
printf("\n");
}
return -1;
}
-#ifdef DEBUG_YIELD
for (i = 2; i < argc; i++) {
if (argv[i][0] != '-')
continue;
switch (argv[i][1]) {
+#ifdef DEBUG_YIELD
case 'r':
yield_active |= YIELD_READ;
break;
case 'w':
yield_active |= YIELD_WRITE;
break;
+#endif
+ case 'n':
+ no_writer_delay = 1;
+ break;
}
}
-#endif
printf("running test for %lu seconds.\n", duration);
start_time = time(NULL);
if (err != 0)
exit(1);
}
- free(test_rcu_pointer);
+ test_array_free(test_rcu_pointer);
return 0;
}
urcu_gp_ctr ^= RCU_GP_CTR_BIT;
}
+#ifdef DEBUG_FULL_MB
+static void force_mb_all_threads(void)
+{
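+	/*
+	 * Readers already use full memory barriers in this mode (see
+	 * read_barrier()), so a local mb() is enough: there is no need to
+	 * signal every reader thread.
+	 */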
+ mb();
+}
+#else
static void force_mb_all_threads(void)
{
struct reader_data *index;
mb(); /* read sig_done before ending the barrier */
debug_yield_write();
}
+#endif
void wait_for_quiescent_state(void)
{
internal_urcu_unlock();
}
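+/* The signal handler is only needed when readers rely on compiler barriers. */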
+#ifndef DEBUG_FULL_MB
void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
mb();
assert(act.sa_sigaction == sigurcu_handler);
free(reader_data);
}
+#endif
#include <sched.h>
#include <time.h>
#include <pthread.h>
+#include <unistd.h>
#define YIELD_READ (1 << 0)
#define YIELD_WRITE (1 << 1)
+/* Updates without DEBUG_FULL_MB are much slower. Account for this in the delay. */
+#ifdef DEBUG_FULL_MB
+/* maximum sleep delay, in us */
+#define MAX_SLEEP 50
+#else
+#define MAX_SLEEP 30000
+#endif
+
extern unsigned int yield_active;
extern unsigned int __thread rand_yield;
{
if (yield_active & YIELD_READ)
if (rand_r(&rand_yield) & 0x1)
- sched_yield();
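+			/* Random sleep opens wider race windows. */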
+ usleep(rand_r(&rand_yield) % MAX_SLEEP);
}
static inline void debug_yield_write(void)
{
if (yield_active & YIELD_WRITE)
if (rand_r(&rand_yield) & 0x1)
- sched_yield();
+ usleep(rand_r(&rand_yield) % MAX_SLEEP);
}
static inline void debug_yield_init(void)
}
#endif
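+/*
+ * Reader-side barrier: a full mb() when DEBUG_FULL_MB is set, otherwise a
+ * compiler barrier (the writer forces ordering by signalling the readers).
+ */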
+#ifdef DEBUG_FULL_MB
+static inline void read_barrier(void)
+{
+ mb();
+}
+#else
+static inline void read_barrier(void)
+{
+ barrier();
+}
+#endif
+
/*
* The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use a
* full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
* Increment active readers count before accessing the pointer.
* See force_mb_all_threads().
*/
- barrier();
+ read_barrier();
debug_yield_read();
}
static inline void rcu_read_unlock(void)
{
debug_yield_read();
- barrier();
+ read_barrier();
debug_yield_read();
/*
* Finish using rcu before decrementing the pointer.