From: Mathieu Desnoyers
Date: Sun, 8 Feb 2009 22:10:22 +0000 (-0500)
Subject: Change API
X-Git-Tag: v0.1~317
X-Git-Url: https://git.lttng.org./?a=commitdiff_plain;h=c265818b951224bcde407b1efa14f9daf44949b3;p=urcu.git

Change API

* rcu_read_lock

> A patch below to allow nested rcu_read_lock() while keeping to the Linux
> kernel API, just FYI. One can argue that the overhead of accessing the
> extra per-thread variables is offset by the fact that there no longer
> needs to be a return value from rcu_read_lock() nor an argument to
> rcu_read_unlock(), but hard to say.
>

I ran your modified version within my benchmarks:

  with return value:    14.164 cycles per read
  without return value: 16.4017 cycles per read

So we have a 14% performance decrease due to this. We also pollute the
branch prediction buffer and add a cache access due to the extra
per-thread variables in the TLS. Returning the value has the clear
advantage of letting the compiler keep it in a register or on the
stack, which costs less.

So I think the speed factor outweighs the visual considerations. Maybe
we could switch to something like:

  int qparity;

  urcu_read_lock(&qparity);
  ...
  urcu_read_unlock(&qparity);

That would be a bit like local_irq_save() in the kernel, except that we
can do it in a static inline because we pass the address. I personally
dislike the local_irq_save() way of hiding, in a "clever" macro, the
fact that it writes to the variable. I'd really prefer to keep the "&"
visible at the call site. (A fuller read-side sketch follows the urcu.h
hunk at the end of this patch.)

* rcu_write_lock

Removed. The test programs now serialize writers with a mutex of their
own, and the library takes its internal mutex (internal_urcu_lock())
where needed.
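For illustration, here is a minimal writer sketch under the resulting
API. This is an assumption-laden example, not part of the patch:
struct test_array, the value 8, and rcu_copy_mutex mirror the test
programs below, and writer_update() is a hypothetical helper:

	#include <pthread.h>
	#include <stdlib.h>

	#include "urcu.h"

	struct test_array {
		int a;
	};

	static struct test_array *test_rcu_pointer;
	static pthread_mutex_t rcu_copy_mutex = PTHREAD_MUTEX_INITIALIZER;

	void writer_update(void)
	{
		struct test_array *new, *old;

		new = malloc(sizeof(*new));
		new->a = 8;

		/* Writers now serialize their read-copy-update sequences
		 * with a mutex of their own; rcu_write_lock() is gone. */
		pthread_mutex_lock(&rcu_copy_mutex);
		old = urcu_publish_content((void **)&test_rcu_pointer, new);
		pthread_mutex_unlock(&rcu_copy_mutex);

		/* urcu_publish_content() returns only after a grace period
		 * (two qparity switches), so no reader can still hold a
		 * reference to the old copy. */
		free(old);
	}

As the /* can be done after unlock */ comments in the tests point out,
reclaiming the old copy does not need to hold rcu_copy_mutex: the grace
period is already over when urcu_publish_content() returns.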
Signed-off-by: Mathieu Desnoyers
---

diff --git a/test_urcu.c b/test_urcu.c
index 7752e6e..f6be45b 100644
--- a/test_urcu.c
+++ b/test_urcu.c
@@ -47,6 +47,28 @@ static struct test_array *test_rcu_pointer;
 
 #define NR_READ 10
 #define NR_WRITE 9
+pthread_mutex_t rcu_copy_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+void rcu_copy_mutex_lock(void)
+{
+	int ret;
+	ret = pthread_mutex_lock(&rcu_copy_mutex);
+	if (ret) {
+		perror("Error in pthread mutex lock");
+		exit(-1);
+	}
+}
+
+void rcu_copy_mutex_unlock(void)
+{
+	int ret;
+
+	ret = pthread_mutex_unlock(&rcu_copy_mutex);
+	if (ret) {
+		perror("Error in pthread mutex unlock");
+		exit(-1);
+	}
+}
 
 void *thr_reader(void *arg)
 {
@@ -61,14 +83,14 @@ void *thr_reader(void *arg)
 
 	for (i = 0; i < 100000; i++) {
 		for (j = 0; j < 100000000; j++) {
-			qparity = rcu_read_lock();
+			rcu_read_lock(&qparity);
 			local_ptr = rcu_dereference(test_rcu_pointer);
 			if (local_ptr) {
 				assert(local_ptr->a == 8);
 				assert(local_ptr->b == 12);
 				assert(local_ptr->c[55] == 2);
 			}
-			rcu_read_unlock(qparity);
+			rcu_read_unlock(&qparity);
 		}
 	}
 
@@ -89,7 +111,7 @@ void *thr_writer(void *arg)
 
 	for (i = 0; i < 10000000; i++) {
 		new = malloc(sizeof(struct test_array));
-		rcu_write_lock();
+		rcu_copy_mutex_lock();
 		old = test_rcu_pointer;
 		if (old) {
 			assert(old->a == 8);
@@ -100,7 +122,7 @@ void *thr_writer(void *arg)
 		new->b = 12;
 		new->a = 8;
 		old = urcu_publish_content((void **)&test_rcu_pointer, new);
-		rcu_write_unlock();
+		rcu_copy_mutex_unlock();
 		/* can be done after unlock */
 		if (old) {
 			old->a = 0;
diff --git a/test_urcu_timing.c b/test_urcu_timing.c
index 6161192..57fda4f 100644
--- a/test_urcu_timing.c
+++ b/test_urcu_timing.c
@@ -52,6 +52,29 @@ static inline cycles_t get_cycles (void)
 
 #include "urcu.h"
 
+pthread_mutex_t rcu_copy_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+void rcu_copy_mutex_lock(void)
+{
+	int ret;
+	ret = pthread_mutex_lock(&rcu_copy_mutex);
+	if (ret) {
+		perror("Error in pthread mutex lock");
+		exit(-1);
+	}
+}
+
+void rcu_copy_mutex_unlock(void)
+{
+	int ret;
+
+	ret = pthread_mutex_unlock(&rcu_copy_mutex);
+	if (ret) {
+		perror("Error in pthread mutex unlock");
+		exit(-1);
+	}
+}
+
 struct test_array {
 	int a;
 };
@@ -84,12 +107,12 @@ void *thr_reader(void *arg)
 	time1 = get_cycles();
 	for (i = 0; i < OUTER_READ_LOOP; i++) {
 		for (j = 0; j < INNER_READ_LOOP; j++) {
-			qparity = rcu_read_lock();
+			rcu_read_lock(&qparity);
 			local_ptr = rcu_dereference(test_rcu_pointer);
 			if (local_ptr) {
 				assert(local_ptr->a == 8);
 			}
-			rcu_read_unlock(qparity);
+			rcu_read_unlock(&qparity);
 		}
 	}
 	time2 = get_cycles();
@@ -116,14 +139,14 @@ void *thr_writer(void *arg)
 
 	for (i = 0; i < WRITE_LOOP; i++) {
 		new = malloc(sizeof(struct test_array));
-		rcu_write_lock();
+		rcu_copy_mutex_lock();
 		old = test_rcu_pointer;
 		if (old) {
 			assert(old->a == 8);
 		}
 		new->a = 8;
 		old = urcu_publish_content((void **)&test_rcu_pointer, new);
-		rcu_write_unlock();
+		rcu_copy_mutex_unlock();
 		/* can be done after unlock */
 		if (old) {
 			old->a = 0;
diff --git a/urcu.c b/urcu.c
index 1a276ce..08fb75d 100644
--- a/urcu.c
+++ b/urcu.c
@@ -36,7 +36,7 @@ static struct reader_data *reader_data;
 static int num_readers, alloc_readers;
 static int sig_done;
 
-void rcu_write_lock(void)
+void internal_urcu_lock(void)
 {
 	int ret;
 	ret = pthread_mutex_lock(&urcu_mutex);
@@ -46,7 +46,7 @@ void rcu_write_lock(void)
 	}
 }
 
-void rcu_write_unlock(void)
+void internal_urcu_unlock(void)
 {
 	int ret;
 
@@ -130,10 +130,10 @@ static void switch_qparity(void)
 
 void synchronize_rcu(void)
 {
-	rcu_write_lock();
+	internal_urcu_lock();
 	switch_qparity();
 	switch_qparity();
-	rcu_write_unlock();
+	internal_urcu_unlock();
 }
 
 /*
@@ -144,6 +144,7 @@ void *urcu_publish_content(void **ptr, void *new)
 {
 	void *oldptr;
 
+	internal_urcu_lock();
 	/*
	 * We can publish the new pointer before we change the current qparity.
	 * Readers seeing the new pointer while being in the previous qparity
@@ -159,6 +160,7 @@ void *urcu_publish_content(void **ptr, void *new)
 
 	switch_qparity();
 	switch_qparity();
+	internal_urcu_unlock();
 
 	return oldptr;
 }
@@ -213,16 +215,16 @@ void urcu_remove_reader(pthread_t id)
 
 void urcu_register_thread(void)
 {
-	rcu_write_lock();
+	internal_urcu_lock();
 	urcu_add_reader(pthread_self());
-	rcu_write_unlock();
+	internal_urcu_unlock();
 }
 
 void urcu_unregister_thread(void)
 {
-	rcu_write_lock();
+	internal_urcu_lock();
 	urcu_remove_reader(pthread_self());
-	rcu_write_unlock();
+	internal_urcu_unlock();
 }
 
 void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
diff --git a/urcu.h b/urcu.h
index 9431da5..b6b5c7b 100644
--- a/urcu.h
+++ b/urcu.h
@@ -77,33 +77,29 @@ static inline int get_urcu_qparity(void)
 }
 
 /*
- * returns urcu_parity.
+ * urcu_parity should be declared on the caller's stack.
  */
-static inline int rcu_read_lock(void)
+static inline void rcu_read_lock(int *urcu_parity)
 {
-	int urcu_parity = get_urcu_qparity();
-	urcu_active_readers[urcu_parity]++;
+	*urcu_parity = get_urcu_qparity();
+	urcu_active_readers[*urcu_parity]++;
 	/*
	 * Increment active readers count before accessing the pointer.
	 * See force_mb_all_threads().
	 */
 	barrier();
-	return urcu_parity;
 }
 
-static inline void rcu_read_unlock(int urcu_parity)
+static inline void rcu_read_unlock(int *urcu_parity)
 {
 	barrier();
 	/*
	 * Finish using rcu before decrementing the pointer.
	 * See force_mb_all_threads().
	 */
-	urcu_active_readers[urcu_parity]--;
+	urcu_active_readers[*urcu_parity]--;
 }
 
-extern void rcu_write_lock(void);
-extern void rcu_write_unlock(void);
-
 extern void *urcu_publish_content(void **ptr, void *new);
 
 /*
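To complement the urcu.h hunk above, here is the read-side sketch
promised earlier. Again an illustrative assumption, not part of the
patch: struct test_array and the value 8 come from the test programs,
and thr_reader_example() is a hypothetical thread function:

	#include <assert.h>
	#include <stddef.h>

	#include "urcu.h"

	struct test_array {
		int a;
	};

	static struct test_array *test_rcu_pointer;

	void *thr_reader_example(void *arg)
	{
		int qparity;
		struct test_array *local_ptr;

		/* Register so writers can scan this thread's
		 * urcu_active_readers counts. */
		urcu_register_thread();

		rcu_read_lock(&qparity);	/* qparity lives on this stack */
		local_ptr = rcu_dereference(test_rcu_pointer);
		if (local_ptr)
			assert(local_ptr->a == 8);
		rcu_read_unlock(&qparity);

		urcu_unregister_thread();
		return NULL;
	}

Because rcu_read_lock()/rcu_read_unlock() are static inlines taking
&qparity, the compiler can keep the parity in a register across the
critical section, preserving the register-allocation benefit that the
cycle counts above were measuring.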