NR_READERS=$((${NUM_CPUS} - ${NR_WRITERS}))
for BATCH_SIZE in ${BATCH_ARRAY}; do
echo "./runtests-batch.sh ${NR_READERS} ${NR_WRITERS} ${DURATION} -d 0 -b ${BATCH_SIZE} ${EXTRA_OPTS} | tee -a batch-rcu.log" >> runall.log
- ./runtests-batch.sh ${NR_READERS} ${NR_WRITERS} ${DURATION} -d 0 -b ${BATCH_SIZE} ${EXTRA_OPTS} | tee -a batch-rcu.log
+ (./runtests-batch.sh ${NR_READERS} ${NR_WRITERS} ${DURATION} -d 0 -b ${BATCH_SIZE} ${EXTRA_OPTS} | tee -a batch-rcu.log) || exit 1
done
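# A minimal fail-fast sketch, assuming bash and that pipefail is not already
# enabled elsewhere in the script: the exit status of "cmd | tee" is tee's,
# so a failing runtests-batch.sh by itself does not trigger the "|| exit 1"
# above. Propagating the failure through the pipeline takes, near the top of
# the script:
#
#   set -o pipefail   # a pipeline fails if any of its stages fails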
# Setting gc (reclamation) batch size to 32768. ** UPDATE FOR YOUR ARCHITECTURE BASED ON THE TEST ABOVE **
NR_READERS=$((${NUM_CPUS} - ${NR_WRITERS}))
for WDELAY in ${WDELAY_ARRAY}; do
echo "./runtests.sh ${NR_READERS} ${NR_WRITERS} ${DURATION} -d ${WDELAY} ${EXTRA_OPTS} | tee -a update-fraction.log" >> runall.log
- ./runtests.sh ${NR_READERS} ${NR_WRITERS} ${DURATION} -d ${WDELAY} ${EXTRA_OPTS} | tee -a update-fraction.log
+ (./runtests.sh ${NR_READERS} ${NR_WRITERS} ${DURATION} -d ${WDELAY} ${EXTRA_OPTS} | tee -a update-fraction.log) || exit 1
done
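# The -d option is the writer delay, so sweeping WDELAY varies the update
# fraction: the longer each writer waits between updates, the smaller the
# proportion of updates to reads recorded in update-fraction.log.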
# Test scalability:
for NR_READERS in $(seq 1 ${NUM_CPUS}); do
echo "./runtests.sh ${NR_READERS} ${NR_WRITERS} ${DURATION} ${EXTRA_OPTS}| tee -a scalability.log" >> runall.log
- ./runtests.sh ${NR_READERS} ${NR_WRITERS} ${DURATION} ${EXTRA_OPTS}| tee -a scalability.log
+ (./runtests.sh ${NR_READERS} ${NR_WRITERS} ${DURATION} ${EXTRA_OPTS}| tee -a scalability.log) || exit 1
done
for READERCSLEN in ${READERCSLEN_ARRAY}; do
echo "./runtests.sh ${NR_READERS} ${NR_WRITERS} ${DURATION} ${EXTRA_OPTS} -c ${READERCSLEN} | tee -a readercslen.log" >> runall.log
- ./runtests.sh ${NR_READERS} ${NR_WRITERS} ${DURATION} ${EXTRA_OPTS} -c ${READERCSLEN} | tee -a readercslen.log
+ (./runtests.sh ${NR_READERS} ${NR_WRITERS} ${DURATION} ${EXTRA_OPTS} -c ${READERCSLEN} | tee -a readercslen.log) || exit 1
done
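# Here -c sets the reader critical-section length (in loop iterations), so
# readercslen.log shows how throughput evolves as read-side sections grow.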
echo Executing multi-flavor RCU test
-./test_urcu_multiflavor
-./test_urcu_multiflavor_dynlink
+./test_urcu_multiflavor || exit 1
+./test_urcu_multiflavor_dynlink || exit 1
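# The multi-flavor tests presumably verify that several RCU flavors can be
# linked into one program; the _dynlink variant exercises the same scenario
# through dynamic linking.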
echo Executing Hash table test
-./runhash.sh
+./runhash.sh || exit 1
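# A rough key to the option letters used in the hash table tests below,
# pieced together from the comments accompanying each test (the exact
# -M/-N/-O and -R/-S/-T letter-to-pool mapping is assumed):
#   -A             automatically resize the hash table
#   -s / -u / -i   add_replace / add_unique / add only (no removal)
#   -m / -n SIZE   minimum / maximum number of buckets
#   -M/-N/-O SIZE  lookup / write / init key-pool sizes
#   -R/-S/-T OFF   lookup / write / init key-pool offsets
#   -k NR          number of nodes inserted before the run starts
#   -B BACKEND     bucket memory backend: order, chunk or mmap
#   -U             uniqueness test (readers assert no duplicate keys)
#   -C NR          hash keys modulo NR to force long hash chains
# The positional arguments are nr_readers, nr_writers and duration.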
# rw test, single key, replace and del randomly, 4 threads, auto resize.
# key range: init, lookup, and update: 0 to 0
-${TESTPROG} 0 $((4*${THREAD_MUL})) ${TIME_UNITS} -A -s -M 1 -N 1 -O 1 ${EXTRA_PARAMS}
+${TESTPROG} 0 $((4*${THREAD_MUL})) ${TIME_UNITS} -A -s -M 1 -N 1 -O 1 ${EXTRA_PARAMS} || exit 1
# rw test, single key, add unique and del randomly, 4 threads, auto resize.
# key range: init, lookup, and update: 0 to 0
-${TESTPROG} 0 $((4*${THREAD_MUL})) ${TIME_UNITS} -A -u -M 1 -N 1 -O 1 ${EXTRA_PARAMS}
+${TESTPROG} 0 $((4*${THREAD_MUL})) ${TIME_UNITS} -A -u -M 1 -N 1 -O 1 ${EXTRA_PARAMS} || exit 1
# rw test, single key, replace and del randomly, 2 lookup threads, 2 update threads, auto resize.
# key range: init, lookup, and update: 0 to 0
-${TESTPROG} $((2*${THREAD_MUL})) $((2*${THREAD_MUL})) ${TIME_UNITS} -A -s -M 1 -N 1 -O 1 ${EXTRA_PARAMS}
+${TESTPROG} $((2*${THREAD_MUL})) $((2*${THREAD_MUL})) ${TIME_UNITS} -A -s -M 1 -N 1 -O 1 ${EXTRA_PARAMS} || exit 1
# rw test, single key, add and del randomly, 2 lookup threads, 2 update threads, auto resize.
# key range: init, lookup, and update: 0 to 0
-${TESTPROG} $((2*${THREAD_MUL})) $((2*${THREAD_MUL})) ${TIME_UNITS} -A -M 1 -N 1 -O 1 ${EXTRA_PARAMS}
+${TESTPROG} $((2*${THREAD_MUL})) $((2*${THREAD_MUL})) ${TIME_UNITS} -A -M 1 -N 1 -O 1 ${EXTRA_PARAMS} || exit 1
# ** test updates vs lookups with default table
# rw test, 2 lookup, 2 update threads, add and del randomly, auto resize.
# max 1048576 buckets
# key range: init, lookup, and update: 0 to 999999
-${TESTPROG} $((2*${THREAD_MUL})) $((2*${THREAD_MUL})) ${TIME_UNITS} -A ${EXTRA_PARAMS}
+${TESTPROG} $((2*${THREAD_MUL})) $((2*${THREAD_MUL})) ${TIME_UNITS} -A ${EXTRA_PARAMS} || exit 1
# rw test, 2 lookup, 2 update threads, add_replace and del randomly, auto resize.
# max 1048576 buckets
# key range: init, lookup, and update: 0 to 999999
-${TESTPROG} $((2*${THREAD_MUL})) $((2*${THREAD_MUL})) ${TIME_UNITS} -A -s ${EXTRA_PARAMS}
+${TESTPROG} $((2*${THREAD_MUL})) $((2*${THREAD_MUL})) ${TIME_UNITS} -A -s ${EXTRA_PARAMS} || exit 1
# rw test, 2 lookup, 2 update threads, add_unique and del randomly, auto resize.
# max 1048576 buckets
# key range: init, lookup, and update: 0 to 999999
-${TESTPROG} $((2*${THREAD_MUL})) $((2*${THREAD_MUL})) ${TIME_UNITS} -A -u ${EXTRA_PARAMS}
+${TESTPROG} $((2*${THREAD_MUL})) $((2*${THREAD_MUL})) ${TIME_UNITS} -A -u ${EXTRA_PARAMS} || exit 1
# test memory management backends
# key range: init, lookup, and update: 0 to 99999999
# mm backend: "order"
${TESTPROG} $((2*${THREAD_MUL})) $((2*${THREAD_MUL})) ${TIME_UNITS} -A -m 1 -n 1048576 -i \
- -M 100000000 -N 100000000 -O 100000000 -B order ${EXTRA_PARAMS}
+ -M 100000000 -N 100000000 -O 100000000 -B order ${EXTRA_PARAMS} || exit 1
# rw test, 2 lookup, 2 update threads, add only, auto resize.
# max buckets: 1048576
# key range: init, lookup, and update: 0 to 99999999
# mm backend: "chunk"
${TESTPROG} $((2*${THREAD_MUL})) $((2*${THREAD_MUL})) ${TIME_UNITS} -A -m 1 -n 1048576 -i \
- -M 100000000 -N 100000000 -O 100000000 -B chunk ${EXTRA_PARAMS}
+ -M 100000000 -N 100000000 -O 100000000 -B chunk ${EXTRA_PARAMS} || exit 1
# rw test, 2 lookup, 2 update threads, add only, auto resize.
# max buckets: 1048576
# key range: init, lookup, and update: 0 to 99999999
# mm backend: "mmap"
${TESTPROG} $((2*${THREAD_MUL})) $((2*${THREAD_MUL})) ${TIME_UNITS} -A -m 1 -n 1048576 -i \
- -M 100000000 -N 100000000 -O 100000000 -B mmap ${EXTRA_PARAMS}
+ -M 100000000 -N 100000000 -O 100000000 -B mmap ${EXTRA_PARAMS} || exit 1
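# order, chunk and mmap are the three bucket-memory allocation schemes of the
# RCU lock-free hash table; the three runs above exercise each of them with
# the same large key range (0 to 99999999).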
# ** key range tests
# NOTE: reader threads in this test should never have a successful
# lookup. TODO
${TESTPROG} $((2*${THREAD_MUL})) $((2*${THREAD_MUL})) ${TIME_UNITS} -A \
- -R 1000000 ${EXTRA_PARAMS}
+ -R 1000000 ${EXTRA_PARAMS} || exit 1
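# Presumably the -R 1000000 lookup-pool offset shifts reader lookups above
# the 0 to 999999 range the writers insert into, which is why no lookup is
# ever expected to succeed here.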
# ** small key range
# max 1048576 buckets
# key range: init, update, and lookups: 0 to 9
${TESTPROG} $((2*${THREAD_MUL})) $((2*${THREAD_MUL})) ${TIME_UNITS} -A \
- -M 10 -N 10 -O 10 ${EXTRA_PARAMS}
+ -M 10 -N 10 -O 10 ${EXTRA_PARAMS} || exit 1
# rw test, 2 lookup, 2 update threads, add_unique and del randomly, auto resize.
# max 1048576 buckets
# key range: init, update, and lookups: 0 to 9
${TESTPROG} $((2*${THREAD_MUL})) $((2*${THREAD_MUL})) ${TIME_UNITS} -A \
- -M 10 -N 10 -O 10 -u ${EXTRA_PARAMS}
+ -M 10 -N 10 -O 10 -u ${EXTRA_PARAMS} || exit 1
# rw test, 2 lookup, 2 update threads, add_replace and del randomly, auto resize.
# max 1048576 buckets
# key range: init, update, and lookups: 0 to 9
${TESTPROG} $((2*${THREAD_MUL})) $((2*${THREAD_MUL})) ${TIME_UNITS} -A \
- -M 10 -N 10 -O 10 -s ${EXTRA_PARAMS}
+ -M 10 -N 10 -O 10 -s ${EXTRA_PARAMS} || exit 1
# ** lookup for known keys
# NOTE: reader threads in this test should always have successful
# lookups. TODO
${TESTPROG} $((2*${THREAD_MUL})) $((2*${THREAD_MUL})) ${TIME_UNITS} -A \
- -M 10 -N 10 -O 10 -R 0 -T 0 -S 10 -k 10 -s ${EXTRA_PARAMS}
+ -M 10 -N 10 -O 10 -R 0 -T 0 -S 10 -k 10 -s ${EXTRA_PARAMS} || exit 1
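# Assuming -R/-S/-T are the lookup/write/init pool offsets, -k 10 pre-inserts
# keys 0 to 9, readers look up only those keys (-R 0, -M 10), and writers are
# shifted to keys 10 to 19 (-S 10), so every reader lookup is expected to
# succeed.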
# ** Uniqueness test
# asserts that no duplicates are observed by reader threads
# standard length hash chains
${TESTPROG} $((2*${THREAD_MUL})) $((2*${THREAD_MUL})) ${TIME_UNITS} -A \
- -U ${EXTRA_PARAMS}
+ -U ${EXTRA_PARAMS} || exit 1
# rw test, 2 lookup, 2 update threads, add_unique, add_replace and del randomly, auto resize.
# max 1048576 buckets
# asserts that no duplicates are observed by reader threads
# create long hash chains: using modulo 4 on keys as hash
${TESTPROG} $((2*${THREAD_MUL})) $((2*${THREAD_MUL})) ${TIME_UNITS} -A \
- -U -C 4 ${EXTRA_PARAMS}
+ -U -C 4 ${EXTRA_PARAMS} || exit 1