Add Intel ipi urcu model run results
author Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Wed, 3 Jun 2009 14:15:45 +0000 (10:15 -0400)
committer Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Wed, 3 Jun 2009 14:15:45 +0000 (10:15 -0400)
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
38 files changed:
formal-model/results/urcu-controldataflow-ipi-intel/.input.spin [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/DEFINES [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/Makefile [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/asserts.log [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/asserts.spin.input [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/references.txt [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu.sh [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu.spin [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_free.log [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_free.ltl [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_free.spin.input [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_nested.define [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_mb.define [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_mb.log [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_mb.spin.input [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_mb.spin.input.trail [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_rmb.define [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_rmb.log [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_rmb.spin.input [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_wmb.define [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_wmb.log [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_wmb.spin.input [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_wmb.spin.input.trail [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_single_flip.define [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_single_flip.log [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_single_flip.spin.input [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_single_flip.spin.input.trail [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress.ltl [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_reader.define [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_reader.log [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_reader.spin.input [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_writer.define [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_writer.log [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_writer.spin.input [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_writer_error.define [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_writer_error.log [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_writer_error.spin.input [new file with mode: 0644]
formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_writer_error.spin.input.trail [new file with mode: 0644]

diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/.input.spin b/formal-model/results/urcu-controldataflow-ipi-intel/.input.spin
new file mode 100644 (file)
index 0000000..8afab6a
--- /dev/null
@@ -0,0 +1,1249 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered as a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. c.f.
+ * http://www.linuxjournal.com/article/8212) */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process have its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barriers dependencies in place in REMOTE_BARRIERS mode
+ * because they would add unexisting core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)    smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, producetoken)                          \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, READ_PROD_A_READ << base) ->  \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base,                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_C_IF_TRUE_READ     /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);            \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);        \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB | READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation has the barrier
+                        * because the performance impact added by a branch in the common case does not
+                        * justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_UNLOCK_OUT             /* RAW */
+                                           | READ_PROC_SECOND_MB       /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB        /* mb() orders reads */
+                                           | READ_LOCK_NESTED_OUT      /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT,   /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event mb()s are removed (execution using signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * to spill its execution on other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+/*
+ * Reader process : loops forever, each iteration performing one full
+ * read-side critical section through urcu_one_read(). i, j, nest_i,
+ * tmp and tmp2 are scratch variables handed down to the inlined
+ * read-side code.
+ */
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       /* Block until init {} has finished initializing the model state. */
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test reader's progress here, because we are mainly
+                * interested in writer's progress. The reader never blocks
+                * anyway. We have to test for reader/writer's progress
+                * separately, otherwise we could think the writer is doing
+                * progress when it's blocked by an always progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+/* One bit per writer instruction. A bit is set (PRODUCE_TOKENS) once
+ * the instruction has executed, and tested (CONSUME_TOKENS) by the
+ * instructions that depend on it. */
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+/* Token set of one completed writer pass. The transient
+ * WRITE_PROC_{FIRST,SECOND}_WAIT_LOOP bits (8 and 12) are deliberately
+ * not listed : they are cleared each time the corresponding wait loops. */
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+/* Mask clearing every token bit, 0 through 14 (WAIT_LOOP bits included). */
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ *
+ * Each of the (at most 3) update passes : writes new data into the next
+ * slab slot, wmb, xchg of rcu_ptr, then a two-phase grace period (two
+ * parity flips of urcu_gp_ctr, each waiting for reader 0 to drain),
+ * and finally poisons the previous slot. The CONSUME_TOKENS /
+ * PRODUCE_TOKENS dataflow lets these steps execute out of order
+ * exactly where the modeled constraints permit it.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existing dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               /* Removed barriers are modeled as pre-produced tokens. */
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               /* Token dataflow : a branch is enabled once the tokens it
+                * consumes have been produced, modeling instruction
+                * reordering within the constraints listed per branch. */
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we are always starting by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT         /* Control dependency : need to branch out of
+                                                                * the loop to execute the next flip (CHECK) */
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       /* NOTE(review): "tmp2 ^ 0" mirrors the first wait's
+                        * "tmp2 ^ cur_gp_val" ; here the comparison is against
+                        * parity 0 -- confirm this is the intended parity,
+                        * including under SINGLE_FLIP. */
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill on the next loop execution. Given the validation we do
+                * checks for the data entry read to be poisoned, it's ok if
+                * we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       /* Out-of-line mb() bodies, reached via goto from the sequences
+        * above. */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               /* rcu_ptr initially designates slab entry 0. */
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       /* Per-reader trace variables start in a benign state
+                        * (data read as WINE, i.e. not poisoned). */
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               /* Slab entry 0 holds valid data ; every other entry starts
+                * poisoned. */
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               /* Release processes blocked in wait_init_done(). */
+               init_done = 1;
+       }
+}
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/DEFINES b/formal-model/results/urcu-controldataflow-ipi-intel/DEFINES
new file mode 100644 (file)
index 0000000..abea5ff
--- /dev/null
@@ -0,0 +1,18 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+// Number of data slots in the modeled allocator
+#define SLAB_SIZE 2
+
+// Safety property : true when reader 0 observed freed (poisoned) data.
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+// Grace-period parity bit ; the bits below it hold the nesting count.
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+// NOTE(review): the "disabled" marker below looks stale -- REMOTE_BARRIERS
+// *is* defined here (this is the ipi model) ; confirm the comment's intent.
+//disabled
+#define REMOTE_BARRIERS
+
+// Target architecture memory-model selection.
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/Makefile b/formal-model/results/urcu-controldataflow-ipi-intel/Makefile
new file mode 100644 (file)
index 0000000..de47dff
--- /dev/null
@@ -0,0 +1,170 @@
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright (C) Mathieu Desnoyers, 2009
+#
+# Authors: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+
+#CFLAGS=-DSAFETY
+#for multi-core verif, 15.5GB shared mem, use files if full
+#CFLAGS=-DHASH64 -DMEMLIM=15500 -DNCORE=2
+#CFLAGS=-DHASH64 -DCOLLAPSE -DMA=88 -DMEMLIM=15500 -DNCORE=8
+
+#liveness
+#CFLAGS=-DHASH64 -DCOLLAPSE -DMA=88
+CFLAGS=-DHASH64
+
+SPINFILE=urcu.spin
+
+default:
+       make urcu_free | tee urcu_free.log
+       make urcu_free_no_mb | tee urcu_free_no_mb.log
+       make urcu_free_no_rmb | tee urcu_free_no_rmb.log
+       make urcu_free_no_wmb | tee urcu_free_no_wmb.log
+       make urcu_free_single_flip | tee urcu_free_single_flip.log
+       make urcu_progress_writer | tee urcu_progress_writer.log
+       make urcu_progress_reader | tee urcu_progress_reader.log
+       make urcu_progress_writer_error | tee urcu_progress_writer_error.log
+       make asserts | tee asserts.log
+       make summary
+
+#show trail : spin -v -t -N pan.ltl input.spin
+# after each individual make.
+
+summary:
+       @echo
+       @echo "Verification summary"
+       @grep errors: *.log
+
+asserts: clean
+       cat DEFINES > .input.spin
+       cat ${SPINFILE} >> .input.spin
+       rm -f .input.spin.trail
+       spin -a -X .input.spin
+       gcc -O2 -w ${CFLAGS} -DSAFETY -o pan pan.c
+       ./pan -v -c1 -X -m10000000 -w20
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free: clean urcu_free_ltl run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_nested: clean urcu_free_ltl urcu_free_nested_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_nested_define:
+       cp urcu_free_nested.define .input.define
+
+urcu_free_no_rmb: clean urcu_free_ltl urcu_free_no_rmb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_rmb_define:
+       cp urcu_free_no_rmb.define .input.define
+
+urcu_free_no_wmb: clean urcu_free_ltl urcu_free_no_wmb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_wmb_define:
+       cp urcu_free_no_wmb.define .input.define
+
+urcu_free_no_mb: clean urcu_free_ltl urcu_free_no_mb_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_no_mb_define:
+       cp urcu_free_no_mb.define .input.define
+
+urcu_free_single_flip: clean urcu_free_ltl urcu_free_single_flip_define run
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_free_single_flip_define:
+       cp urcu_free_single_flip.define .input.define
+
+urcu_free_ltl:
+       touch .input.define
+       cat .input.define >> pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+
+# Progress checks
+
+urcu_progress_writer: clean urcu_progress_writer_ltl \
+               urcu_progress_writer_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_writer_define:
+       cp urcu_progress_writer.define .input.define
+
+urcu_progress_writer_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+urcu_progress_reader: clean urcu_progress_reader_ltl \
+               urcu_progress_reader_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_reader_define:
+       cp urcu_progress_reader.define .input.define
+
+urcu_progress_reader_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+urcu_progress_writer_error: clean urcu_progress_writer_error_ltl \
+               urcu_progress_writer_error_define run_weak_fair
+       cp .input.spin $@.spin.input
+       -cp .input.spin.trail $@.spin.input.trail
+
+urcu_progress_writer_error_define:
+       cp urcu_progress_writer_error.define .input.define
+
+urcu_progress_writer_error_ltl:
+       touch .input.define
+       cat .input.define > pan.ltl
+       cat DEFINES >> pan.ltl
+       spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+
+
+run_weak_fair: pan
+       ./pan -a -f -v -c1 -X -m10000000 -w20
+
+run: pan
+       ./pan -a -v -c1 -X -m10000000 -w20
+
+pan: pan.c
+       gcc -O2 -w ${CFLAGS} -o pan pan.c
+
+pan.c: pan.ltl ${SPINFILE}
+       cat .input.define > .input.spin
+       cat DEFINES >> .input.spin
+       cat ${SPINFILE} >> .input.spin
+       rm -f .input.spin.trail
+       spin -a -X -N pan.ltl .input.spin
+
+.PHONY: clean default distclean summary
+clean:
+       rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+distclean:
+       rm -f *.trail *.input *.log
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/asserts.log b/formal-model/results/urcu-controldataflow-ipi-intel/asserts.log
new file mode 100644 (file)
index 0000000..ddccacb
--- /dev/null
@@ -0,0 +1,473 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+cat DEFINES > .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -DSAFETY -o pan pan.c
+./pan -v -c1 -X -m10000000 -w20
+Depth=    5407 States=    1e+06 Transitions= 5.87e+06 Memory=   542.717        t=   11.7 R=   9e+04
+Depth=    5407 States=    2e+06 Transitions= 1.64e+07 Memory=   618.986        t=   35.3 R=   6e+04
+Depth=    5407 States=    3e+06 Transitions= 3.29e+07 Memory=   695.256        t=   74.2 R=   4e+04
+pan: resizing hashtable to -w22..  done
+Depth=    5407 States=    4e+06 Transitions= 4.33e+07 Memory=   802.647        t=   97.9 R=   4e+04
+Depth=    5407 States=    5e+06 Transitions=  5.2e+07 Memory=   878.916        t=    117 R=   4e+04
+Depth=    5407 States=    6e+06 Transitions= 5.99e+07 Memory=   955.186        t=    134 R=   4e+04
+Depth=    5407 States=    7e+06 Transitions= 7.13e+07 Memory=  1031.455        t=    160 R=   4e+04
+Depth=    5407 States=    8e+06 Transitions= 8.71e+07 Memory=  1107.822        t=    198 R=   4e+04
+Depth=    5407 States=    9e+06 Transitions= 9.81e+07 Memory=  1184.092        t=    223 R=   4e+04
+pan: resizing hashtable to -w24..  done
+Depth=    5407 States=    1e+07 Transitions= 1.07e+08 Memory=  1384.455        t=    244 R=   4e+04
+Depth=    5407 States=  1.1e+07 Transitions= 1.14e+08 Memory=  1460.725        t=    260 R=   4e+04
+Depth=    5407 States=  1.2e+07 Transitions= 1.27e+08 Memory=  1536.994        t=    288 R=   4e+04
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             - (none specified)
+       assertion violations    +
+       cycle checks            - (disabled by -DSAFETY)
+       invalid end states      +
+
+State-vector 72 byte, depth reached 5407, errors: 0
+ 12141896 states, stored
+1.1623987e+08 states, matched
+1.2838176e+08 transitions (= stored+matched)
+1.9407117e+09 atomic steps
+hash conflicts:  98957892 (resolved)
+
+Stats on memory usage (in Megabytes):
+ 1157.941      equivalent memory usage for states (stored*(State-vector + overhead))
+  962.219      actual memory usage for states (compression: 83.10%)
+               state-vector as stored = 55 byte + 28 byte overhead
+  128.000      memory used for hash table (-w24)
+  457.764      memory used for DFS stack (-m10000000)
+ 1547.834      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 261, ".input.spin", state 30, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 269, ".input.spin", state 52, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 273, ".input.spin", state 61, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 77, "(1)"
+       line 242, ".input.spin", state 85, "(1)"
+       line 246, ".input.spin", state 97, "(1)"
+       line 250, ".input.spin", state 105, "(1)"
+       line 400, ".input.spin", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 163, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 177, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 195, "(1)"
+       line 246, ".input.spin", state 215, "(1)"
+       line 250, ".input.spin", state 223, "(1)"
+       line 679, ".input.spin", state 242, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 400, ".input.spin", state 249, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 281, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 295, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 313, "(1)"
+       line 246, ".input.spin", state 333, "(1)"
+       line 250, ".input.spin", state 341, "(1)"
+       line 400, ".input.spin", state 360, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 392, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 406, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 424, "(1)"
+       line 246, ".input.spin", state 444, "(1)"
+       line 250, ".input.spin", state 452, "(1)"
+       line 400, ".input.spin", state 473, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 400, ".input.spin", state 475, "(1)"
+       line 400, ".input.spin", state 476, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 400, ".input.spin", state 476, "else"
+       line 400, ".input.spin", state 479, "(1)"
+       line 404, ".input.spin", state 487, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 404, ".input.spin", state 489, "(1)"
+       line 404, ".input.spin", state 490, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 404, ".input.spin", state 490, "else"
+       line 404, ".input.spin", state 493, "(1)"
+       line 404, ".input.spin", state 494, "(1)"
+       line 404, ".input.spin", state 494, "(1)"
+       line 402, ".input.spin", state 499, "((i<1))"
+       line 402, ".input.spin", state 499, "((i>=1))"
+       line 409, ".input.spin", state 505, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 507, "(1)"
+       line 409, ".input.spin", state 508, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 409, ".input.spin", state 508, "else"
+       line 409, ".input.spin", state 511, "(1)"
+       line 409, ".input.spin", state 512, "(1)"
+       line 409, ".input.spin", state 512, "(1)"
+       line 413, ".input.spin", state 519, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 521, "(1)"
+       line 413, ".input.spin", state 522, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 413, ".input.spin", state 522, "else"
+       line 413, ".input.spin", state 525, "(1)"
+       line 413, ".input.spin", state 526, "(1)"
+       line 413, ".input.spin", state 526, "(1)"
+       line 411, ".input.spin", state 531, "((i<2))"
+       line 411, ".input.spin", state 531, "((i>=2))"
+       line 238, ".input.spin", state 537, "(1)"
+       line 242, ".input.spin", state 545, "(1)"
+       line 242, ".input.spin", state 546, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 242, ".input.spin", state 546, "else"
+       line 240, ".input.spin", state 551, "((i<1))"
+       line 240, ".input.spin", state 551, "((i>=1))"
+       line 246, ".input.spin", state 557, "(1)"
+       line 246, ".input.spin", state 558, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 246, ".input.spin", state 558, "else"
+       line 250, ".input.spin", state 565, "(1)"
+       line 250, ".input.spin", state 566, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 250, ".input.spin", state 566, "else"
+       line 248, ".input.spin", state 571, "((i<2))"
+       line 248, ".input.spin", state 571, "((i>=2))"
+       line 255, ".input.spin", state 575, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 255, ".input.spin", state 575, "else"
+       line 420, ".input.spin", state 577, "(1)"
+       line 420, ".input.spin", state 577, "(1)"
+       line 679, ".input.spin", state 580, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 679, ".input.spin", state 581, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 679, ".input.spin", state 582, "(1)"
+       line 400, ".input.spin", state 589, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 621, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 635, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 653, "(1)"
+       line 246, ".input.spin", state 673, "(1)"
+       line 250, ".input.spin", state 681, "(1)"
+       line 400, ".input.spin", state 707, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 400, ".input.spin", state 709, "(1)"
+       line 400, ".input.spin", state 710, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 400, ".input.spin", state 710, "else"
+       line 400, ".input.spin", state 713, "(1)"
+       line 404, ".input.spin", state 721, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 404, ".input.spin", state 723, "(1)"
+       line 404, ".input.spin", state 724, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 404, ".input.spin", state 724, "else"
+       line 404, ".input.spin", state 727, "(1)"
+       line 404, ".input.spin", state 728, "(1)"
+       line 404, ".input.spin", state 728, "(1)"
+       line 402, ".input.spin", state 733, "((i<1))"
+       line 402, ".input.spin", state 733, "((i>=1))"
+       line 409, ".input.spin", state 739, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 741, "(1)"
+       line 409, ".input.spin", state 742, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 409, ".input.spin", state 742, "else"
+       line 409, ".input.spin", state 745, "(1)"
+       line 409, ".input.spin", state 746, "(1)"
+       line 409, ".input.spin", state 746, "(1)"
+       line 413, ".input.spin", state 753, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 755, "(1)"
+       line 413, ".input.spin", state 756, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 413, ".input.spin", state 756, "else"
+       line 413, ".input.spin", state 759, "(1)"
+       line 413, ".input.spin", state 760, "(1)"
+       line 413, ".input.spin", state 760, "(1)"
+       line 411, ".input.spin", state 765, "((i<2))"
+       line 411, ".input.spin", state 765, "((i>=2))"
+       line 238, ".input.spin", state 771, "(1)"
+       line 242, ".input.spin", state 779, "(1)"
+       line 242, ".input.spin", state 780, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 242, ".input.spin", state 780, "else"
+       line 240, ".input.spin", state 785, "((i<1))"
+       line 240, ".input.spin", state 785, "((i>=1))"
+       line 246, ".input.spin", state 791, "(1)"
+       line 246, ".input.spin", state 792, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 246, ".input.spin", state 792, "else"
+       line 250, ".input.spin", state 799, "(1)"
+       line 250, ".input.spin", state 800, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 250, ".input.spin", state 800, "else"
+       line 255, ".input.spin", state 809, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 255, ".input.spin", state 809, "else"
+       line 420, ".input.spin", state 811, "(1)"
+       line 420, ".input.spin", state 811, "(1)"
+       line 400, ".input.spin", state 818, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 400, ".input.spin", state 820, "(1)"
+       line 400, ".input.spin", state 821, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 400, ".input.spin", state 821, "else"
+       line 400, ".input.spin", state 824, "(1)"
+       line 404, ".input.spin", state 832, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 404, ".input.spin", state 834, "(1)"
+       line 404, ".input.spin", state 835, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 404, ".input.spin", state 835, "else"
+       line 404, ".input.spin", state 838, "(1)"
+       line 404, ".input.spin", state 839, "(1)"
+       line 404, ".input.spin", state 839, "(1)"
+       line 402, ".input.spin", state 844, "((i<1))"
+       line 402, ".input.spin", state 844, "((i>=1))"
+       line 409, ".input.spin", state 850, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 852, "(1)"
+       line 409, ".input.spin", state 853, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 409, ".input.spin", state 853, "else"
+       line 409, ".input.spin", state 856, "(1)"
+       line 409, ".input.spin", state 857, "(1)"
+       line 409, ".input.spin", state 857, "(1)"
+       line 413, ".input.spin", state 864, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 866, "(1)"
+       line 413, ".input.spin", state 867, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 413, ".input.spin", state 867, "else"
+       line 413, ".input.spin", state 870, "(1)"
+       line 413, ".input.spin", state 871, "(1)"
+       line 413, ".input.spin", state 871, "(1)"
+       line 411, ".input.spin", state 876, "((i<2))"
+       line 411, ".input.spin", state 876, "((i>=2))"
+       line 238, ".input.spin", state 882, "(1)"
+       line 242, ".input.spin", state 890, "(1)"
+       line 242, ".input.spin", state 891, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 242, ".input.spin", state 891, "else"
+       line 240, ".input.spin", state 896, "((i<1))"
+       line 240, ".input.spin", state 896, "((i>=1))"
+       line 246, ".input.spin", state 902, "(1)"
+       line 246, ".input.spin", state 903, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 246, ".input.spin", state 903, "else"
+       line 250, ".input.spin", state 910, "(1)"
+       line 250, ".input.spin", state 911, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 250, ".input.spin", state 911, "else"
+       line 248, ".input.spin", state 916, "((i<2))"
+       line 248, ".input.spin", state 916, "((i>=2))"
+       line 255, ".input.spin", state 920, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 255, ".input.spin", state 920, "else"
+       line 420, ".input.spin", state 922, "(1)"
+       line 420, ".input.spin", state 922, "(1)"
+       line 687, ".input.spin", state 926, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 400, ".input.spin", state 931, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 963, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 977, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 995, "(1)"
+       line 246, ".input.spin", state 1015, "(1)"
+       line 250, ".input.spin", state 1023, "(1)"
+       line 400, ".input.spin", state 1045, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 1077, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 1091, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 1109, "(1)"
+       line 246, ".input.spin", state 1129, "(1)"
+       line 250, ".input.spin", state 1137, "(1)"
+       line 400, ".input.spin", state 1160, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 1192, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 1206, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 1224, "(1)"
+       line 246, ".input.spin", state 1244, "(1)"
+       line 250, ".input.spin", state 1252, "(1)"
+       line 400, ".input.spin", state 1271, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 1303, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 1317, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 1335, "(1)"
+       line 246, ".input.spin", state 1355, "(1)"
+       line 250, ".input.spin", state 1363, "(1)"
+       line 400, ".input.spin", state 1387, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 1419, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 1433, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 1451, "(1)"
+       line 246, ".input.spin", state 1471, "(1)"
+       line 250, ".input.spin", state 1479, "(1)"
+       line 400, ".input.spin", state 1498, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 1530, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 1544, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 1562, "(1)"
+       line 246, ".input.spin", state 1582, "(1)"
+       line 250, ".input.spin", state 1590, "(1)"
+       line 400, ".input.spin", state 1612, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 1644, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 1658, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 1676, "(1)"
+       line 246, ".input.spin", state 1696, "(1)"
+       line 250, ".input.spin", state 1704, "(1)"
+       line 726, ".input.spin", state 1723, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 400, ".input.spin", state 1730, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 1762, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 1776, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 1794, "(1)"
+       line 246, ".input.spin", state 1814, "(1)"
+       line 250, ".input.spin", state 1822, "(1)"
+       line 400, ".input.spin", state 1841, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 1873, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 1887, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 1905, "(1)"
+       line 246, ".input.spin", state 1925, "(1)"
+       line 250, ".input.spin", state 1933, "(1)"
+       line 400, ".input.spin", state 1954, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 400, ".input.spin", state 1956, "(1)"
+       line 400, ".input.spin", state 1957, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 400, ".input.spin", state 1957, "else"
+       line 400, ".input.spin", state 1960, "(1)"
+       line 404, ".input.spin", state 1968, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 404, ".input.spin", state 1970, "(1)"
+       line 404, ".input.spin", state 1971, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 404, ".input.spin", state 1971, "else"
+       line 404, ".input.spin", state 1974, "(1)"
+       line 404, ".input.spin", state 1975, "(1)"
+       line 404, ".input.spin", state 1975, "(1)"
+       line 402, ".input.spin", state 1980, "((i<1))"
+       line 402, ".input.spin", state 1980, "((i>=1))"
+       line 409, ".input.spin", state 1986, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 1988, "(1)"
+       line 409, ".input.spin", state 1989, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 409, ".input.spin", state 1989, "else"
+       line 409, ".input.spin", state 1992, "(1)"
+       line 409, ".input.spin", state 1993, "(1)"
+       line 409, ".input.spin", state 1993, "(1)"
+       line 413, ".input.spin", state 2000, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 2002, "(1)"
+       line 413, ".input.spin", state 2003, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 413, ".input.spin", state 2003, "else"
+       line 413, ".input.spin", state 2006, "(1)"
+       line 413, ".input.spin", state 2007, "(1)"
+       line 413, ".input.spin", state 2007, "(1)"
+       line 411, ".input.spin", state 2012, "((i<2))"
+       line 411, ".input.spin", state 2012, "((i>=2))"
+       line 238, ".input.spin", state 2018, "(1)"
+       line 242, ".input.spin", state 2026, "(1)"
+       line 242, ".input.spin", state 2027, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 242, ".input.spin", state 2027, "else"
+       line 240, ".input.spin", state 2032, "((i<1))"
+       line 240, ".input.spin", state 2032, "((i>=1))"
+       line 246, ".input.spin", state 2038, "(1)"
+       line 246, ".input.spin", state 2039, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 246, ".input.spin", state 2039, "else"
+       line 250, ".input.spin", state 2046, "(1)"
+       line 250, ".input.spin", state 2047, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 250, ".input.spin", state 2047, "else"
+       line 248, ".input.spin", state 2052, "((i<2))"
+       line 248, ".input.spin", state 2052, "((i>=2))"
+       line 255, ".input.spin", state 2056, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 255, ".input.spin", state 2056, "else"
+       line 420, ".input.spin", state 2058, "(1)"
+       line 420, ".input.spin", state 2058, "(1)"
+       line 726, ".input.spin", state 2061, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 726, ".input.spin", state 2062, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 726, ".input.spin", state 2063, "(1)"
+       line 400, ".input.spin", state 2070, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 2102, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 2116, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 2134, "(1)"
+       line 246, ".input.spin", state 2154, "(1)"
+       line 250, ".input.spin", state 2162, "(1)"
+       line 400, ".input.spin", state 2187, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 2219, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 2233, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 2251, "(1)"
+       line 246, ".input.spin", state 2271, "(1)"
+       line 250, ".input.spin", state 2279, "(1)"
+       line 400, ".input.spin", state 2298, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 2330, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 2344, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 2362, "(1)"
+       line 246, ".input.spin", state 2382, "(1)"
+       line 250, ".input.spin", state 2390, "(1)"
+       line 238, ".input.spin", state 2421, "(1)"
+       line 246, ".input.spin", state 2441, "(1)"
+       line 250, ".input.spin", state 2449, "(1)"
+       line 238, ".input.spin", state 2464, "(1)"
+       line 246, ".input.spin", state 2484, "(1)"
+       line 250, ".input.spin", state 2492, "(1)"
+       line 886, ".input.spin", state 2509, "-end-"
+       (246 of 2509 states)
+unreached in proctype urcu_writer
+       line 400, ".input.spin", state 18, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 404, ".input.spin", state 32, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 50, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 82, "(1)"
+       line 242, ".input.spin", state 90, "(1)"
+       line 246, ".input.spin", state 102, "(1)"
+       line 261, ".input.spin", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 265, ".input.spin", state 140, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 269, ".input.spin", state 153, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 400, ".input.spin", state 193, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 404, ".input.spin", state 207, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 225, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 239, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 257, "(1)"
+       line 242, ".input.spin", state 265, "(1)"
+       line 246, ".input.spin", state 277, "(1)"
+       line 250, ".input.spin", state 285, "(1)"
+       line 404, ".input.spin", state 320, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 338, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 352, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 242, ".input.spin", state 378, "(1)"
+       line 246, ".input.spin", state 390, "(1)"
+       line 250, ".input.spin", state 398, "(1)"
+       line 404, ".input.spin", state 440, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 458, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 472, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 242, ".input.spin", state 498, "(1)"
+       line 246, ".input.spin", state 510, "(1)"
+       line 250, ".input.spin", state 518, "(1)"
+       line 404, ".input.spin", state 551, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 569, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 583, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 242, ".input.spin", state 609, "(1)"
+       line 246, ".input.spin", state 621, "(1)"
+       line 250, ".input.spin", state 629, "(1)"
+       line 404, ".input.spin", state 664, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 409, ".input.spin", state 682, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, ".input.spin", state 696, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 242, ".input.spin", state 722, "(1)"
+       line 246, ".input.spin", state 734, "(1)"
+       line 250, ".input.spin", state 742, "(1)"
+       line 261, ".input.spin", state 795, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 265, ".input.spin", state 804, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 269, ".input.spin", state 819, "(1)"
+       line 273, ".input.spin", state 826, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 842, "(1)"
+       line 242, ".input.spin", state 850, "(1)"
+       line 246, ".input.spin", state 862, "(1)"
+       line 250, ".input.spin", state 870, "(1)"
+       line 261, ".input.spin", state 901, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 265, ".input.spin", state 910, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 269, ".input.spin", state 923, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 273, ".input.spin", state 932, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 948, "(1)"
+       line 242, ".input.spin", state 956, "(1)"
+       line 246, ".input.spin", state 968, "(1)"
+       line 250, ".input.spin", state 976, "(1)"
+       line 265, ".input.spin", state 1002, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 269, ".input.spin", state 1015, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 273, ".input.spin", state 1024, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 1040, "(1)"
+       line 242, ".input.spin", state 1048, "(1)"
+       line 246, ".input.spin", state 1060, "(1)"
+       line 250, ".input.spin", state 1068, "(1)"
+       line 261, ".input.spin", state 1099, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 265, ".input.spin", state 1108, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 269, ".input.spin", state 1121, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 273, ".input.spin", state 1130, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 1146, "(1)"
+       line 242, ".input.spin", state 1154, "(1)"
+       line 246, ".input.spin", state 1166, "(1)"
+       line 250, ".input.spin", state 1174, "(1)"
+       line 265, ".input.spin", state 1200, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 269, ".input.spin", state 1213, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 273, ".input.spin", state 1222, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 1238, "(1)"
+       line 242, ".input.spin", state 1246, "(1)"
+       line 246, ".input.spin", state 1258, "(1)"
+       line 250, ".input.spin", state 1266, "(1)"
+       line 261, ".input.spin", state 1297, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 265, ".input.spin", state 1306, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 269, ".input.spin", state 1319, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 273, ".input.spin", state 1328, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 1344, "(1)"
+       line 242, ".input.spin", state 1352, "(1)"
+       line 246, ".input.spin", state 1364, "(1)"
+       line 250, ".input.spin", state 1372, "(1)"
+       line 265, ".input.spin", state 1398, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 269, ".input.spin", state 1411, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 273, ".input.spin", state 1420, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 1436, "(1)"
+       line 242, ".input.spin", state 1444, "(1)"
+       line 246, ".input.spin", state 1456, "(1)"
+       line 250, ".input.spin", state 1464, "(1)"
+       line 261, ".input.spin", state 1495, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 265, ".input.spin", state 1504, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 269, ".input.spin", state 1517, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 273, ".input.spin", state 1526, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, ".input.spin", state 1542, "(1)"
+       line 242, ".input.spin", state 1550, "(1)"
+       line 246, ".input.spin", state 1562, "(1)"
+       line 250, ".input.spin", state 1570, "(1)"
+       line 1213, ".input.spin", state 1586, "-end-"
+       (103 of 1586 states)
+unreached in proctype :init:
+       (0 of 78 states)
+
+pan: elapsed time 291 seconds
+pan: rate 41688.913 states/second
+pan: avg transition delay 2.2686e-06 usec
+cp .input.spin asserts.spin.input
+cp .input.spin.trail asserts.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow'
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/asserts.spin.input b/formal-model/results/urcu-controldataflow-ipi-intel/asserts.spin.input
new file mode 100644 (file)
index 0000000..8afab6a
--- /dev/null
@@ -0,0 +1,1249 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered as a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. c.f.
+ * http://www.linuxjournal.com/article/8212) */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process have its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barriers dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader  smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, producetoken)                          \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, READ_PROD_A_READ << base) ->  \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base,                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_C_IF_TRUE_READ     /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);            \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);        \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB | READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation has the barrier
+                        * because the performance impact added by a branch in the common case does not
+                        * justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_UNLOCK_OUT             /* RAW */
+                                           | READ_PROC_SECOND_MB       /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB        /* mb() orders reads */
+                                           | READ_LOCK_NESTED_OUT      /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT,   /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event mb()s are removed (execution using signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * to spill its execution on other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test reader's progress here, because we are mainly
+                * interested in writer's progress. The reader never blocks
+                * anyway. We have to test for reader/writer's progress
+                * separately, otherwise we could think the writer is doing
+                * progress when it's blocked by an always progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existing dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we are always starting by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT         /* Control dependency : need to branch out of
+                                                                * the loop to execute the next flip (CHECK) */
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill on the next loop execution. Given that the validation
+                * checks for the data entry read to be poisoned, it's ok if
+                * we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/references.txt b/formal-model/results/urcu-controldataflow-ipi-intel/references.txt
new file mode 100644 (file)
index 0000000..72c67a2
--- /dev/null
@@ -0,0 +1,13 @@
+http://spinroot.com/spin/Man/ltl.html
+http://en.wikipedia.org/wiki/Linear_temporal_logic
+http://www.dcs.gla.ac.uk/~muffy/MRS4-2002/lect11.ppt
+
+http://www.lsv.ens-cachan.fr/~gastin/ltl2ba/index.php
+http://spinroot.com/spin/Man/index.html
+http://spinroot.com/spin/Man/promela.html
+
+LTL vs CTL :
+
+http://spinroot.com/spin/Doc/course/lecture12.pdf p. 9, p. 15, p. 18
+http://www-i2.informatik.rwth-aachen.de/i2/fileadmin/user_upload/documents/Introduction_to_Model_Checking/mc_lec18.pdf
+  (downloaded)
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu.sh b/formal-model/results/urcu-controldataflow-ipi-intel/urcu.sh
new file mode 100644 (file)
index 0000000..65ff517
--- /dev/null
@@ -0,0 +1,29 @@
+#!/bin/sh
+#
+# Compiles and runs the urcu.spin Promela model.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright (C) IBM Corporation, 2009
+#               Mathieu Desnoyers, 2009
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+#          Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+
+# Basic execution, without LTL clauses. See Makefile.
+
+# Generate the pan.c model-checker source from the Promela model.
+spin -a urcu.spin
+# Build the verifier; -DSAFETY restricts checking to safety properties.
+cc -DSAFETY -o pan pan.c
+# Run the verifier.  -m sets the maximum search depth and -w the hash
+# table size exponent; see Spin's pan documentation for the other flags.
+./pan -v -c1 -X -m10000000 -w21
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu.spin b/formal-model/results/urcu-controldataflow-ipi-intel/urcu.spin
new file mode 100644 (file)
index 0000000..dd5d17d
--- /dev/null
@@ -0,0 +1,1231 @@
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered as a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. c.f.
+ * http://www.linuxjournal.com/article/8212)
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               /*
+                * Model a read barrier: refresh this process's cached copy
+                * of every shared variable from main memory.  Only clean
+                * cache lines are updated (see CACHE_READ_FROM_MEM).
+                */
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               /*
+                * Model a write barrier: flush every dirty cache line of
+                * this process to main memory (see CACHE_WRITE_TO_MEM;
+                * clean lines are left untouched).
+                */
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               /* Full barrier: flush all dirty lines, then refresh all
+                * clean lines, in one indivisible step. */
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barriers dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests sent by the writer (REMOTE_BARRIERS
+ * mode).  Each loop iteration either services a pending request — execute a
+ * full smp_mb() and acknowledge by clearing the per-reader flag — or
+ * non-deterministically breaks out, modeling a reader that may ignore
+ * barrier requests entirely.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+/*
+ * Fix: smp_mb_reader was defined as an object-like macro, so call sites of
+ * the form smp_mb_reader(i, j) expanded to "smp_mb(i)(i, j)".  Give it the
+ * same (i, j) parameter list as the REMOTE_BARRIERS variant above.
+ */
+#define smp_mb_reader(i, j)    smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+/* Busy-wait until init { } signals that shared state is fully set up. */
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+/*
+ * Model out-of-order memory effects: non-deterministically flush any dirty
+ * cache line to memory, and — only when the architecture allows out-of-order
+ * cache bank loads (HAVE_OOO_CACHE_READ) — non-deterministically refresh
+ * clean lines from memory.  Otherwise dependent reads stay ordered, so a
+ * plain smp_rmb() is performed instead.
+ */
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, producetoken)                          \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, READ_PROD_A_READ << base) ->  \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base,                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_C_IF_TRUE_READ     /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);            \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);        \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB | READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation has the barrier
+                        * because the performance impact added by a branch in the common case does not
+                        * justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_UNLOCK_OUT             /* RAW */
+                                           | READ_PROC_SECOND_MB       /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB        /* mb() orders reads */
+                                           | READ_LOCK_NESTED_OUT      /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT,   /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event mb()s are removed (execution using signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * to spill its execution on other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test reader's progress here, because we are mainly
+                * interested in writer's progress. The reader never blocks
+                * anyway. We have to test for reader/writer's progress
+                * separately, otherwise we could think the writer is doing
+                * progress when it's blocked by an always progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existing dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we are always starting by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT         /* Control dependency : need to branch out of
+                                                                * the loop to execute the next flip (CHECK) */
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill on the next loop execution. Given the validation we do
+                * checks for the data entry read to be poisoned, it's ok if
+                * we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free.log b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free.log
new file mode 100644 (file)
index 0000000..357c5a6
--- /dev/null
@@ -0,0 +1,484 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1271)
+Depth=    6767 States=    1e+06 Transitions= 5.89e+06 Memory=   550.432        t=   14.3 R=   7e+04
+Depth=    6767 States=    2e+06 Transitions= 1.64e+07 Memory=   634.318        t=     43 R=   5e+04
+Depth=    6767 States=    3e+06 Transitions= 3.29e+07 Memory=   718.303        t=     90 R=   3e+04
+pan: resizing hashtable to -w22..  done
+Depth=    6767 States=    4e+06 Transitions= 4.33e+07 Memory=   833.311        t=    119 R=   3e+04
+Depth=    6767 States=    5e+06 Transitions= 5.21e+07 Memory=   917.295        t=    142 R=   4e+04
+Depth=    6767 States=    6e+06 Transitions= 5.99e+07 Memory=  1001.279        t=    163 R=   4e+04
+Depth=    6767 States=    7e+06 Transitions= 7.13e+07 Memory=  1085.264        t=    195 R=   4e+04
+Depth=    6767 States=    8e+06 Transitions= 8.71e+07 Memory=  1169.151        t=    240 R=   3e+04
+Depth=    6767 States=    9e+06 Transitions= 9.81e+07 Memory=  1253.135        t=    270 R=   3e+04
+pan: resizing hashtable to -w24..  done
+Depth=    6767 States=    1e+07 Transitions= 1.07e+08 Memory=  1461.115        t=    296 R=   3e+04
+Depth=    6767 States=  1.1e+07 Transitions= 1.15e+08 Memory=  1545.100        t=    315 R=   3e+04
+Depth=    6767 States=  1.2e+07 Transitions= 1.27e+08 Memory=  1629.084        t=    349 R=   3e+04
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 6767, errors: 0
+ 12141896 states, stored
+1.1627209e+08 states, matched
+1.2841399e+08 transitions (= stored+matched)
+1.9407117e+09 atomic steps
+hash conflicts:  98962276 (resolved)
+
+Stats on memory usage (in Megabytes):
+ 1343.212      equivalent memory usage for states (stored*(State-vector + overhead))
+ 1055.903      actual memory usage for states (compression: 78.61%)
+               state-vector as stored = 63 byte + 28 byte overhead
+  128.000      memory used for hash table (-w24)
+  457.764      memory used for DFS stack (-m10000000)
+ 1640.998      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 261, "pan.___", state 30, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 269, "pan.___", state 52, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 273, "pan.___", state 61, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 77, "(1)"
+       line 242, "pan.___", state 85, "(1)"
+       line 246, "pan.___", state 97, "(1)"
+       line 250, "pan.___", state 105, "(1)"
+       line 400, "pan.___", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 163, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 177, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 195, "(1)"
+       line 246, "pan.___", state 215, "(1)"
+       line 250, "pan.___", state 223, "(1)"
+       line 679, "pan.___", state 242, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 400, "pan.___", state 249, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 281, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 295, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 313, "(1)"
+       line 246, "pan.___", state 333, "(1)"
+       line 250, "pan.___", state 341, "(1)"
+       line 400, "pan.___", state 360, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 392, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 406, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 424, "(1)"
+       line 246, "pan.___", state 444, "(1)"
+       line 250, "pan.___", state 452, "(1)"
+       line 400, "pan.___", state 473, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 400, "pan.___", state 475, "(1)"
+       line 400, "pan.___", state 476, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 400, "pan.___", state 476, "else"
+       line 400, "pan.___", state 479, "(1)"
+       line 404, "pan.___", state 487, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 404, "pan.___", state 489, "(1)"
+       line 404, "pan.___", state 490, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 404, "pan.___", state 490, "else"
+       line 404, "pan.___", state 493, "(1)"
+       line 404, "pan.___", state 494, "(1)"
+       line 404, "pan.___", state 494, "(1)"
+       line 402, "pan.___", state 499, "((i<1))"
+       line 402, "pan.___", state 499, "((i>=1))"
+       line 409, "pan.___", state 505, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 507, "(1)"
+       line 409, "pan.___", state 508, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 409, "pan.___", state 508, "else"
+       line 409, "pan.___", state 511, "(1)"
+       line 409, "pan.___", state 512, "(1)"
+       line 409, "pan.___", state 512, "(1)"
+       line 413, "pan.___", state 519, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 521, "(1)"
+       line 413, "pan.___", state 522, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 413, "pan.___", state 522, "else"
+       line 413, "pan.___", state 525, "(1)"
+       line 413, "pan.___", state 526, "(1)"
+       line 413, "pan.___", state 526, "(1)"
+       line 411, "pan.___", state 531, "((i<2))"
+       line 411, "pan.___", state 531, "((i>=2))"
+       line 238, "pan.___", state 537, "(1)"
+       line 242, "pan.___", state 545, "(1)"
+       line 242, "pan.___", state 546, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 242, "pan.___", state 546, "else"
+       line 240, "pan.___", state 551, "((i<1))"
+       line 240, "pan.___", state 551, "((i>=1))"
+       line 246, "pan.___", state 557, "(1)"
+       line 246, "pan.___", state 558, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 246, "pan.___", state 558, "else"
+       line 250, "pan.___", state 565, "(1)"
+       line 250, "pan.___", state 566, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 250, "pan.___", state 566, "else"
+       line 248, "pan.___", state 571, "((i<2))"
+       line 248, "pan.___", state 571, "((i>=2))"
+       line 255, "pan.___", state 575, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 255, "pan.___", state 575, "else"
+       line 420, "pan.___", state 577, "(1)"
+       line 420, "pan.___", state 577, "(1)"
+       line 679, "pan.___", state 580, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 679, "pan.___", state 581, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 679, "pan.___", state 582, "(1)"
+       line 400, "pan.___", state 589, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 621, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 635, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 653, "(1)"
+       line 246, "pan.___", state 673, "(1)"
+       line 250, "pan.___", state 681, "(1)"
+       line 400, "pan.___", state 707, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 400, "pan.___", state 709, "(1)"
+       line 400, "pan.___", state 710, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 400, "pan.___", state 710, "else"
+       line 400, "pan.___", state 713, "(1)"
+       line 404, "pan.___", state 721, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 404, "pan.___", state 723, "(1)"
+       line 404, "pan.___", state 724, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 404, "pan.___", state 724, "else"
+       line 404, "pan.___", state 727, "(1)"
+       line 404, "pan.___", state 728, "(1)"
+       line 404, "pan.___", state 728, "(1)"
+       line 402, "pan.___", state 733, "((i<1))"
+       line 402, "pan.___", state 733, "((i>=1))"
+       line 409, "pan.___", state 739, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 741, "(1)"
+       line 409, "pan.___", state 742, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 409, "pan.___", state 742, "else"
+       line 409, "pan.___", state 745, "(1)"
+       line 409, "pan.___", state 746, "(1)"
+       line 409, "pan.___", state 746, "(1)"
+       line 413, "pan.___", state 753, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 755, "(1)"
+       line 413, "pan.___", state 756, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 413, "pan.___", state 756, "else"
+       line 413, "pan.___", state 759, "(1)"
+       line 413, "pan.___", state 760, "(1)"
+       line 413, "pan.___", state 760, "(1)"
+       line 411, "pan.___", state 765, "((i<2))"
+       line 411, "pan.___", state 765, "((i>=2))"
+       line 238, "pan.___", state 771, "(1)"
+       line 242, "pan.___", state 779, "(1)"
+       line 242, "pan.___", state 780, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 242, "pan.___", state 780, "else"
+       line 240, "pan.___", state 785, "((i<1))"
+       line 240, "pan.___", state 785, "((i>=1))"
+       line 246, "pan.___", state 791, "(1)"
+       line 246, "pan.___", state 792, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 246, "pan.___", state 792, "else"
+       line 250, "pan.___", state 799, "(1)"
+       line 250, "pan.___", state 800, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 250, "pan.___", state 800, "else"
+       line 255, "pan.___", state 809, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 255, "pan.___", state 809, "else"
+       line 420, "pan.___", state 811, "(1)"
+       line 420, "pan.___", state 811, "(1)"
+       line 400, "pan.___", state 818, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 400, "pan.___", state 820, "(1)"
+       line 400, "pan.___", state 821, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 400, "pan.___", state 821, "else"
+       line 400, "pan.___", state 824, "(1)"
+       line 404, "pan.___", state 832, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 404, "pan.___", state 834, "(1)"
+       line 404, "pan.___", state 835, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 404, "pan.___", state 835, "else"
+       line 404, "pan.___", state 838, "(1)"
+       line 404, "pan.___", state 839, "(1)"
+       line 404, "pan.___", state 839, "(1)"
+       line 402, "pan.___", state 844, "((i<1))"
+       line 402, "pan.___", state 844, "((i>=1))"
+       line 409, "pan.___", state 850, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 852, "(1)"
+       line 409, "pan.___", state 853, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 409, "pan.___", state 853, "else"
+       line 409, "pan.___", state 856, "(1)"
+       line 409, "pan.___", state 857, "(1)"
+       line 409, "pan.___", state 857, "(1)"
+       line 413, "pan.___", state 864, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 866, "(1)"
+       line 413, "pan.___", state 867, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 413, "pan.___", state 867, "else"
+       line 413, "pan.___", state 870, "(1)"
+       line 413, "pan.___", state 871, "(1)"
+       line 413, "pan.___", state 871, "(1)"
+       line 411, "pan.___", state 876, "((i<2))"
+       line 411, "pan.___", state 876, "((i>=2))"
+       line 238, "pan.___", state 882, "(1)"
+       line 242, "pan.___", state 890, "(1)"
+       line 242, "pan.___", state 891, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 242, "pan.___", state 891, "else"
+       line 240, "pan.___", state 896, "((i<1))"
+       line 240, "pan.___", state 896, "((i>=1))"
+       line 246, "pan.___", state 902, "(1)"
+       line 246, "pan.___", state 903, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 246, "pan.___", state 903, "else"
+       line 250, "pan.___", state 910, "(1)"
+       line 250, "pan.___", state 911, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 250, "pan.___", state 911, "else"
+       line 248, "pan.___", state 916, "((i<2))"
+       line 248, "pan.___", state 916, "((i>=2))"
+       line 255, "pan.___", state 920, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 255, "pan.___", state 920, "else"
+       line 420, "pan.___", state 922, "(1)"
+       line 420, "pan.___", state 922, "(1)"
+       line 687, "pan.___", state 926, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 400, "pan.___", state 931, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 963, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 977, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 995, "(1)"
+       line 246, "pan.___", state 1015, "(1)"
+       line 250, "pan.___", state 1023, "(1)"
+       line 400, "pan.___", state 1045, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 1077, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 1091, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 1109, "(1)"
+       line 246, "pan.___", state 1129, "(1)"
+       line 250, "pan.___", state 1137, "(1)"
+       line 400, "pan.___", state 1160, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 1192, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 1206, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 1224, "(1)"
+       line 246, "pan.___", state 1244, "(1)"
+       line 250, "pan.___", state 1252, "(1)"
+       line 400, "pan.___", state 1271, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 1303, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 1317, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 1335, "(1)"
+       line 246, "pan.___", state 1355, "(1)"
+       line 250, "pan.___", state 1363, "(1)"
+       line 400, "pan.___", state 1387, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 1419, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 1433, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 1451, "(1)"
+       line 246, "pan.___", state 1471, "(1)"
+       line 250, "pan.___", state 1479, "(1)"
+       line 400, "pan.___", state 1498, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 1530, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 1544, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 1562, "(1)"
+       line 246, "pan.___", state 1582, "(1)"
+       line 250, "pan.___", state 1590, "(1)"
+       line 400, "pan.___", state 1612, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 1644, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 1658, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 1676, "(1)"
+       line 246, "pan.___", state 1696, "(1)"
+       line 250, "pan.___", state 1704, "(1)"
+       line 726, "pan.___", state 1723, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 400, "pan.___", state 1730, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 1762, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 1776, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 1794, "(1)"
+       line 246, "pan.___", state 1814, "(1)"
+       line 250, "pan.___", state 1822, "(1)"
+       line 400, "pan.___", state 1841, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 1873, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 1887, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 1905, "(1)"
+       line 246, "pan.___", state 1925, "(1)"
+       line 250, "pan.___", state 1933, "(1)"
+       line 400, "pan.___", state 1954, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 400, "pan.___", state 1956, "(1)"
+       line 400, "pan.___", state 1957, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 400, "pan.___", state 1957, "else"
+       line 400, "pan.___", state 1960, "(1)"
+       line 404, "pan.___", state 1968, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 404, "pan.___", state 1970, "(1)"
+       line 404, "pan.___", state 1971, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 404, "pan.___", state 1971, "else"
+       line 404, "pan.___", state 1974, "(1)"
+       line 404, "pan.___", state 1975, "(1)"
+       line 404, "pan.___", state 1975, "(1)"
+       line 402, "pan.___", state 1980, "((i<1))"
+       line 402, "pan.___", state 1980, "((i>=1))"
+       line 409, "pan.___", state 1986, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 1988, "(1)"
+       line 409, "pan.___", state 1989, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 409, "pan.___", state 1989, "else"
+       line 409, "pan.___", state 1992, "(1)"
+       line 409, "pan.___", state 1993, "(1)"
+       line 409, "pan.___", state 1993, "(1)"
+       line 413, "pan.___", state 2000, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 2002, "(1)"
+       line 413, "pan.___", state 2003, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 413, "pan.___", state 2003, "else"
+       line 413, "pan.___", state 2006, "(1)"
+       line 413, "pan.___", state 2007, "(1)"
+       line 413, "pan.___", state 2007, "(1)"
+       line 411, "pan.___", state 2012, "((i<2))"
+       line 411, "pan.___", state 2012, "((i>=2))"
+       line 238, "pan.___", state 2018, "(1)"
+       line 242, "pan.___", state 2026, "(1)"
+       line 242, "pan.___", state 2027, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 242, "pan.___", state 2027, "else"
+       line 240, "pan.___", state 2032, "((i<1))"
+       line 240, "pan.___", state 2032, "((i>=1))"
+       line 246, "pan.___", state 2038, "(1)"
+       line 246, "pan.___", state 2039, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 246, "pan.___", state 2039, "else"
+       line 250, "pan.___", state 2046, "(1)"
+       line 250, "pan.___", state 2047, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 250, "pan.___", state 2047, "else"
+       line 248, "pan.___", state 2052, "((i<2))"
+       line 248, "pan.___", state 2052, "((i>=2))"
+       line 255, "pan.___", state 2056, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 255, "pan.___", state 2056, "else"
+       line 420, "pan.___", state 2058, "(1)"
+       line 420, "pan.___", state 2058, "(1)"
+       line 726, "pan.___", state 2061, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 726, "pan.___", state 2062, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 726, "pan.___", state 2063, "(1)"
+       line 400, "pan.___", state 2070, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 2102, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 2116, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 2134, "(1)"
+       line 246, "pan.___", state 2154, "(1)"
+       line 250, "pan.___", state 2162, "(1)"
+       line 400, "pan.___", state 2187, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 2219, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 2233, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 2251, "(1)"
+       line 246, "pan.___", state 2271, "(1)"
+       line 250, "pan.___", state 2279, "(1)"
+       line 400, "pan.___", state 2298, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 2330, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 2344, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 2362, "(1)"
+       line 246, "pan.___", state 2382, "(1)"
+       line 250, "pan.___", state 2390, "(1)"
+       line 238, "pan.___", state 2421, "(1)"
+       line 246, "pan.___", state 2441, "(1)"
+       line 250, "pan.___", state 2449, "(1)"
+       line 238, "pan.___", state 2464, "(1)"
+       line 246, "pan.___", state 2484, "(1)"
+       line 250, "pan.___", state 2492, "(1)"
+       line 886, "pan.___", state 2509, "-end-"
+       (246 of 2509 states)
+unreached in proctype urcu_writer
+       line 400, "pan.___", state 18, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 404, "pan.___", state 32, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 50, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 82, "(1)"
+       line 242, "pan.___", state 90, "(1)"
+       line 246, "pan.___", state 102, "(1)"
+       line 261, "pan.___", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 265, "pan.___", state 140, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 269, "pan.___", state 153, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 400, "pan.___", state 193, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 404, "pan.___", state 207, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 225, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 239, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 257, "(1)"
+       line 242, "pan.___", state 265, "(1)"
+       line 246, "pan.___", state 277, "(1)"
+       line 250, "pan.___", state 285, "(1)"
+       line 404, "pan.___", state 320, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 338, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 352, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 242, "pan.___", state 378, "(1)"
+       line 246, "pan.___", state 390, "(1)"
+       line 250, "pan.___", state 398, "(1)"
+       line 404, "pan.___", state 440, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 458, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 472, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 242, "pan.___", state 498, "(1)"
+       line 246, "pan.___", state 510, "(1)"
+       line 250, "pan.___", state 518, "(1)"
+       line 404, "pan.___", state 551, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 569, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 583, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 242, "pan.___", state 609, "(1)"
+       line 246, "pan.___", state 621, "(1)"
+       line 250, "pan.___", state 629, "(1)"
+       line 404, "pan.___", state 664, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 409, "pan.___", state 682, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 413, "pan.___", state 696, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 242, "pan.___", state 722, "(1)"
+       line 246, "pan.___", state 734, "(1)"
+       line 250, "pan.___", state 742, "(1)"
+       line 261, "pan.___", state 795, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 265, "pan.___", state 804, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 269, "pan.___", state 819, "(1)"
+       line 273, "pan.___", state 826, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 842, "(1)"
+       line 242, "pan.___", state 850, "(1)"
+       line 246, "pan.___", state 862, "(1)"
+       line 250, "pan.___", state 870, "(1)"
+       line 261, "pan.___", state 901, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 265, "pan.___", state 910, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 269, "pan.___", state 923, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 273, "pan.___", state 932, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 948, "(1)"
+       line 242, "pan.___", state 956, "(1)"
+       line 246, "pan.___", state 968, "(1)"
+       line 250, "pan.___", state 976, "(1)"
+       line 265, "pan.___", state 1002, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 269, "pan.___", state 1015, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 273, "pan.___", state 1024, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 1040, "(1)"
+       line 242, "pan.___", state 1048, "(1)"
+       line 246, "pan.___", state 1060, "(1)"
+       line 250, "pan.___", state 1068, "(1)"
+       line 261, "pan.___", state 1099, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 265, "pan.___", state 1108, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 269, "pan.___", state 1121, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 273, "pan.___", state 1130, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 1146, "(1)"
+       line 242, "pan.___", state 1154, "(1)"
+       line 246, "pan.___", state 1166, "(1)"
+       line 250, "pan.___", state 1174, "(1)"
+       line 265, "pan.___", state 1200, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 269, "pan.___", state 1213, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 273, "pan.___", state 1222, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 1238, "(1)"
+       line 242, "pan.___", state 1246, "(1)"
+       line 246, "pan.___", state 1258, "(1)"
+       line 250, "pan.___", state 1266, "(1)"
+       line 261, "pan.___", state 1297, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 265, "pan.___", state 1306, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 269, "pan.___", state 1319, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 273, "pan.___", state 1328, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 1344, "(1)"
+       line 242, "pan.___", state 1352, "(1)"
+       line 246, "pan.___", state 1364, "(1)"
+       line 250, "pan.___", state 1372, "(1)"
+       line 265, "pan.___", state 1398, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 269, "pan.___", state 1411, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 273, "pan.___", state 1420, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 1436, "(1)"
+       line 242, "pan.___", state 1444, "(1)"
+       line 246, "pan.___", state 1456, "(1)"
+       line 250, "pan.___", state 1464, "(1)"
+       line 261, "pan.___", state 1495, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 265, "pan.___", state 1504, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 269, "pan.___", state 1517, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 273, "pan.___", state 1526, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 238, "pan.___", state 1542, "(1)"
+       line 242, "pan.___", state 1550, "(1)"
+       line 246, "pan.___", state 1562, "(1)"
+       line 250, "pan.___", state 1570, "(1)"
+       line 1213, "pan.___", state 1586, "-end-"
+       (103 of 1586 states)
+unreached in proctype :init:
+       (0 of 78 states)
+unreached in proctype :never:
+       line 1276, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 353 seconds
+pan: rate 34395.332 states/second
+pan: avg transition delay 2.749e-06 usec
+cp .input.spin urcu_free.spin.input
+cp .input.spin.trail urcu_free.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow'
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free.ltl b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free.ltl
new file mode 100644 (file)
index 0000000..6be1be9
--- /dev/null
@@ -0,0 +1 @@
+[] (!read_poison)
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free.spin.input b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free.spin.input
new file mode 100644 (file)
index 0000000..8afab6a
--- /dev/null
@@ -0,0 +1,1249 @@
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered as a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. c.f.
+ * http://www.linuxjournal.com/article/8212)
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barriers dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader  smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, producetoken)                          \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, READ_PROD_A_READ << base) ->  \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base,                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_C_IF_TRUE_READ     /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);            \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);        \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB | READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation has the barrier
+                        * because the performance impact added by a branch in the common case does not
+                        * justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_UNLOCK_OUT             /* RAW */
+                                           | READ_PROC_SECOND_MB       /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB        /* mb() orders reads */
+                                           | READ_LOCK_NESTED_OUT      /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT,   /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event mb()s are removed (execution using signal
+        * handler to promote barrier()() -> smp_mb()), nothing prevents one loop
+        * to spill its execution on other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test reader's progress here, because we are mainly
+                * interested in writer's progress. The reader never blocks
+                * anyway. We have to test for reader/writer's progress
+                * separately, otherwise we could think the writer is doing
+                * progress when it's blocked by an always progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existing dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we are always starting by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT         /* Control dependency : need to branch out of
+                                                                * the loop to execute the next flip (CHECK) */
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill on the next loop execution. Given the validation we do
+                * checks for the data entry read to be poisoned, it's ok if
+                * we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_nested.define b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_nested.define
new file mode 100644 (file)
index 0000000..0fb59bd
--- /dev/null
@@ -0,0 +1 @@
+#define READER_NEST_LEVEL 2
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_mb.define b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_mb.define
new file mode 100644 (file)
index 0000000..d99d793
--- /dev/null
@@ -0,0 +1 @@
+#define NO_MB
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_mb.log b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_mb.log
new file mode 100644 (file)
index 0000000..15f7bc2
--- /dev/null
@@ -0,0 +1,624 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_no_mb.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1272)
+Depth=    5327 States=    1e+06 Transitions= 7.34e+06 Memory=   550.432        t=   18.2 R=   6e+04
+pan: claim violated! (at depth 1301)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 5456, errors: 1
+  1674433 states, stored
+ 12624035 states, matched
+ 14298468 transitions (= stored+matched)
+2.0423758e+08 atomic steps
+hash conflicts:   6074438 (resolved)
+
+Stats on memory usage (in Megabytes):
+  185.236      equivalent memory usage for states (stored*(State-vector + overhead))
+  141.360      actual memory usage for states (compression: 76.31%)
+               state-vector as stored = 61 byte + 28 byte overhead
+    8.000      memory used for hash table (-w20)
+  457.764      memory used for DFS stack (-m10000000)
+  606.975      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 262, "pan.___", state 34, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 56, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 65, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 81, "(1)"
+       line 243, "pan.___", state 89, "(1)"
+       line 247, "pan.___", state 101, "(1)"
+       line 251, "pan.___", state 109, "(1)"
+       line 401, "pan.___", state 135, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 167, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 181, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 199, "(1)"
+       line 247, "pan.___", state 219, "(1)"
+       line 251, "pan.___", state 227, "(1)"
+       line 689, "pan.___", state 246, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 401, "pan.___", state 253, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 285, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 299, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 317, "(1)"
+       line 247, "pan.___", state 337, "(1)"
+       line 251, "pan.___", state 345, "(1)"
+       line 401, "pan.___", state 364, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 396, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 410, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 428, "(1)"
+       line 247, "pan.___", state 448, "(1)"
+       line 251, "pan.___", state 456, "(1)"
+       line 401, "pan.___", state 477, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 479, "(1)"
+       line 401, "pan.___", state 480, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 480, "else"
+       line 401, "pan.___", state 483, "(1)"
+       line 405, "pan.___", state 491, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 493, "(1)"
+       line 405, "pan.___", state 494, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 494, "else"
+       line 405, "pan.___", state 497, "(1)"
+       line 405, "pan.___", state 498, "(1)"
+       line 405, "pan.___", state 498, "(1)"
+       line 403, "pan.___", state 503, "((i<1))"
+       line 403, "pan.___", state 503, "((i>=1))"
+       line 410, "pan.___", state 509, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 511, "(1)"
+       line 410, "pan.___", state 512, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 512, "else"
+       line 410, "pan.___", state 515, "(1)"
+       line 410, "pan.___", state 516, "(1)"
+       line 410, "pan.___", state 516, "(1)"
+       line 414, "pan.___", state 523, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 525, "(1)"
+       line 414, "pan.___", state 526, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 526, "else"
+       line 414, "pan.___", state 529, "(1)"
+       line 414, "pan.___", state 530, "(1)"
+       line 414, "pan.___", state 530, "(1)"
+       line 412, "pan.___", state 535, "((i<2))"
+       line 412, "pan.___", state 535, "((i>=2))"
+       line 239, "pan.___", state 541, "(1)"
+       line 243, "pan.___", state 549, "(1)"
+       line 243, "pan.___", state 550, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 550, "else"
+       line 241, "pan.___", state 555, "((i<1))"
+       line 241, "pan.___", state 555, "((i>=1))"
+       line 247, "pan.___", state 561, "(1)"
+       line 247, "pan.___", state 562, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 562, "else"
+       line 251, "pan.___", state 569, "(1)"
+       line 251, "pan.___", state 570, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 570, "else"
+       line 249, "pan.___", state 575, "((i<2))"
+       line 249, "pan.___", state 575, "((i>=2))"
+       line 256, "pan.___", state 579, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 579, "else"
+       line 421, "pan.___", state 581, "(1)"
+       line 421, "pan.___", state 581, "(1)"
+       line 689, "pan.___", state 584, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 689, "pan.___", state 585, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 689, "pan.___", state 586, "(1)"
+       line 401, "pan.___", state 593, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 625, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 639, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 657, "(1)"
+       line 247, "pan.___", state 677, "(1)"
+       line 251, "pan.___", state 685, "(1)"
+       line 401, "pan.___", state 711, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 713, "(1)"
+       line 401, "pan.___", state 714, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 714, "else"
+       line 401, "pan.___", state 717, "(1)"
+       line 405, "pan.___", state 725, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 727, "(1)"
+       line 405, "pan.___", state 728, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 728, "else"
+       line 405, "pan.___", state 731, "(1)"
+       line 405, "pan.___", state 732, "(1)"
+       line 405, "pan.___", state 732, "(1)"
+       line 403, "pan.___", state 737, "((i<1))"
+       line 403, "pan.___", state 737, "((i>=1))"
+       line 410, "pan.___", state 743, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 745, "(1)"
+       line 410, "pan.___", state 746, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 746, "else"
+       line 410, "pan.___", state 749, "(1)"
+       line 410, "pan.___", state 750, "(1)"
+       line 410, "pan.___", state 750, "(1)"
+       line 414, "pan.___", state 757, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 759, "(1)"
+       line 414, "pan.___", state 760, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 760, "else"
+       line 414, "pan.___", state 763, "(1)"
+       line 414, "pan.___", state 764, "(1)"
+       line 414, "pan.___", state 764, "(1)"
+       line 412, "pan.___", state 769, "((i<2))"
+       line 412, "pan.___", state 769, "((i>=2))"
+       line 239, "pan.___", state 775, "(1)"
+       line 243, "pan.___", state 783, "(1)"
+       line 243, "pan.___", state 784, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 784, "else"
+       line 241, "pan.___", state 789, "((i<1))"
+       line 241, "pan.___", state 789, "((i>=1))"
+       line 247, "pan.___", state 795, "(1)"
+       line 247, "pan.___", state 796, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 796, "else"
+       line 251, "pan.___", state 803, "(1)"
+       line 251, "pan.___", state 804, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 804, "else"
+       line 256, "pan.___", state 813, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 813, "else"
+       line 421, "pan.___", state 815, "(1)"
+       line 421, "pan.___", state 815, "(1)"
+       line 401, "pan.___", state 822, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 824, "(1)"
+       line 401, "pan.___", state 825, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 825, "else"
+       line 401, "pan.___", state 828, "(1)"
+       line 405, "pan.___", state 836, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 838, "(1)"
+       line 405, "pan.___", state 839, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 839, "else"
+       line 405, "pan.___", state 842, "(1)"
+       line 405, "pan.___", state 843, "(1)"
+       line 405, "pan.___", state 843, "(1)"
+       line 403, "pan.___", state 848, "((i<1))"
+       line 403, "pan.___", state 848, "((i>=1))"
+       line 410, "pan.___", state 854, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 856, "(1)"
+       line 410, "pan.___", state 857, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 857, "else"
+       line 410, "pan.___", state 860, "(1)"
+       line 410, "pan.___", state 861, "(1)"
+       line 410, "pan.___", state 861, "(1)"
+       line 414, "pan.___", state 868, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 870, "(1)"
+       line 414, "pan.___", state 871, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 871, "else"
+       line 414, "pan.___", state 874, "(1)"
+       line 414, "pan.___", state 875, "(1)"
+       line 414, "pan.___", state 875, "(1)"
+       line 412, "pan.___", state 880, "((i<2))"
+       line 412, "pan.___", state 880, "((i>=2))"
+       line 239, "pan.___", state 886, "(1)"
+       line 243, "pan.___", state 894, "(1)"
+       line 243, "pan.___", state 895, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 895, "else"
+       line 241, "pan.___", state 900, "((i<1))"
+       line 241, "pan.___", state 900, "((i>=1))"
+       line 247, "pan.___", state 906, "(1)"
+       line 247, "pan.___", state 907, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 907, "else"
+       line 251, "pan.___", state 914, "(1)"
+       line 251, "pan.___", state 915, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 915, "else"
+       line 249, "pan.___", state 920, "((i<2))"
+       line 249, "pan.___", state 920, "((i>=2))"
+       line 256, "pan.___", state 924, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 924, "else"
+       line 421, "pan.___", state 926, "(1)"
+       line 421, "pan.___", state 926, "(1)"
+       line 697, "pan.___", state 930, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 401, "pan.___", state 935, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 967, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 981, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 999, "(1)"
+       line 247, "pan.___", state 1019, "(1)"
+       line 251, "pan.___", state 1027, "(1)"
+       line 401, "pan.___", state 1049, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1081, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1095, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1113, "(1)"
+       line 247, "pan.___", state 1133, "(1)"
+       line 251, "pan.___", state 1141, "(1)"
+       line 401, "pan.___", state 1164, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1196, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1210, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1228, "(1)"
+       line 247, "pan.___", state 1248, "(1)"
+       line 251, "pan.___", state 1256, "(1)"
+       line 401, "pan.___", state 1275, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1307, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1321, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1339, "(1)"
+       line 247, "pan.___", state 1359, "(1)"
+       line 251, "pan.___", state 1367, "(1)"
+       line 401, "pan.___", state 1391, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1423, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1437, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1455, "(1)"
+       line 247, "pan.___", state 1475, "(1)"
+       line 251, "pan.___", state 1483, "(1)"
+       line 401, "pan.___", state 1502, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1534, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1548, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1566, "(1)"
+       line 247, "pan.___", state 1586, "(1)"
+       line 251, "pan.___", state 1594, "(1)"
+       line 401, "pan.___", state 1616, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1648, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1662, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1680, "(1)"
+       line 247, "pan.___", state 1700, "(1)"
+       line 251, "pan.___", state 1708, "(1)"
+       line 736, "pan.___", state 1727, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 401, "pan.___", state 1734, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1766, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1780, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1798, "(1)"
+       line 247, "pan.___", state 1818, "(1)"
+       line 251, "pan.___", state 1826, "(1)"
+       line 401, "pan.___", state 1845, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1877, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1891, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1909, "(1)"
+       line 247, "pan.___", state 1929, "(1)"
+       line 251, "pan.___", state 1937, "(1)"
+       line 401, "pan.___", state 1958, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 1960, "(1)"
+       line 401, "pan.___", state 1961, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 1961, "else"
+       line 401, "pan.___", state 1964, "(1)"
+       line 405, "pan.___", state 1972, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 1974, "(1)"
+       line 405, "pan.___", state 1975, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 1975, "else"
+       line 405, "pan.___", state 1978, "(1)"
+       line 405, "pan.___", state 1979, "(1)"
+       line 405, "pan.___", state 1979, "(1)"
+       line 403, "pan.___", state 1984, "((i<1))"
+       line 403, "pan.___", state 1984, "((i>=1))"
+       line 410, "pan.___", state 1990, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1992, "(1)"
+       line 410, "pan.___", state 1993, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 1993, "else"
+       line 410, "pan.___", state 1996, "(1)"
+       line 410, "pan.___", state 1997, "(1)"
+       line 410, "pan.___", state 1997, "(1)"
+       line 414, "pan.___", state 2004, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2006, "(1)"
+       line 414, "pan.___", state 2007, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 2007, "else"
+       line 414, "pan.___", state 2010, "(1)"
+       line 414, "pan.___", state 2011, "(1)"
+       line 414, "pan.___", state 2011, "(1)"
+       line 412, "pan.___", state 2016, "((i<2))"
+       line 412, "pan.___", state 2016, "((i>=2))"
+       line 239, "pan.___", state 2022, "(1)"
+       line 243, "pan.___", state 2030, "(1)"
+       line 243, "pan.___", state 2031, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 2031, "else"
+       line 241, "pan.___", state 2036, "((i<1))"
+       line 241, "pan.___", state 2036, "((i>=1))"
+       line 247, "pan.___", state 2042, "(1)"
+       line 247, "pan.___", state 2043, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 2043, "else"
+       line 251, "pan.___", state 2050, "(1)"
+       line 251, "pan.___", state 2051, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 2051, "else"
+       line 249, "pan.___", state 2056, "((i<2))"
+       line 249, "pan.___", state 2056, "((i>=2))"
+       line 256, "pan.___", state 2060, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 2060, "else"
+       line 421, "pan.___", state 2062, "(1)"
+       line 421, "pan.___", state 2062, "(1)"
+       line 736, "pan.___", state 2065, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 736, "pan.___", state 2066, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 736, "pan.___", state 2067, "(1)"
+       line 401, "pan.___", state 2074, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 2106, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2120, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 2138, "(1)"
+       line 247, "pan.___", state 2158, "(1)"
+       line 251, "pan.___", state 2166, "(1)"
+       line 401, "pan.___", state 2191, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 2223, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2237, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 2255, "(1)"
+       line 247, "pan.___", state 2275, "(1)"
+       line 251, "pan.___", state 2283, "(1)"
+       line 401, "pan.___", state 2302, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 2334, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2348, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 2366, "(1)"
+       line 247, "pan.___", state 2386, "(1)"
+       line 251, "pan.___", state 2394, "(1)"
+       line 239, "pan.___", state 2425, "(1)"
+       line 247, "pan.___", state 2445, "(1)"
+       line 251, "pan.___", state 2453, "(1)"
+       line 239, "pan.___", state 2468, "(1)"
+       line 247, "pan.___", state 2488, "(1)"
+       line 251, "pan.___", state 2496, "(1)"
+       line 887, "pan.___", state 2513, "-end-"
+       (246 of 2513 states)
+unreached in proctype urcu_writer
+       line 401, "pan.___", state 20, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 26, "(1)"
+       line 405, "pan.___", state 34, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 40, "(1)"
+       line 405, "pan.___", state 41, "(1)"
+       line 405, "pan.___", state 41, "(1)"
+       line 403, "pan.___", state 46, "((i<1))"
+       line 403, "pan.___", state 46, "((i>=1))"
+       line 410, "pan.___", state 52, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 58, "(1)"
+       line 410, "pan.___", state 59, "(1)"
+       line 410, "pan.___", state 59, "(1)"
+       line 414, "pan.___", state 72, "(1)"
+       line 414, "pan.___", state 73, "(1)"
+       line 414, "pan.___", state 73, "(1)"
+       line 412, "pan.___", state 78, "((i<2))"
+       line 412, "pan.___", state 78, "((i>=2))"
+       line 239, "pan.___", state 84, "(1)"
+       line 243, "pan.___", state 92, "(1)"
+       line 243, "pan.___", state 93, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 93, "else"
+       line 241, "pan.___", state 98, "((i<1))"
+       line 241, "pan.___", state 98, "((i>=1))"
+       line 247, "pan.___", state 104, "(1)"
+       line 247, "pan.___", state 105, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 105, "else"
+       line 251, "pan.___", state 112, "(1)"
+       line 251, "pan.___", state 113, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 113, "else"
+       line 249, "pan.___", state 118, "((i<2))"
+       line 249, "pan.___", state 118, "((i>=2))"
+       line 256, "pan.___", state 122, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 122, "else"
+       line 421, "pan.___", state 124, "(1)"
+       line 421, "pan.___", state 124, "(1)"
+       line 262, "pan.___", state 133, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 142, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 264, "pan.___", state 150, "((i<1))"
+       line 264, "pan.___", state 150, "((i>=1))"
+       line 270, "pan.___", state 155, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 1010, "pan.___", state 183, "old_data = cached_rcu_ptr.val[_pid]"
+       line 1021, "pan.___", state 187, "_proc_urcu_writer = (_proc_urcu_writer|(1<<4))"
+       line 401, "pan.___", state 195, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 201, "(1)"
+       line 405, "pan.___", state 209, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 215, "(1)"
+       line 405, "pan.___", state 216, "(1)"
+       line 405, "pan.___", state 216, "(1)"
+       line 410, "pan.___", state 229, "(1)"
+       line 414, "pan.___", state 241, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 259, "(1)"
+       line 243, "pan.___", state 267, "(1)"
+       line 251, "pan.___", state 287, "(1)"
+       line 421, "pan.___", state 299, "(1)"
+       line 421, "pan.___", state 299, "(1)"
+       line 405, "pan.___", state 322, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 354, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 243, "pan.___", state 380, "(1)"
+       line 251, "pan.___", state 400, "(1)"
+       line 405, "pan.___", state 442, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 243, "pan.___", state 500, "(1)"
+       line 405, "pan.___", state 553, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 243, "pan.___", state 611, "(1)"
+       line 405, "pan.___", state 666, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 698, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 243, "pan.___", state 724, "(1)"
+       line 251, "pan.___", state 744, "(1)"
+       line 1145, "pan.___", state 769, "_proc_urcu_writer = (_proc_urcu_writer|(1<<13))"
+       line 262, "pan.___", state 797, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 262, "pan.___", state 799, "(1)"
+       line 266, "pan.___", state 806, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 808, "(1)"
+       line 266, "pan.___", state 809, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 266, "pan.___", state 809, "else"
+       line 264, "pan.___", state 814, "((i<1))"
+       line 264, "pan.___", state 814, "((i>=1))"
+       line 270, "pan.___", state 819, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 821, "(1)"
+       line 270, "pan.___", state 822, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 270, "pan.___", state 822, "else"
+       line 274, "pan.___", state 828, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 830, "(1)"
+       line 274, "pan.___", state 831, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 274, "pan.___", state 831, "else"
+       line 272, "pan.___", state 836, "((i<2))"
+       line 272, "pan.___", state 836, "((i>=2))"
+       line 239, "pan.___", state 844, "(1)"
+       line 243, "pan.___", state 852, "(1)"
+       line 243, "pan.___", state 853, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 853, "else"
+       line 241, "pan.___", state 858, "((i<1))"
+       line 241, "pan.___", state 858, "((i>=1))"
+       line 247, "pan.___", state 864, "(1)"
+       line 247, "pan.___", state 865, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 865, "else"
+       line 251, "pan.___", state 872, "(1)"
+       line 251, "pan.___", state 873, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 873, "else"
+       line 256, "pan.___", state 882, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 882, "else"
+       line 1199, "pan.___", state 898, "((i<1))"
+       line 1199, "pan.___", state 898, "((i>=1))"
+       line 262, "pan.___", state 903, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 262, "pan.___", state 905, "(1)"
+       line 266, "pan.___", state 912, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 914, "(1)"
+       line 266, "pan.___", state 915, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 266, "pan.___", state 915, "else"
+       line 264, "pan.___", state 920, "((i<1))"
+       line 264, "pan.___", state 920, "((i>=1))"
+       line 270, "pan.___", state 925, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 927, "(1)"
+       line 270, "pan.___", state 928, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 270, "pan.___", state 928, "else"
+       line 274, "pan.___", state 934, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 936, "(1)"
+       line 274, "pan.___", state 937, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 274, "pan.___", state 937, "else"
+       line 272, "pan.___", state 942, "((i<2))"
+       line 272, "pan.___", state 942, "((i>=2))"
+       line 239, "pan.___", state 950, "(1)"
+       line 243, "pan.___", state 958, "(1)"
+       line 243, "pan.___", state 959, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 959, "else"
+       line 241, "pan.___", state 964, "((i<1))"
+       line 241, "pan.___", state 964, "((i>=1))"
+       line 247, "pan.___", state 970, "(1)"
+       line 247, "pan.___", state 971, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 971, "else"
+       line 251, "pan.___", state 978, "(1)"
+       line 251, "pan.___", state 979, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 979, "else"
+       line 256, "pan.___", state 988, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 988, "else"
+       line 289, "pan.___", state 990, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 289, "pan.___", state 990, "else"
+       line 1199, "pan.___", state 991, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 1199, "pan.___", state 991, "else"
+       line 266, "pan.___", state 1004, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1017, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1026, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1042, "(1)"
+       line 243, "pan.___", state 1050, "(1)"
+       line 247, "pan.___", state 1062, "(1)"
+       line 251, "pan.___", state 1070, "(1)"
+       line 262, "pan.___", state 1101, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 1110, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1123, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1132, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1148, "(1)"
+       line 243, "pan.___", state 1156, "(1)"
+       line 247, "pan.___", state 1168, "(1)"
+       line 251, "pan.___", state 1176, "(1)"
+       line 266, "pan.___", state 1202, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1224, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1240, "(1)"
+       line 243, "pan.___", state 1248, "(1)"
+       line 247, "pan.___", state 1260, "(1)"
+       line 251, "pan.___", state 1268, "(1)"
+       line 262, "pan.___", state 1299, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 1308, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1321, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1330, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1346, "(1)"
+       line 243, "pan.___", state 1354, "(1)"
+       line 247, "pan.___", state 1366, "(1)"
+       line 251, "pan.___", state 1374, "(1)"
+       line 262, "pan.___", state 1391, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 262, "pan.___", state 1393, "(1)"
+       line 266, "pan.___", state 1400, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 1402, "(1)"
+       line 266, "pan.___", state 1403, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 266, "pan.___", state 1403, "else"
+       line 264, "pan.___", state 1408, "((i<1))"
+       line 264, "pan.___", state 1408, "((i>=1))"
+       line 270, "pan.___", state 1413, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1415, "(1)"
+       line 270, "pan.___", state 1416, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 270, "pan.___", state 1416, "else"
+       line 274, "pan.___", state 1422, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1424, "(1)"
+       line 274, "pan.___", state 1425, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 274, "pan.___", state 1425, "else"
+       line 272, "pan.___", state 1430, "((i<2))"
+       line 272, "pan.___", state 1430, "((i>=2))"
+       line 239, "pan.___", state 1438, "(1)"
+       line 243, "pan.___", state 1446, "(1)"
+       line 243, "pan.___", state 1447, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 1447, "else"
+       line 241, "pan.___", state 1452, "((i<1))"
+       line 241, "pan.___", state 1452, "((i>=1))"
+       line 247, "pan.___", state 1458, "(1)"
+       line 247, "pan.___", state 1459, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 1459, "else"
+       line 251, "pan.___", state 1466, "(1)"
+       line 251, "pan.___", state 1467, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 1467, "else"
+       line 256, "pan.___", state 1476, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 1476, "else"
+       line 1210, "pan.___", state 1479, "i = 0"
+       line 1210, "pan.___", state 1481, "reader_barrier = 1"
+       line 1210, "pan.___", state 1492, "((i<1))"
+       line 1210, "pan.___", state 1492, "((i>=1))"
+       line 262, "pan.___", state 1497, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 262, "pan.___", state 1499, "(1)"
+       line 266, "pan.___", state 1506, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 1508, "(1)"
+       line 266, "pan.___", state 1509, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 266, "pan.___", state 1509, "else"
+       line 264, "pan.___", state 1514, "((i<1))"
+       line 264, "pan.___", state 1514, "((i>=1))"
+       line 270, "pan.___", state 1519, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1521, "(1)"
+       line 270, "pan.___", state 1522, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 270, "pan.___", state 1522, "else"
+       line 274, "pan.___", state 1528, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1530, "(1)"
+       line 274, "pan.___", state 1531, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 274, "pan.___", state 1531, "else"
+       line 272, "pan.___", state 1536, "((i<2))"
+       line 272, "pan.___", state 1536, "((i>=2))"
+       line 239, "pan.___", state 1544, "(1)"
+       line 243, "pan.___", state 1552, "(1)"
+       line 243, "pan.___", state 1553, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 1553, "else"
+       line 241, "pan.___", state 1558, "((i<1))"
+       line 241, "pan.___", state 1558, "((i>=1))"
+       line 247, "pan.___", state 1564, "(1)"
+       line 247, "pan.___", state 1565, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 1565, "else"
+       line 251, "pan.___", state 1572, "(1)"
+       line 251, "pan.___", state 1573, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 1573, "else"
+       line 256, "pan.___", state 1582, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 1582, "else"
+       line 289, "pan.___", state 1584, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 289, "pan.___", state 1584, "else"
+       line 1210, "pan.___", state 1585, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 1210, "pan.___", state 1585, "else"
+       line 1214, "pan.___", state 1588, "-end-"
+       (179 of 1588 states)
+unreached in proctype :init:
+       line 1225, "pan.___", state 9, "((j<2))"
+       line 1225, "pan.___", state 9, "((j>=2))"
+       line 1226, "pan.___", state 20, "((j<2))"
+       line 1226, "pan.___", state 20, "((j>=2))"
+       line 1231, "pan.___", state 33, "((j<2))"
+       line 1231, "pan.___", state 33, "((j>=2))"
+       line 1229, "pan.___", state 43, "((i<1))"
+       line 1229, "pan.___", state 43, "((i>=1))"
+       line 1239, "pan.___", state 54, "((j<2))"
+       line 1239, "pan.___", state 54, "((j>=2))"
+       line 1243, "pan.___", state 67, "((j<2))"
+       line 1243, "pan.___", state 67, "((j>=2))"
+       (6 of 78 states)
+unreached in proctype :never:
+       line 1277, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 36.6 seconds
+pan: rate 45762.039 states/second
+pan: avg transition delay 2.559e-06 usec
+cp .input.spin urcu_free_no_mb.spin.input
+cp .input.spin.trail urcu_free_no_mb.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow'
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_mb.spin.input b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_mb.spin.input
new file mode 100644 (file)
index 0000000..739f3ef
--- /dev/null
@@ -0,0 +1,1250 @@
+#define NO_MB
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered as a depencency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. c.f.
+ * http://www.linuxjournal.com/article/8212)
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process have its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barriers dependencies in place in REMOTE_BARRIERS mode
+ * because they would add unexisting core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader  smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, producetoken)                          \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, READ_PROD_A_READ << base) ->  \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base,                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_C_IF_TRUE_READ     /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);            \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);        \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+/*
+ * Reader-side instruction tokens.  Each token is one bit in the reader's
+ * token bitmask, produced with PRODUCE_TOKENS() once the corresponding
+ * instruction has "executed" and tested with CONSUME_TOKENS() (macros
+ * defined earlier in this model).  This encodes the data/control
+ * dependencies that constrain out-of-order instruction scheduling.
+ * Multi-bit "BASE" ranges are consumed by the PROCEDURE_READ_LOCK /
+ * PROCEDURE_READ_UNLOCK macros, which use several consecutive bits.
+ */
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+/*
+ * Model one pass of an RCU reader: an outer rcu_read_lock/unlock pair with
+ * one nested lock/unlock inside, two dependent data reads (pointer then
+ * data), and a second, "unrolled" consecutive lock/read/unlock iteration.
+ * Instructions are scheduled out of order: each alternative in the inner
+ * if/fi fires once its CONSUME_TOKENS() dependencies are satisfied, and
+ * the pass completes when READ_PROC_ALL_TOKENS have all been produced.
+ * Under NO_MB or REMOTE_BARRIERS, the memory-barrier tokens are produced
+ * up front so barrier instructions impose no ordering.
+ */
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                *
+                * Each CONSUME_TOKENS() alternative below corresponds to one
+                * point between consecutive reader instructions in program
+                * order; the barrier (smp_mb_recv) may fire at any such
+                * point, i.e. whenever exactly the instructions before that
+                * point have executed and none after it.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               /* Out-of-line because smp_mb_recv() must not run inside the
+                * atomic block above. */
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB | READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation has the barrier
+                        * because the performance impact added by a branch in the common case does not
+                        * justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_UNLOCK_OUT             /* RAW */
+                                           | READ_PROC_SECOND_MB       /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB        /* mb() orders reads */
+                                           | READ_LOCK_NESTED_OUT      /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT,   /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event mb()s are removed (execution using signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * to spill its execution on other loop's execution.
+        */
+       goto end;
+       /* Out-of-line read-barrier bodies: under NO_RMB the rmb is weakened
+        * to a plain out-of-order memory access (ooo_mem). */
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+/*
+ * Reader process: after global init completes, loops forever running
+ * urcu_one_read().  The infinite loop lets the writer-progress check run
+ * against an always-active reader.
+ */
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test reader's progress here, because we are mainly
+                * interested in writer's progress. The reader never blocks
+                * anyway. We have to test for reader/writer's progress
+                * separately, otherwise we could think the writer is doing
+                * progress when it's blocked by an always progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+/* Writer token bitmask; same CONSUME_TOKENS()/PRODUCE_TOKENS() scheme as
+ * the reader side above. */
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+/* Should not include branches (the WAIT_LOOP tokens) */
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+/* Must clear all tokens, including branches */
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+/*
+ * Writer process: performs three update cycles.  Each cycle writes new
+ * data, issues wmb, exchanges rcu_ptr (publication), then runs the
+ * grace-period protocol — first mb, two parity flips of urcu_gp_ctr each
+ * followed by a wait on reader 0's active-reader count, second mb — and
+ * finally poisons the old data slot.  Instructions are scheduled
+ * out of order under the writer token dependencies above.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existing dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               /* Pretend the second flip already happened, so only one
+                * parity flip per grace period is modeled. */
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we are always starting by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               /* Reader still active in the old parity:
+                                * take the wait-loop branch. */
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT         /* Control dependency : need to branch out of
+                                                                * the loop to execute the next flip (CHECK) */
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill on the next loop execution. Given the validation we do
+                * checks for the data entry read to be poisoned, it's ok if
+                * we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               /* Per-reader trace variables start with rcu_ptr's initial
+                * target (slot 0) and valid data. */
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               /* Slot 0 holds valid data; all other slab slots start
+                * poisoned. */
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               /* Release the readers and the writer. */
+               init_done = 1;
+       }
+}
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_mb.spin.input.trail b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_mb.spin.input.trail
new file mode 100644 (file)
index 0000000..bf64078
--- /dev/null
@@ -0,0 +1,1304 @@
+-2:3:-2
+-4:-4:-4
+1:0:4181
+2:3:4101
+3:3:4104
+4:3:4104
+5:3:4107
+6:3:4115
+7:3:4115
+8:3:4118
+9:3:4124
+10:3:4128
+11:3:4128
+12:3:4131
+13:3:4141
+14:3:4149
+15:3:4149
+16:3:4152
+17:3:4158
+18:3:4162
+19:3:4162
+20:3:4165
+21:3:4171
+22:3:4175
+23:3:4176
+24:0:4181
+25:3:4178
+26:0:4181
+27:2:2515
+28:0:4181
+29:2:2521
+30:0:4181
+31:2:2522
+32:0:4181
+33:2:2524
+34:0:4181
+35:2:2525
+36:0:4181
+37:2:2526
+38:0:4181
+39:2:2527
+40:0:4181
+41:2:2528
+42:2:2529
+43:2:2533
+44:2:2534
+45:2:2542
+46:2:2543
+47:2:2547
+48:2:2548
+49:2:2556
+50:2:2561
+51:2:2565
+52:2:2566
+53:2:2574
+54:2:2575
+55:2:2579
+56:2:2580
+57:2:2574
+58:2:2575
+59:2:2579
+60:2:2580
+61:2:2588
+62:2:2593
+63:2:2600
+64:2:2601
+65:2:2608
+66:2:2613
+67:2:2620
+68:2:2621
+69:2:2620
+70:2:2621
+71:2:2628
+72:2:2638
+73:0:4181
+74:2:2527
+75:0:4181
+76:2:2642
+77:2:2646
+78:2:2647
+79:2:2651
+80:2:2655
+81:2:2656
+82:2:2660
+83:2:2668
+84:2:2669
+85:2:2673
+86:2:2677
+87:2:2678
+88:2:2673
+89:2:2674
+90:2:2682
+91:0:4181
+92:2:2527
+93:0:4181
+94:2:2690
+95:2:2691
+96:2:2692
+97:0:4181
+98:2:2527
+99:0:4181
+100:2:2700
+101:0:4181
+102:2:2527
+103:0:4181
+104:2:2703
+105:2:2704
+106:2:2708
+107:2:2709
+108:2:2717
+109:2:2718
+110:2:2722
+111:2:2723
+112:2:2731
+113:2:2736
+114:2:2737
+115:2:2749
+116:2:2750
+117:2:2754
+118:2:2755
+119:2:2749
+120:2:2750
+121:2:2754
+122:2:2755
+123:2:2763
+124:2:2768
+125:2:2775
+126:2:2776
+127:2:2783
+128:2:2788
+129:2:2795
+130:2:2796
+131:2:2795
+132:2:2796
+133:2:2803
+134:2:2812
+135:0:4181
+136:2:2527
+137:0:4181
+138:2:2816
+139:2:2817
+140:2:2818
+141:2:2830
+142:2:2831
+143:2:2835
+144:2:2836
+145:2:2844
+146:2:2849
+147:2:2853
+148:2:2854
+149:2:2862
+150:2:2863
+151:2:2867
+152:2:2868
+153:2:2862
+154:2:2863
+155:2:2867
+156:2:2868
+157:2:2876
+158:2:2881
+159:2:2888
+160:2:2889
+161:2:2896
+162:2:2901
+163:2:2908
+164:2:2909
+165:2:2908
+166:2:2909
+167:2:2916
+168:2:2929
+169:2:2930
+170:0:4181
+171:2:2527
+172:0:4181
+173:2:2936
+174:2:2937
+175:2:2941
+176:2:2942
+177:2:2950
+178:2:2951
+179:2:2955
+180:2:2956
+181:2:2964
+182:2:2969
+183:2:2973
+184:2:2974
+185:2:2982
+186:2:2983
+187:2:2987
+188:2:2988
+189:2:2982
+190:2:2983
+191:2:2987
+192:2:2988
+193:2:2996
+194:2:3001
+195:2:3008
+196:2:3009
+197:2:3016
+198:2:3021
+199:2:3028
+200:2:3029
+201:2:3028
+202:2:3029
+203:2:3036
+204:0:4181
+205:2:2527
+206:0:4181
+207:2:3047
+208:2:3048
+209:2:3052
+210:2:3053
+211:2:3061
+212:2:3062
+213:2:3066
+214:2:3067
+215:2:3075
+216:2:3080
+217:2:3084
+218:2:3085
+219:2:3093
+220:2:3094
+221:2:3098
+222:2:3099
+223:2:3093
+224:2:3094
+225:2:3098
+226:2:3099
+227:2:3107
+228:2:3112
+229:2:3119
+230:2:3120
+231:2:3127
+232:2:3132
+233:2:3139
+234:2:3140
+235:2:3139
+236:2:3140
+237:2:3147
+238:2:3156
+239:0:4181
+240:2:2527
+241:0:4181
+242:2:3160
+243:2:3161
+244:2:3162
+245:2:3174
+246:2:3175
+247:2:3179
+248:2:3180
+249:2:3188
+250:2:3193
+251:2:3197
+252:2:3198
+253:2:3206
+254:2:3207
+255:2:3211
+256:2:3212
+257:2:3206
+258:2:3207
+259:2:3211
+260:2:3212
+261:2:3220
+262:2:3225
+263:2:3232
+264:2:3233
+265:2:3240
+266:2:3245
+267:2:3252
+268:2:3253
+269:2:3252
+270:2:3253
+271:2:3260
+272:2:3272
+273:2:3273
+274:0:4181
+275:2:2527
+276:0:4181
+277:2:3282
+278:2:3283
+279:0:4181
+280:2:2527
+281:0:4181
+282:2:3287
+283:0:4181
+284:2:3295
+285:0:4181
+286:2:2522
+287:0:4181
+288:2:2524
+289:0:4181
+290:2:2525
+291:0:4181
+292:2:2526
+293:0:4181
+294:2:2527
+295:0:4181
+296:2:2528
+297:2:2529
+298:2:2533
+299:2:2534
+300:2:2542
+301:2:2543
+302:2:2547
+303:2:2548
+304:2:2556
+305:2:2561
+306:2:2565
+307:2:2566
+308:2:2574
+309:2:2575
+310:2:2576
+311:2:2574
+312:2:2575
+313:2:2579
+314:2:2580
+315:2:2588
+316:2:2593
+317:2:2600
+318:2:2601
+319:2:2608
+320:2:2613
+321:2:2620
+322:2:2621
+323:2:2620
+324:2:2621
+325:2:2628
+326:2:2638
+327:0:4181
+328:2:2527
+329:0:4181
+330:2:2642
+331:2:2646
+332:2:2647
+333:2:2651
+334:2:2655
+335:2:2656
+336:2:2660
+337:2:2668
+338:2:2669
+339:2:2673
+340:2:2674
+341:2:2673
+342:2:2677
+343:2:2678
+344:2:2682
+345:0:4181
+346:2:2527
+347:0:4181
+348:2:2690
+349:2:2691
+350:2:2692
+351:0:4181
+352:2:2527
+353:0:4181
+354:2:2700
+355:0:4181
+356:2:2527
+357:0:4181
+358:2:2703
+359:2:2704
+360:2:2708
+361:2:2709
+362:2:2717
+363:2:2718
+364:2:2722
+365:2:2723
+366:2:2731
+367:2:2736
+368:2:2737
+369:2:2749
+370:2:2750
+371:2:2754
+372:2:2755
+373:2:2749
+374:2:2750
+375:2:2754
+376:2:2755
+377:2:2763
+378:2:2768
+379:2:2775
+380:2:2776
+381:2:2783
+382:2:2788
+383:2:2795
+384:2:2796
+385:2:2795
+386:2:2796
+387:2:2803
+388:2:2812
+389:0:4181
+390:2:2527
+391:0:4181
+392:2:2816
+393:2:2817
+394:2:2818
+395:2:2830
+396:2:2831
+397:2:2835
+398:2:2836
+399:2:2844
+400:2:2849
+401:2:2853
+402:2:2854
+403:2:2862
+404:2:2863
+405:2:2867
+406:2:2868
+407:2:2862
+408:2:2863
+409:2:2867
+410:2:2868
+411:2:2876
+412:2:2881
+413:2:2888
+414:2:2889
+415:2:2896
+416:2:2901
+417:2:2908
+418:2:2909
+419:2:2908
+420:2:2909
+421:2:2916
+422:2:2929
+423:2:2930
+424:0:4181
+425:2:2527
+426:0:4181
+427:2:2936
+428:2:2937
+429:2:2941
+430:2:2942
+431:2:2950
+432:2:2951
+433:2:2955
+434:2:2956
+435:2:2964
+436:2:2969
+437:2:2973
+438:2:2974
+439:2:2982
+440:2:2983
+441:2:2987
+442:2:2988
+443:2:2982
+444:2:2983
+445:2:2987
+446:2:2988
+447:2:2996
+448:2:3001
+449:2:3008
+450:2:3009
+451:2:3016
+452:2:3021
+453:2:3028
+454:2:3029
+455:2:3028
+456:2:3029
+457:2:3036
+458:0:4181
+459:2:2527
+460:0:4181
+461:2:3047
+462:2:3048
+463:2:3052
+464:2:3053
+465:2:3061
+466:2:3062
+467:2:3066
+468:2:3067
+469:2:3075
+470:2:3080
+471:2:3084
+472:2:3085
+473:2:3093
+474:2:3094
+475:2:3098
+476:2:3099
+477:2:3093
+478:2:3094
+479:2:3098
+480:2:3099
+481:2:3107
+482:2:3112
+483:2:3119
+484:2:3120
+485:2:3127
+486:2:3132
+487:2:3139
+488:2:3140
+489:2:3139
+490:2:3140
+491:2:3147
+492:2:3156
+493:0:4181
+494:2:2527
+495:0:4181
+496:2:3160
+497:2:3161
+498:2:3162
+499:2:3174
+500:2:3175
+501:2:3179
+502:2:3180
+503:2:3188
+504:2:3193
+505:2:3197
+506:2:3198
+507:2:3206
+508:2:3207
+509:2:3211
+510:2:3212
+511:2:3206
+512:2:3207
+513:2:3211
+514:2:3212
+515:2:3220
+516:2:3225
+517:2:3232
+518:2:3233
+519:2:3240
+520:2:3245
+521:2:3252
+522:2:3253
+523:2:3252
+524:2:3253
+525:2:3260
+526:2:3272
+527:2:3273
+528:0:4181
+529:2:2527
+530:0:4181
+531:2:3282
+532:2:3283
+533:0:4181
+534:2:2527
+535:0:4181
+536:2:3287
+537:0:4181
+538:2:3295
+539:0:4181
+540:2:2522
+541:0:4181
+542:2:2524
+543:0:4181
+544:2:2525
+545:0:4181
+546:2:2526
+547:0:4181
+548:2:2527
+549:0:4181
+550:2:2528
+551:2:2529
+552:2:2533
+553:2:2534
+554:2:2542
+555:2:2543
+556:2:2547
+557:2:2548
+558:2:2556
+559:2:2561
+560:2:2565
+561:2:2566
+562:2:2574
+563:2:2575
+564:2:2579
+565:2:2580
+566:2:2574
+567:2:2575
+568:2:2576
+569:2:2588
+570:2:2593
+571:2:2600
+572:2:2601
+573:2:2608
+574:2:2613
+575:2:2620
+576:2:2621
+577:2:2620
+578:2:2621
+579:2:2628
+580:2:2638
+581:0:4181
+582:2:2527
+583:0:4181
+584:2:2642
+585:2:2646
+586:2:2647
+587:2:2651
+588:2:2655
+589:2:2656
+590:2:2660
+591:2:2668
+592:2:2669
+593:2:2673
+594:2:2677
+595:2:2678
+596:2:2673
+597:2:2674
+598:2:2682
+599:0:4181
+600:2:2527
+601:0:4181
+602:2:2690
+603:2:2691
+604:2:2692
+605:0:4181
+606:2:2527
+607:0:4181
+608:2:2700
+609:0:4181
+610:2:2527
+611:0:4181
+612:2:2703
+613:2:2704
+614:2:2708
+615:2:2709
+616:2:2717
+617:2:2718
+618:2:2722
+619:2:2723
+620:2:2731
+621:2:2744
+622:2:2745
+623:2:2749
+624:2:2750
+625:2:2754
+626:2:2755
+627:2:2749
+628:2:2750
+629:2:2754
+630:2:2755
+631:2:2763
+632:2:2768
+633:2:2775
+634:2:2776
+635:2:2783
+636:2:2790
+637:2:2791
+638:2:2795
+639:2:2796
+640:2:2795
+641:2:2796
+642:2:2803
+643:2:2812
+644:0:4181
+645:2:2527
+646:0:4181
+647:2:2816
+648:2:2817
+649:2:2818
+650:2:2830
+651:2:2831
+652:2:2835
+653:2:2836
+654:2:2844
+655:2:2857
+656:2:2858
+657:2:2862
+658:2:2863
+659:2:2867
+660:2:2868
+661:2:2862
+662:2:2863
+663:2:2867
+664:2:2868
+665:2:2876
+666:2:2881
+667:2:2888
+668:2:2889
+669:2:2896
+670:2:2903
+671:2:2904
+672:2:2908
+673:2:2909
+674:2:2908
+675:2:2909
+676:2:2916
+677:2:2929
+678:2:2930
+679:0:4181
+680:2:2527
+681:0:4181
+682:2:2936
+683:2:2937
+684:2:2941
+685:2:2942
+686:2:2950
+687:2:2951
+688:2:2955
+689:2:2956
+690:2:2964
+691:2:2977
+692:2:2978
+693:2:2982
+694:2:2983
+695:2:2987
+696:2:2988
+697:2:2982
+698:2:2983
+699:2:2987
+700:2:2988
+701:2:2996
+702:2:3001
+703:2:3008
+704:2:3009
+705:2:3016
+706:2:3023
+707:2:3024
+708:2:3028
+709:2:3029
+710:2:3028
+711:2:3029
+712:2:3036
+713:0:4181
+714:2:2527
+715:0:4181
+716:2:3160
+717:2:3161
+718:2:3165
+719:2:3166
+720:2:3174
+721:2:3175
+722:2:3179
+723:2:3180
+724:2:3188
+725:2:3201
+726:2:3202
+727:2:3206
+728:2:3207
+729:2:3211
+730:2:3212
+731:2:3206
+732:2:3207
+733:2:3211
+734:2:3212
+735:2:3220
+736:2:3225
+737:2:3232
+738:2:3233
+739:2:3240
+740:2:3247
+741:2:3248
+742:2:3252
+743:2:3253
+744:2:3252
+745:2:3253
+746:2:3260
+747:2:3272
+748:2:3273
+749:0:4181
+750:2:2527
+751:0:4181
+752:2:3282
+753:2:3283
+754:0:4181
+755:2:2527
+756:0:4181
+757:2:3047
+758:2:3048
+759:2:3052
+760:2:3053
+761:2:3061
+762:2:3062
+763:2:3066
+764:2:3067
+765:2:3075
+766:2:3088
+767:2:3089
+768:2:3093
+769:2:3094
+770:2:3095
+771:2:3093
+772:2:3094
+773:2:3098
+774:2:3099
+775:2:3107
+776:2:3112
+777:2:3119
+778:2:3120
+779:2:3127
+780:2:3134
+781:2:3135
+782:2:3139
+783:2:3140
+784:2:3139
+785:2:3140
+786:2:3147
+787:2:3156
+788:0:4181
+789:2:2527
+790:0:4181
+791:2:3287
+792:0:4181
+793:2:3295
+794:0:4181
+795:2:3296
+796:0:4181
+797:2:3301
+798:0:4181
+799:1:2
+800:0:4181
+801:2:3302
+802:0:4181
+803:1:8
+804:0:4181
+805:2:3301
+806:0:4181
+807:1:9
+808:0:4181
+809:2:3302
+810:0:4181
+811:1:10
+812:0:4181
+813:2:3301
+814:0:4181
+815:1:11
+816:0:4181
+817:2:3302
+818:0:4181
+819:1:12
+820:0:4181
+821:2:3301
+822:0:4181
+823:1:13
+824:0:4181
+825:2:3302
+826:0:4181
+827:1:14
+828:0:4181
+829:2:3301
+830:0:4181
+831:1:15
+832:0:4181
+833:2:3302
+834:0:4181
+835:1:16
+836:0:4181
+837:2:3301
+838:0:4181
+839:1:17
+840:0:4181
+841:2:3302
+842:0:4181
+843:1:18
+844:0:4181
+845:2:3301
+846:0:4181
+847:1:19
+848:0:4181
+849:2:3302
+850:0:4181
+851:1:20
+852:0:4181
+853:2:3301
+854:0:4181
+855:1:21
+856:0:4181
+857:2:3302
+858:0:4181
+859:1:122
+860:0:4181
+861:2:3301
+862:0:4181
+863:1:124
+864:0:4181
+865:2:3302
+866:0:4181
+867:1:23
+868:0:4181
+869:2:3301
+870:0:4181
+871:1:130
+872:1:131
+873:1:135
+874:1:136
+875:1:144
+876:1:145
+877:1:149
+878:1:150
+879:1:158
+880:1:163
+881:1:167
+882:1:168
+883:1:176
+884:1:177
+885:1:181
+886:1:182
+887:1:176
+888:1:177
+889:1:181
+890:1:182
+891:1:190
+892:1:195
+893:1:202
+894:1:203
+895:1:210
+896:1:215
+897:1:222
+898:1:223
+899:1:222
+900:1:223
+901:1:230
+902:0:4181
+903:2:3302
+904:0:4181
+905:1:19
+906:0:4181
+907:2:3301
+908:0:4181
+909:1:20
+910:0:4181
+911:2:3302
+912:0:4181
+913:1:21
+914:0:4181
+915:2:3301
+916:0:4181
+917:1:122
+918:0:4181
+919:2:3302
+920:0:4181
+921:1:124
+922:0:4181
+923:2:3301
+924:0:4181
+925:1:23
+926:0:4181
+927:2:3302
+928:0:4181
+929:1:241
+930:1:242
+931:0:4181
+932:2:3301
+933:0:4181
+934:1:19
+935:0:4181
+936:2:3302
+937:0:4181
+938:1:20
+939:0:4181
+940:2:3301
+941:0:4181
+942:1:21
+943:0:4181
+944:2:3302
+945:0:4181
+946:1:122
+947:0:4181
+948:2:3301
+949:0:4181
+950:1:124
+951:0:4181
+952:2:3302
+953:0:4181
+954:1:23
+955:0:4181
+956:2:3301
+957:0:4181
+958:1:248
+959:1:249
+960:1:253
+961:1:254
+962:1:262
+963:1:263
+964:1:267
+965:1:268
+966:1:276
+967:1:281
+968:1:285
+969:1:286
+970:1:294
+971:1:295
+972:1:299
+973:1:300
+974:1:294
+975:1:295
+976:1:299
+977:1:300
+978:1:308
+979:1:313
+980:1:320
+981:1:321
+982:1:328
+983:1:333
+984:1:340
+985:1:341
+986:1:340
+987:1:341
+988:1:348
+989:0:4181
+990:2:3302
+991:0:4181
+992:1:19
+993:0:4181
+994:2:3301
+995:0:4181
+996:1:20
+997:0:4181
+998:2:3302
+999:0:4181
+1000:1:21
+1001:0:4181
+1002:2:3301
+1003:0:4181
+1004:1:122
+1005:0:4181
+1006:2:3302
+1007:0:4181
+1008:1:124
+1009:0:4181
+1010:2:3301
+1011:0:4181
+1012:1:23
+1013:0:4181
+1014:2:3302
+1015:0:4181
+1016:1:359
+1017:1:360
+1018:1:364
+1019:1:365
+1020:1:373
+1021:1:374
+1022:1:378
+1023:1:379
+1024:1:387
+1025:1:392
+1026:1:396
+1027:1:397
+1028:1:405
+1029:1:406
+1030:1:410
+1031:1:411
+1032:1:405
+1033:1:406
+1034:1:410
+1035:1:411
+1036:1:419
+1037:1:424
+1038:1:431
+1039:1:432
+1040:1:439
+1041:1:444
+1042:1:451
+1043:1:452
+1044:1:451
+1045:1:452
+1046:1:459
+1047:1:468
+1048:0:4181
+1049:2:3301
+1050:0:4181
+1051:1:19
+1052:0:4181
+1053:2:3302
+1054:0:4181
+1055:1:20
+1056:0:4181
+1057:2:3301
+1058:0:4181
+1059:1:21
+1060:0:4181
+1061:2:3302
+1062:0:4181
+1063:1:122
+1064:0:4181
+1065:2:3301
+1066:0:4181
+1067:1:124
+1068:0:4181
+1069:2:3302
+1070:0:4181
+1071:1:23
+1072:0:4181
+1073:2:3301
+1074:0:4181
+1075:1:588
+1076:1:589
+1077:1:593
+1078:1:594
+1079:1:602
+1080:1:603
+1081:1:604
+1082:1:616
+1083:1:621
+1084:1:625
+1085:1:626
+1086:1:634
+1087:1:635
+1088:1:639
+1089:1:640
+1090:1:634
+1091:1:635
+1092:1:639
+1093:1:640
+1094:1:648
+1095:1:653
+1096:1:660
+1097:1:661
+1098:1:668
+1099:1:673
+1100:1:680
+1101:1:681
+1102:1:680
+1103:1:681
+1104:1:688
+1105:0:4181
+1106:2:3302
+1107:0:4181
+1108:1:19
+1109:0:4181
+1110:2:3301
+1111:0:4181
+1112:1:20
+1113:0:4181
+1114:2:3302
+1115:0:4181
+1116:1:21
+1117:0:4181
+1118:2:3301
+1119:0:4181
+1120:1:122
+1121:0:4181
+1122:2:3302
+1123:0:4181
+1124:1:124
+1125:0:4181
+1126:2:3301
+1127:0:4181
+1128:1:23
+1129:0:4181
+1130:2:3302
+1131:0:4181
+1132:1:699
+1133:1:702
+1134:1:703
+1135:0:4181
+1136:2:3301
+1137:0:4181
+1138:1:19
+1139:0:4181
+1140:2:3302
+1141:0:4181
+1142:1:20
+1143:0:4181
+1144:2:3301
+1145:0:4181
+1146:1:21
+1147:0:4181
+1148:2:3302
+1149:0:4181
+1150:1:122
+1151:0:4181
+1152:2:3301
+1153:0:4181
+1154:1:124
+1155:0:4181
+1156:2:3302
+1157:0:4181
+1158:1:23
+1159:0:4181
+1160:2:3301
+1161:0:4181
+1162:1:930
+1163:1:931
+1164:1:935
+1165:1:936
+1166:1:944
+1167:1:945
+1168:1:949
+1169:1:950
+1170:1:958
+1171:1:963
+1172:1:967
+1173:1:968
+1174:1:976
+1175:1:977
+1176:1:981
+1177:1:982
+1178:1:976
+1179:1:977
+1180:1:981
+1181:1:982
+1182:1:990
+1183:1:995
+1184:1:1002
+1185:1:1003
+1186:1:1010
+1187:1:1015
+1188:1:1022
+1189:1:1023
+1190:1:1022
+1191:1:1023
+1192:1:1030
+1193:1:1039
+1194:1:1043
+1195:0:4181
+1196:2:3302
+1197:0:4181
+1198:1:19
+1199:0:4181
+1200:2:3301
+1201:0:4181
+1202:1:20
+1203:0:4181
+1204:2:3302
+1205:0:4181
+1206:1:21
+1207:0:4181
+1208:2:3301
+1209:0:4181
+1210:1:122
+1211:0:4181
+1212:2:3302
+1213:0:4181
+1214:1:124
+1215:0:4181
+1216:2:3301
+1217:0:4181
+1218:1:23
+1219:0:4181
+1220:2:3302
+1221:0:4181
+1222:1:1044
+1223:1:1045
+1224:1:1049
+1225:1:1050
+1226:1:1058
+1227:1:1059
+1228:1:1060
+1229:1:1072
+1230:1:1077
+1231:1:1081
+1232:1:1082
+1233:1:1090
+1234:1:1091
+1235:1:1095
+1236:1:1096
+1237:1:1090
+1238:1:1091
+1239:1:1095
+1240:1:1096
+1241:1:1104
+1242:1:1109
+1243:1:1116
+1244:1:1117
+1245:1:1124
+1246:1:1129
+1247:1:1136
+1248:1:1137
+1249:1:1136
+1250:1:1137
+1251:1:1144
+1252:0:4181
+1253:2:3301
+1254:0:4181
+1255:1:19
+1256:0:4181
+1257:2:3302
+1258:0:4181
+1259:1:20
+1260:0:4181
+1261:2:3301
+1262:0:4181
+1263:1:21
+1264:0:4181
+1265:2:3302
+1266:0:4181
+1267:1:122
+1268:0:4181
+1269:2:3301
+1270:0:4181
+1271:1:124
+1272:0:4181
+1273:2:3302
+1274:0:4181
+1275:1:23
+1276:0:4181
+1277:2:3301
+1278:0:4181
+1279:1:1155
+1280:0:4181
+1281:2:3302
+1282:0:4181
+1283:1:2421
+1284:1:2428
+1285:1:2429
+1286:1:2436
+1287:1:2441
+1288:1:2448
+1289:1:2449
+1290:1:2448
+1291:1:2449
+1292:1:2456
+1293:1:2460
+1294:0:4181
+1295:2:3301
+1296:0:4181
+1297:1:1157
+1298:1:1158
+1299:0:4179
+1300:2:3302
+1301:0:4185
+1302:1:2110
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_rmb.define b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_rmb.define
new file mode 100644 (file)
index 0000000..73e61a4
--- /dev/null
@@ -0,0 +1 @@
+#define NO_RMB
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_rmb.log b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_rmb.log
new file mode 100644 (file)
index 0000000..5bfb772
--- /dev/null
@@ -0,0 +1,491 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_no_rmb.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1272)
+Depth=    6905 States=    1e+06 Transitions= 6.24e+06 Memory=   550.432        t=   16.2 R=   6e+04
+Depth=    6905 States=    2e+06 Transitions= 1.71e+07 Memory=   634.318        t=   47.8 R=   4e+04
+Depth=    6905 States=    3e+06 Transitions= 3.39e+07 Memory=   718.303        t=   99.2 R=   3e+04
+pan: resizing hashtable to -w22..  done
+Depth=    6905 States=    4e+06 Transitions= 4.47e+07 Memory=   833.311        t=    131 R=   3e+04
+Depth=    6905 States=    5e+06 Transitions= 5.38e+07 Memory=   917.295        t=    157 R=   3e+04
+Depth=    6905 States=    6e+06 Transitions=  6.2e+07 Memory=  1001.279        t=    180 R=   3e+04
+Depth=    6905 States=    7e+06 Transitions= 7.38e+07 Memory=  1085.264        t=    214 R=   3e+04
+Depth=    6905 States=    8e+06 Transitions= 8.99e+07 Memory=  1169.151        t=    264 R=   3e+04
+Depth=    6905 States=    9e+06 Transitions= 1.01e+08 Memory=  1253.135        t=    297 R=   3e+04
+pan: resizing hashtable to -w24..  done
+Depth=    6905 States=    1e+07 Transitions= 1.11e+08 Memory=  1461.115        t=    325 R=   3e+04
+Depth=    6905 States=  1.1e+07 Transitions= 1.18e+08 Memory=  1545.100        t=    347 R=   3e+04
+Depth=    6905 States=  1.2e+07 Transitions= 1.31e+08 Memory=  1629.084        t=    383 R=   3e+04
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 6905, errors: 0
+ 12145921 states, stored
+1.2049274e+08 states, matched
+1.3263866e+08 transitions (= stored+matched)
+2.0136766e+09 atomic steps
+hash conflicts: 1.0176788e+08 (resolved)
+
+Stats on memory usage (in Megabytes):
+ 1343.657      equivalent memory usage for states (stored*(State-vector + overhead))
+ 1056.279      actual memory usage for states (compression: 78.61%)
+               state-vector as stored = 63 byte + 28 byte overhead
+  128.000      memory used for hash table (-w24)
+  457.764      memory used for DFS stack (-m10000000)
+ 1641.389      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 262, "pan.___", state 30, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 52, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 61, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 77, "(1)"
+       line 243, "pan.___", state 85, "(1)"
+       line 247, "pan.___", state 97, "(1)"
+       line 251, "pan.___", state 105, "(1)"
+       line 401, "pan.___", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 163, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 177, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 195, "(1)"
+       line 247, "pan.___", state 215, "(1)"
+       line 251, "pan.___", state 223, "(1)"
+       line 680, "pan.___", state 242, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 401, "pan.___", state 249, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 281, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 295, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 313, "(1)"
+       line 247, "pan.___", state 333, "(1)"
+       line 251, "pan.___", state 341, "(1)"
+       line 401, "pan.___", state 360, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 392, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 406, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 424, "(1)"
+       line 247, "pan.___", state 444, "(1)"
+       line 251, "pan.___", state 452, "(1)"
+       line 401, "pan.___", state 473, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 475, "(1)"
+       line 401, "pan.___", state 476, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 476, "else"
+       line 401, "pan.___", state 479, "(1)"
+       line 405, "pan.___", state 487, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 489, "(1)"
+       line 405, "pan.___", state 490, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 490, "else"
+       line 405, "pan.___", state 493, "(1)"
+       line 405, "pan.___", state 494, "(1)"
+       line 405, "pan.___", state 494, "(1)"
+       line 403, "pan.___", state 499, "((i<1))"
+       line 403, "pan.___", state 499, "((i>=1))"
+       line 410, "pan.___", state 505, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 507, "(1)"
+       line 410, "pan.___", state 508, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 508, "else"
+       line 410, "pan.___", state 511, "(1)"
+       line 410, "pan.___", state 512, "(1)"
+       line 410, "pan.___", state 512, "(1)"
+       line 414, "pan.___", state 519, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 521, "(1)"
+       line 414, "pan.___", state 522, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 522, "else"
+       line 414, "pan.___", state 525, "(1)"
+       line 414, "pan.___", state 526, "(1)"
+       line 414, "pan.___", state 526, "(1)"
+       line 412, "pan.___", state 531, "((i<2))"
+       line 412, "pan.___", state 531, "((i>=2))"
+       line 239, "pan.___", state 537, "(1)"
+       line 243, "pan.___", state 545, "(1)"
+       line 243, "pan.___", state 546, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 546, "else"
+       line 241, "pan.___", state 551, "((i<1))"
+       line 241, "pan.___", state 551, "((i>=1))"
+       line 247, "pan.___", state 557, "(1)"
+       line 247, "pan.___", state 558, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 558, "else"
+       line 251, "pan.___", state 565, "(1)"
+       line 251, "pan.___", state 566, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 566, "else"
+       line 249, "pan.___", state 571, "((i<2))"
+       line 249, "pan.___", state 571, "((i>=2))"
+       line 256, "pan.___", state 575, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 575, "else"
+       line 421, "pan.___", state 577, "(1)"
+       line 421, "pan.___", state 577, "(1)"
+       line 680, "pan.___", state 580, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 680, "pan.___", state 581, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 680, "pan.___", state 582, "(1)"
+       line 401, "pan.___", state 589, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 621, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 635, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 653, "(1)"
+       line 247, "pan.___", state 673, "(1)"
+       line 251, "pan.___", state 681, "(1)"
+       line 401, "pan.___", state 707, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 709, "(1)"
+       line 401, "pan.___", state 710, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 710, "else"
+       line 401, "pan.___", state 713, "(1)"
+       line 405, "pan.___", state 721, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 723, "(1)"
+       line 405, "pan.___", state 724, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 724, "else"
+       line 405, "pan.___", state 727, "(1)"
+       line 405, "pan.___", state 728, "(1)"
+       line 405, "pan.___", state 728, "(1)"
+       line 403, "pan.___", state 733, "((i<1))"
+       line 403, "pan.___", state 733, "((i>=1))"
+       line 410, "pan.___", state 739, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 741, "(1)"
+       line 410, "pan.___", state 742, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 742, "else"
+       line 410, "pan.___", state 745, "(1)"
+       line 410, "pan.___", state 746, "(1)"
+       line 410, "pan.___", state 746, "(1)"
+       line 414, "pan.___", state 753, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 755, "(1)"
+       line 414, "pan.___", state 756, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 756, "else"
+       line 414, "pan.___", state 759, "(1)"
+       line 414, "pan.___", state 760, "(1)"
+       line 414, "pan.___", state 760, "(1)"
+       line 412, "pan.___", state 765, "((i<2))"
+       line 412, "pan.___", state 765, "((i>=2))"
+       line 239, "pan.___", state 771, "(1)"
+       line 243, "pan.___", state 779, "(1)"
+       line 243, "pan.___", state 780, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 780, "else"
+       line 241, "pan.___", state 785, "((i<1))"
+       line 241, "pan.___", state 785, "((i>=1))"
+       line 247, "pan.___", state 791, "(1)"
+       line 247, "pan.___", state 792, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 792, "else"
+       line 251, "pan.___", state 799, "(1)"
+       line 251, "pan.___", state 800, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 800, "else"
+       line 256, "pan.___", state 809, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 809, "else"
+       line 421, "pan.___", state 811, "(1)"
+       line 421, "pan.___", state 811, "(1)"
+       line 401, "pan.___", state 818, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 820, "(1)"
+       line 401, "pan.___", state 821, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 821, "else"
+       line 401, "pan.___", state 824, "(1)"
+       line 405, "pan.___", state 832, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 834, "(1)"
+       line 405, "pan.___", state 835, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 835, "else"
+       line 405, "pan.___", state 838, "(1)"
+       line 405, "pan.___", state 839, "(1)"
+       line 405, "pan.___", state 839, "(1)"
+       line 403, "pan.___", state 844, "((i<1))"
+       line 403, "pan.___", state 844, "((i>=1))"
+       line 410, "pan.___", state 850, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 852, "(1)"
+       line 410, "pan.___", state 853, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 853, "else"
+       line 410, "pan.___", state 856, "(1)"
+       line 410, "pan.___", state 857, "(1)"
+       line 410, "pan.___", state 857, "(1)"
+       line 414, "pan.___", state 864, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 866, "(1)"
+       line 414, "pan.___", state 867, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 867, "else"
+       line 414, "pan.___", state 870, "(1)"
+       line 414, "pan.___", state 871, "(1)"
+       line 414, "pan.___", state 871, "(1)"
+       line 412, "pan.___", state 876, "((i<2))"
+       line 412, "pan.___", state 876, "((i>=2))"
+       line 239, "pan.___", state 882, "(1)"
+       line 243, "pan.___", state 890, "(1)"
+       line 243, "pan.___", state 891, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 891, "else"
+       line 241, "pan.___", state 896, "((i<1))"
+       line 241, "pan.___", state 896, "((i>=1))"
+       line 247, "pan.___", state 902, "(1)"
+       line 247, "pan.___", state 903, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 903, "else"
+       line 251, "pan.___", state 910, "(1)"
+       line 251, "pan.___", state 911, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 911, "else"
+       line 249, "pan.___", state 916, "((i<2))"
+       line 249, "pan.___", state 916, "((i>=2))"
+       line 256, "pan.___", state 920, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 920, "else"
+       line 421, "pan.___", state 922, "(1)"
+       line 421, "pan.___", state 922, "(1)"
+       line 688, "pan.___", state 926, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 401, "pan.___", state 931, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 963, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 977, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 995, "(1)"
+       line 247, "pan.___", state 1015, "(1)"
+       line 251, "pan.___", state 1023, "(1)"
+       line 401, "pan.___", state 1045, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1077, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1091, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1109, "(1)"
+       line 247, "pan.___", state 1129, "(1)"
+       line 251, "pan.___", state 1137, "(1)"
+       line 401, "pan.___", state 1160, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1192, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1206, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1224, "(1)"
+       line 247, "pan.___", state 1244, "(1)"
+       line 251, "pan.___", state 1252, "(1)"
+       line 401, "pan.___", state 1271, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1303, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1317, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1335, "(1)"
+       line 247, "pan.___", state 1355, "(1)"
+       line 251, "pan.___", state 1363, "(1)"
+       line 401, "pan.___", state 1387, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1419, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1433, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1451, "(1)"
+       line 247, "pan.___", state 1471, "(1)"
+       line 251, "pan.___", state 1479, "(1)"
+       line 401, "pan.___", state 1498, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1530, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1544, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1562, "(1)"
+       line 247, "pan.___", state 1582, "(1)"
+       line 251, "pan.___", state 1590, "(1)"
+       line 401, "pan.___", state 1612, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1644, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1658, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1676, "(1)"
+       line 247, "pan.___", state 1696, "(1)"
+       line 251, "pan.___", state 1704, "(1)"
+       line 727, "pan.___", state 1723, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 401, "pan.___", state 1730, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1762, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1776, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1794, "(1)"
+       line 247, "pan.___", state 1814, "(1)"
+       line 251, "pan.___", state 1822, "(1)"
+       line 401, "pan.___", state 1841, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1873, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1887, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1905, "(1)"
+       line 247, "pan.___", state 1925, "(1)"
+       line 251, "pan.___", state 1933, "(1)"
+       line 401, "pan.___", state 1954, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 1956, "(1)"
+       line 401, "pan.___", state 1957, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 1957, "else"
+       line 401, "pan.___", state 1960, "(1)"
+       line 405, "pan.___", state 1968, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 1970, "(1)"
+       line 405, "pan.___", state 1971, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 1971, "else"
+       line 405, "pan.___", state 1974, "(1)"
+       line 405, "pan.___", state 1975, "(1)"
+       line 405, "pan.___", state 1975, "(1)"
+       line 403, "pan.___", state 1980, "((i<1))"
+       line 403, "pan.___", state 1980, "((i>=1))"
+       line 410, "pan.___", state 1986, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1988, "(1)"
+       line 410, "pan.___", state 1989, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 1989, "else"
+       line 410, "pan.___", state 1992, "(1)"
+       line 410, "pan.___", state 1993, "(1)"
+       line 410, "pan.___", state 1993, "(1)"
+       line 414, "pan.___", state 2000, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2002, "(1)"
+       line 414, "pan.___", state 2003, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 2003, "else"
+       line 414, "pan.___", state 2006, "(1)"
+       line 414, "pan.___", state 2007, "(1)"
+       line 414, "pan.___", state 2007, "(1)"
+       line 412, "pan.___", state 2012, "((i<2))"
+       line 412, "pan.___", state 2012, "((i>=2))"
+       line 239, "pan.___", state 2018, "(1)"
+       line 243, "pan.___", state 2026, "(1)"
+       line 243, "pan.___", state 2027, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 2027, "else"
+       line 241, "pan.___", state 2032, "((i<1))"
+       line 241, "pan.___", state 2032, "((i>=1))"
+       line 247, "pan.___", state 2038, "(1)"
+       line 247, "pan.___", state 2039, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 2039, "else"
+       line 251, "pan.___", state 2046, "(1)"
+       line 251, "pan.___", state 2047, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 2047, "else"
+       line 249, "pan.___", state 2052, "((i<2))"
+       line 249, "pan.___", state 2052, "((i>=2))"
+       line 256, "pan.___", state 2056, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 2056, "else"
+       line 421, "pan.___", state 2058, "(1)"
+       line 421, "pan.___", state 2058, "(1)"
+       line 727, "pan.___", state 2061, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 727, "pan.___", state 2062, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 727, "pan.___", state 2063, "(1)"
+       line 401, "pan.___", state 2070, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 2102, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2116, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 2134, "(1)"
+       line 247, "pan.___", state 2154, "(1)"
+       line 251, "pan.___", state 2162, "(1)"
+       line 401, "pan.___", state 2187, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 2219, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2233, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 2251, "(1)"
+       line 247, "pan.___", state 2271, "(1)"
+       line 251, "pan.___", state 2279, "(1)"
+       line 401, "pan.___", state 2298, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 2330, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2344, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 2362, "(1)"
+       line 247, "pan.___", state 2382, "(1)"
+       line 251, "pan.___", state 2390, "(1)"
+       line 401, "pan.___", state 2421, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 2453, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2467, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 2485, "(1)"
+       line 247, "pan.___", state 2505, "(1)"
+       line 251, "pan.___", state 2513, "(1)"
+       line 401, "pan.___", state 2530, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 2562, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2576, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 2594, "(1)"
+       line 247, "pan.___", state 2614, "(1)"
+       line 251, "pan.___", state 2622, "(1)"
+       line 887, "pan.___", state 2641, "-end-"
+       (252 of 2641 states)
+unreached in proctype urcu_writer
+       line 401, "pan.___", state 18, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 32, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 50, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 82, "(1)"
+       line 243, "pan.___", state 90, "(1)"
+       line 247, "pan.___", state 102, "(1)"
+       line 262, "pan.___", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 140, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 153, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 193, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 207, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 225, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 239, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 257, "(1)"
+       line 243, "pan.___", state 265, "(1)"
+       line 247, "pan.___", state 277, "(1)"
+       line 251, "pan.___", state 285, "(1)"
+       line 405, "pan.___", state 320, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 338, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 352, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 243, "pan.___", state 378, "(1)"
+       line 247, "pan.___", state 390, "(1)"
+       line 251, "pan.___", state 398, "(1)"
+       line 405, "pan.___", state 440, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 458, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 472, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 243, "pan.___", state 498, "(1)"
+       line 247, "pan.___", state 510, "(1)"
+       line 251, "pan.___", state 518, "(1)"
+       line 405, "pan.___", state 551, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 569, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 583, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 243, "pan.___", state 609, "(1)"
+       line 247, "pan.___", state 621, "(1)"
+       line 251, "pan.___", state 629, "(1)"
+       line 405, "pan.___", state 664, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 682, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 696, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 243, "pan.___", state 722, "(1)"
+       line 247, "pan.___", state 734, "(1)"
+       line 251, "pan.___", state 742, "(1)"
+       line 262, "pan.___", state 795, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 804, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 819, "(1)"
+       line 274, "pan.___", state 826, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 842, "(1)"
+       line 243, "pan.___", state 850, "(1)"
+       line 247, "pan.___", state 862, "(1)"
+       line 251, "pan.___", state 870, "(1)"
+       line 262, "pan.___", state 901, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 910, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 923, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 932, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 948, "(1)"
+       line 243, "pan.___", state 956, "(1)"
+       line 247, "pan.___", state 968, "(1)"
+       line 251, "pan.___", state 976, "(1)"
+       line 266, "pan.___", state 1002, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1015, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1024, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1040, "(1)"
+       line 243, "pan.___", state 1048, "(1)"
+       line 247, "pan.___", state 1060, "(1)"
+       line 251, "pan.___", state 1068, "(1)"
+       line 262, "pan.___", state 1099, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 1108, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1121, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1130, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1146, "(1)"
+       line 243, "pan.___", state 1154, "(1)"
+       line 247, "pan.___", state 1166, "(1)"
+       line 251, "pan.___", state 1174, "(1)"
+       line 266, "pan.___", state 1200, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1213, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1222, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1238, "(1)"
+       line 243, "pan.___", state 1246, "(1)"
+       line 247, "pan.___", state 1258, "(1)"
+       line 251, "pan.___", state 1266, "(1)"
+       line 262, "pan.___", state 1297, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 1306, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1319, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1328, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1344, "(1)"
+       line 243, "pan.___", state 1352, "(1)"
+       line 247, "pan.___", state 1364, "(1)"
+       line 251, "pan.___", state 1372, "(1)"
+       line 266, "pan.___", state 1398, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1411, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1420, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1436, "(1)"
+       line 243, "pan.___", state 1444, "(1)"
+       line 247, "pan.___", state 1456, "(1)"
+       line 251, "pan.___", state 1464, "(1)"
+       line 262, "pan.___", state 1495, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 1504, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1517, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1526, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1542, "(1)"
+       line 243, "pan.___", state 1550, "(1)"
+       line 247, "pan.___", state 1562, "(1)"
+       line 251, "pan.___", state 1570, "(1)"
+       line 1214, "pan.___", state 1586, "-end-"
+       (103 of 1586 states)
+unreached in proctype :init:
+       (0 of 78 states)
+unreached in proctype :never:
+       line 1277, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 388 seconds
+pan: rate 31299.887 states/second
+pan: avg transition delay 2.9256e-06 usec
+cp .input.spin urcu_free_no_rmb.spin.input
+cp .input.spin.trail urcu_free_no_rmb.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow'
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_rmb.spin.input b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_rmb.spin.input
new file mode 100644 (file)
index 0000000..b54b6b4
--- /dev/null
@@ -0,0 +1,1250 @@
+#define NO_RMB
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered as a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. c.f.
+ * http://www.linuxjournal.com/article/8212) */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process have its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barriers dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+/*
+ * Writer-side barrier for the remote-barrier (signal/IPI) scheme: execute a
+ * local full barrier, then request a barrier from every reader by raising
+ * reader_barrier[i], busy-wait while the request is still pending (the flag
+ * is presumably cleared by the reader's smp_mb_recv — not visible here),
+ * and finish with another local full barrier.  The PROGRESS_LABEL keeps the
+ * busy-wait from being reported as a non-progress cycle, since a reader is
+ * allowed to ignore the request forever.
+ * NOTE(review): parameter "j" is unused; it is kept so every smp_mb_send
+ * variant shares the same signature.
+ */
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+/*
+ * Non-REMOTE_BARRIERS configuration: each barrier executes locally in the
+ * process that requests it; there is no reader-side request handling.
+ */
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+/*
+ * Fix: smp_mb_reader is invoked as smp_mb_reader(i, j) in urcu_one_read(),
+ * so it must be a function-like macro.  As an object-like macro the call
+ * site would expand to "smp_mb(i)(i, j)", leaving a stray "(i, j)" in the
+ * preprocessed Promela.
+ */
+#define smp_mb_reader(i, j)    smp_mb(i)
+/* Nothing to receive when barriers are executed locally. */
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+/*
+ * A 2-entry slab can be indexed by a single bit, which keeps the Spin
+ * state vector smaller; larger slabs need a byte.
+ */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+/* Per-reader record of the data values dereferenced on the first and the
+ * (unrolled) second read-side critical section. */
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+/* Gate released once global state is initialized (presumably by init(),
+ * outside this excerpt); see wait_init_done(). */
+bit init_done = 0;
+
+/*
+ * Busy-wait until init_done is set, so processes do not start before the
+ * cached-variable machinery and globals are consistent.
+ */
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+/*
+ * Model out-of-order memory: atomically give each cached global
+ * (urcu_gp_ctr, every urcu_active_readers[] slot, rcu_ptr, every
+ * rcu_data[] slot) a nondeterministic chance to flush its cached write
+ * to memory.  With HAVE_OOO_CACHE_READ, reads may likewise be refreshed
+ * from memory out of order; otherwise a read barrier keeps cached reads
+ * coherent.  "i" is a caller-supplied scratch loop counter (clobbered).
+ */
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+/* Token bitmask tracking which reader "instructions" have executed. */
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+/* One token bit per step of the read-lock body; each instance of the
+ * macro shifts these by its "base". */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+/*
+ * Out-of-order model of rcu_read_lock(): read the per-reader count (A),
+ * branch on the nesting mask (B), and either snapshot the global GP
+ * counter into the per-reader count (outermost, C + store) or store the
+ * incremented count (nested).  Each CONSUME_TOKENS guard lists exactly
+ * the prior steps (RAW/WAR/dominance) the step depends on, so Spin
+ * explores every ordering a weakly-ordered CPU could produce.
+ */
+#define PROCEDURE_READ_LOCK(base, consumetoken, producetoken)                          \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, READ_PROD_A_READ << base) ->  \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base,                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_C_IF_TRUE_READ     /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK (comment fixed: was "PROCEDURE_READ_LOCK",
+ * a copy-paste of the section above) */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+/*
+ * Out-of-order model of rcu_read_unlock(): read the per-reader count,
+ * then write it back decremented.  The WAR dependency between the two
+ * steps is the only ordering constraint.
+ */
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);            \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);        \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+/*
+ * Token bit layout for urcu_reader.  Each PROCEDURE_READ_LOCK/UNLOCK
+ * instance reserves a contiguous bit range starting at its "base"; the
+ * *_OUT token marks that instance's completion.  Bits 0..29 are used,
+ * so the layout fits the int token mask.
+ */
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+/*
+ * Model one full RCU read-side critical section (nested lock/unlock, a
+ * dependent pointer+data read) plus an unrolled second critical section,
+ * as an out-of-order instruction scheduler: each ":: CONSUME_TOKENS(...)"
+ * guard may fire as soon as the steps it depends on have produced their
+ * tokens, so Spin explores every ordering the listed dependencies (RAW,
+ * WAR, pre/post-dominance, and barrier ordering) allow.  Under NO_MB or
+ * REMOTE_BARRIERS the mb() tokens are pre-produced, removing the local
+ * barrier ordering constraints.
+ */
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       /* Error-injection config: pretend all four mb()s already ran. */
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       /* Reader-local mb() slots are satisfied up-front; actual barriers
+        * execute via smp_mb_recv() in the block below. */
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                * The guard below enumerates every "program-order prefix"
+                * of produced tokens at which a remote barrier may land.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               /* Execute the remotely-requested barrier outside the atomic
+                * block, then resume. */
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB | READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation has the barrier
+                        * because the performance impact added by a branch in the common case does not
+                        * justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_UNLOCK_OUT             /* RAW */
+                                           | READ_PROC_SECOND_MB       /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB        /* mb() orders reads */
+                                           | READ_LOCK_NESTED_OUT      /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT,   /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event mb()s are removed (execution using signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * to spill its execution on other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+/*
+ * Reader process: once initialization is done, repeatedly execute the
+ * modeled read-side critical section forever.  i/j/nest_i/tmp/tmp2 are
+ * scratch variables handed to urcu_one_read().
+ */
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test reader's progress here, because we are mainly
+                * interested in writer's progress. The reader never blocks
+                * anyway. We have to test for reader/writer's progress
+                * separately, otherwise we could think the writer is doing
+                * progress when it's blocked by an always progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+/* Token bitmask tracking which writer "instructions" have executed. */
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+/* All writer steps except the *_WAIT_LOOP branch tokens. */
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+/* Clears every writer token, including the branch (*_WAIT_LOOP) bits. */
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existing dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we are always starting by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT         /* Control dependency : need to branch out of
+                                                                * the loop to execute the next flip (CHECK) */
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill on the next loop execution. Given the validation we do
+                * checks for the data entry read to be poisoned, it's ok if
+                * we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_wmb.define b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_wmb.define
new file mode 100644 (file)
index 0000000..710f29d
--- /dev/null
@@ -0,0 +1 @@
+#define NO_WMB
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_wmb.log b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_wmb.log
new file mode 100644 (file)
index 0000000..9422810
--- /dev/null
@@ -0,0 +1,511 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_no_wmb.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1272)
+Depth=    6716 States=    1e+06 Transitions= 5.89e+06 Memory=   550.432        t=   14.4 R=   7e+04
+Depth=    6716 States=    2e+06 Transitions= 1.63e+07 Memory=   634.318        t=   42.7 R=   5e+04
+Depth=    6716 States=    3e+06 Transitions= 2.25e+07 Memory=   718.303        t=   58.7 R=   5e+04
+pan: resizing hashtable to -w22..  done
+Depth=    6716 States=    4e+06 Transitions= 3.22e+07 Memory=   833.311        t=   85.5 R=   5e+04
+Depth=    6716 States=    5e+06 Transitions= 4.86e+07 Memory=   917.295        t=    132 R=   4e+04
+Depth=    6716 States=    6e+06 Transitions= 6.42e+07 Memory=  1001.279        t=    176 R=   3e+04
+Depth=    6716 States=    7e+06 Transitions= 7.63e+07 Memory=  1085.264        t=    209 R=   3e+04
+Depth=    6716 States=    8e+06 Transitions= 8.46e+07 Memory=  1169.151        t=    232 R=   3e+04
+Depth=    6716 States=    9e+06 Transitions= 9.43e+07 Memory=  1253.135        t=    259 R=   3e+04
+pan: resizing hashtable to -w24..  done
+Depth=    6716 States=    1e+07 Transitions= 1.03e+08 Memory=  1461.115        t=    283 R=   4e+04
+Depth=    6716 States=  1.1e+07 Transitions= 1.15e+08 Memory=  1545.100        t=    316 R=   3e+04
+Depth=    6716 States=  1.2e+07 Transitions= 1.21e+08 Memory=  1629.084        t=    331 R=   4e+04
+pan: claim violated! (at depth 1387)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 6716, errors: 1
+ 12467262 states, stored
+1.1402162e+08 states, matched
+1.2648888e+08 transitions (= stored+matched)
+1.8978936e+09 atomic steps
+hash conflicts:  82801757 (resolved)
+
+Stats on memory usage (in Megabytes):
+ 1379.206      equivalent memory usage for states (stored*(State-vector + overhead))
+ 1083.262      actual memory usage for states (compression: 78.54%)
+               state-vector as stored = 63 byte + 28 byte overhead
+  128.000      memory used for hash table (-w24)
+  457.764      memory used for DFS stack (-m10000000)
+ 1668.342      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 262, "pan.___", state 30, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 52, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 61, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 77, "(1)"
+       line 243, "pan.___", state 85, "(1)"
+       line 247, "pan.___", state 97, "(1)"
+       line 251, "pan.___", state 105, "(1)"
+       line 401, "pan.___", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 163, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 177, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 195, "(1)"
+       line 247, "pan.___", state 215, "(1)"
+       line 251, "pan.___", state 223, "(1)"
+       line 680, "pan.___", state 242, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 401, "pan.___", state 249, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 281, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 295, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 313, "(1)"
+       line 247, "pan.___", state 333, "(1)"
+       line 251, "pan.___", state 341, "(1)"
+       line 401, "pan.___", state 360, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 392, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 406, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 424, "(1)"
+       line 247, "pan.___", state 444, "(1)"
+       line 251, "pan.___", state 452, "(1)"
+       line 401, "pan.___", state 473, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 475, "(1)"
+       line 401, "pan.___", state 476, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 476, "else"
+       line 401, "pan.___", state 479, "(1)"
+       line 405, "pan.___", state 487, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 489, "(1)"
+       line 405, "pan.___", state 490, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 490, "else"
+       line 405, "pan.___", state 493, "(1)"
+       line 405, "pan.___", state 494, "(1)"
+       line 405, "pan.___", state 494, "(1)"
+       line 403, "pan.___", state 499, "((i<1))"
+       line 403, "pan.___", state 499, "((i>=1))"
+       line 410, "pan.___", state 505, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 507, "(1)"
+       line 410, "pan.___", state 508, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 508, "else"
+       line 410, "pan.___", state 511, "(1)"
+       line 410, "pan.___", state 512, "(1)"
+       line 410, "pan.___", state 512, "(1)"
+       line 414, "pan.___", state 519, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 521, "(1)"
+       line 414, "pan.___", state 522, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 522, "else"
+       line 414, "pan.___", state 525, "(1)"
+       line 414, "pan.___", state 526, "(1)"
+       line 414, "pan.___", state 526, "(1)"
+       line 412, "pan.___", state 531, "((i<2))"
+       line 412, "pan.___", state 531, "((i>=2))"
+       line 239, "pan.___", state 537, "(1)"
+       line 243, "pan.___", state 545, "(1)"
+       line 243, "pan.___", state 546, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 546, "else"
+       line 241, "pan.___", state 551, "((i<1))"
+       line 241, "pan.___", state 551, "((i>=1))"
+       line 247, "pan.___", state 557, "(1)"
+       line 247, "pan.___", state 558, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 558, "else"
+       line 251, "pan.___", state 565, "(1)"
+       line 251, "pan.___", state 566, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 566, "else"
+       line 249, "pan.___", state 571, "((i<2))"
+       line 249, "pan.___", state 571, "((i>=2))"
+       line 256, "pan.___", state 575, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 575, "else"
+       line 421, "pan.___", state 577, "(1)"
+       line 421, "pan.___", state 577, "(1)"
+       line 680, "pan.___", state 580, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 680, "pan.___", state 581, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 680, "pan.___", state 582, "(1)"
+       line 401, "pan.___", state 589, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 621, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 635, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 653, "(1)"
+       line 247, "pan.___", state 673, "(1)"
+       line 251, "pan.___", state 681, "(1)"
+       line 401, "pan.___", state 707, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 709, "(1)"
+       line 401, "pan.___", state 710, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 710, "else"
+       line 401, "pan.___", state 713, "(1)"
+       line 405, "pan.___", state 721, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 723, "(1)"
+       line 405, "pan.___", state 724, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 724, "else"
+       line 405, "pan.___", state 727, "(1)"
+       line 405, "pan.___", state 728, "(1)"
+       line 405, "pan.___", state 728, "(1)"
+       line 403, "pan.___", state 733, "((i<1))"
+       line 403, "pan.___", state 733, "((i>=1))"
+       line 410, "pan.___", state 739, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 741, "(1)"
+       line 410, "pan.___", state 742, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 742, "else"
+       line 410, "pan.___", state 745, "(1)"
+       line 410, "pan.___", state 746, "(1)"
+       line 410, "pan.___", state 746, "(1)"
+       line 414, "pan.___", state 753, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 755, "(1)"
+       line 414, "pan.___", state 756, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 756, "else"
+       line 414, "pan.___", state 759, "(1)"
+       line 414, "pan.___", state 760, "(1)"
+       line 414, "pan.___", state 760, "(1)"
+       line 412, "pan.___", state 765, "((i<2))"
+       line 412, "pan.___", state 765, "((i>=2))"
+       line 239, "pan.___", state 771, "(1)"
+       line 243, "pan.___", state 779, "(1)"
+       line 243, "pan.___", state 780, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 780, "else"
+       line 241, "pan.___", state 785, "((i<1))"
+       line 241, "pan.___", state 785, "((i>=1))"
+       line 247, "pan.___", state 791, "(1)"
+       line 247, "pan.___", state 792, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 792, "else"
+       line 251, "pan.___", state 799, "(1)"
+       line 251, "pan.___", state 800, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 800, "else"
+       line 256, "pan.___", state 809, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 809, "else"
+       line 421, "pan.___", state 811, "(1)"
+       line 421, "pan.___", state 811, "(1)"
+       line 401, "pan.___", state 818, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 820, "(1)"
+       line 401, "pan.___", state 821, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 821, "else"
+       line 401, "pan.___", state 824, "(1)"
+       line 405, "pan.___", state 832, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 834, "(1)"
+       line 405, "pan.___", state 835, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 835, "else"
+       line 405, "pan.___", state 838, "(1)"
+       line 405, "pan.___", state 839, "(1)"
+       line 405, "pan.___", state 839, "(1)"
+       line 403, "pan.___", state 844, "((i<1))"
+       line 403, "pan.___", state 844, "((i>=1))"
+       line 410, "pan.___", state 850, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 852, "(1)"
+       line 410, "pan.___", state 853, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 853, "else"
+       line 410, "pan.___", state 856, "(1)"
+       line 410, "pan.___", state 857, "(1)"
+       line 410, "pan.___", state 857, "(1)"
+       line 414, "pan.___", state 864, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 866, "(1)"
+       line 414, "pan.___", state 867, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 867, "else"
+       line 414, "pan.___", state 870, "(1)"
+       line 414, "pan.___", state 871, "(1)"
+       line 414, "pan.___", state 871, "(1)"
+       line 412, "pan.___", state 876, "((i<2))"
+       line 412, "pan.___", state 876, "((i>=2))"
+       line 239, "pan.___", state 882, "(1)"
+       line 243, "pan.___", state 890, "(1)"
+       line 243, "pan.___", state 891, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 891, "else"
+       line 241, "pan.___", state 896, "((i<1))"
+       line 241, "pan.___", state 896, "((i>=1))"
+       line 247, "pan.___", state 902, "(1)"
+       line 247, "pan.___", state 903, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 903, "else"
+       line 251, "pan.___", state 910, "(1)"
+       line 251, "pan.___", state 911, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 911, "else"
+       line 249, "pan.___", state 916, "((i<2))"
+       line 249, "pan.___", state 916, "((i>=2))"
+       line 256, "pan.___", state 920, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 920, "else"
+       line 421, "pan.___", state 922, "(1)"
+       line 421, "pan.___", state 922, "(1)"
+       line 688, "pan.___", state 926, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 401, "pan.___", state 931, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 963, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 977, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 995, "(1)"
+       line 247, "pan.___", state 1015, "(1)"
+       line 251, "pan.___", state 1023, "(1)"
+       line 401, "pan.___", state 1045, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1077, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1091, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1109, "(1)"
+       line 247, "pan.___", state 1129, "(1)"
+       line 251, "pan.___", state 1137, "(1)"
+       line 401, "pan.___", state 1160, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1192, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1206, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1224, "(1)"
+       line 247, "pan.___", state 1244, "(1)"
+       line 251, "pan.___", state 1252, "(1)"
+       line 401, "pan.___", state 1271, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1303, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1317, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1335, "(1)"
+       line 247, "pan.___", state 1355, "(1)"
+       line 251, "pan.___", state 1363, "(1)"
+       line 401, "pan.___", state 1387, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1419, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1433, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1451, "(1)"
+       line 247, "pan.___", state 1471, "(1)"
+       line 251, "pan.___", state 1479, "(1)"
+       line 401, "pan.___", state 1498, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1530, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1544, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1562, "(1)"
+       line 247, "pan.___", state 1582, "(1)"
+       line 251, "pan.___", state 1590, "(1)"
+       line 401, "pan.___", state 1612, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1644, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1658, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1676, "(1)"
+       line 247, "pan.___", state 1696, "(1)"
+       line 251, "pan.___", state 1704, "(1)"
+       line 727, "pan.___", state 1723, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 401, "pan.___", state 1730, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1762, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1776, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1794, "(1)"
+       line 247, "pan.___", state 1814, "(1)"
+       line 251, "pan.___", state 1822, "(1)"
+       line 401, "pan.___", state 1841, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1873, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1887, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1905, "(1)"
+       line 247, "pan.___", state 1925, "(1)"
+       line 251, "pan.___", state 1933, "(1)"
+       line 401, "pan.___", state 1954, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 1956, "(1)"
+       line 401, "pan.___", state 1957, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 1957, "else"
+       line 401, "pan.___", state 1960, "(1)"
+       line 405, "pan.___", state 1968, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 1970, "(1)"
+       line 405, "pan.___", state 1971, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 1971, "else"
+       line 405, "pan.___", state 1974, "(1)"
+       line 405, "pan.___", state 1975, "(1)"
+       line 405, "pan.___", state 1975, "(1)"
+       line 403, "pan.___", state 1980, "((i<1))"
+       line 403, "pan.___", state 1980, "((i>=1))"
+       line 410, "pan.___", state 1986, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1988, "(1)"
+       line 410, "pan.___", state 1989, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 1989, "else"
+       line 410, "pan.___", state 1992, "(1)"
+       line 410, "pan.___", state 1993, "(1)"
+       line 410, "pan.___", state 1993, "(1)"
+       line 414, "pan.___", state 2000, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2002, "(1)"
+       line 414, "pan.___", state 2003, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 2003, "else"
+       line 414, "pan.___", state 2006, "(1)"
+       line 414, "pan.___", state 2007, "(1)"
+       line 414, "pan.___", state 2007, "(1)"
+       line 412, "pan.___", state 2012, "((i<2))"
+       line 412, "pan.___", state 2012, "((i>=2))"
+       line 239, "pan.___", state 2018, "(1)"
+       line 243, "pan.___", state 2026, "(1)"
+       line 243, "pan.___", state 2027, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 2027, "else"
+       line 241, "pan.___", state 2032, "((i<1))"
+       line 241, "pan.___", state 2032, "((i>=1))"
+       line 247, "pan.___", state 2038, "(1)"
+       line 247, "pan.___", state 2039, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 2039, "else"
+       line 251, "pan.___", state 2046, "(1)"
+       line 251, "pan.___", state 2047, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 2047, "else"
+       line 249, "pan.___", state 2052, "((i<2))"
+       line 249, "pan.___", state 2052, "((i>=2))"
+       line 256, "pan.___", state 2056, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 2056, "else"
+       line 421, "pan.___", state 2058, "(1)"
+       line 421, "pan.___", state 2058, "(1)"
+       line 727, "pan.___", state 2061, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 727, "pan.___", state 2062, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 727, "pan.___", state 2063, "(1)"
+       line 401, "pan.___", state 2070, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 2102, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2116, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 2134, "(1)"
+       line 247, "pan.___", state 2154, "(1)"
+       line 251, "pan.___", state 2162, "(1)"
+       line 401, "pan.___", state 2187, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 2219, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2233, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 2251, "(1)"
+       line 247, "pan.___", state 2271, "(1)"
+       line 251, "pan.___", state 2279, "(1)"
+       line 401, "pan.___", state 2298, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 2330, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2344, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 2362, "(1)"
+       line 247, "pan.___", state 2382, "(1)"
+       line 251, "pan.___", state 2390, "(1)"
+       line 239, "pan.___", state 2421, "(1)"
+       line 247, "pan.___", state 2441, "(1)"
+       line 251, "pan.___", state 2449, "(1)"
+       line 239, "pan.___", state 2464, "(1)"
+       line 247, "pan.___", state 2484, "(1)"
+       line 251, "pan.___", state 2492, "(1)"
+       line 887, "pan.___", state 2509, "-end-"
+       (246 of 2509 states)
+unreached in proctype urcu_writer
+       line 401, "pan.___", state 19, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 33, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 83, "(1)"
+       line 243, "pan.___", state 91, "(1)"
+       line 262, "pan.___", state 132, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 262, "pan.___", state 134, "(1)"
+       line 266, "pan.___", state 141, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 143, "(1)"
+       line 266, "pan.___", state 144, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 266, "pan.___", state 144, "else"
+       line 264, "pan.___", state 149, "((i<1))"
+       line 264, "pan.___", state 149, "((i>=1))"
+       line 270, "pan.___", state 154, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 156, "(1)"
+       line 270, "pan.___", state 157, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 270, "pan.___", state 157, "else"
+       line 274, "pan.___", state 163, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 165, "(1)"
+       line 274, "pan.___", state 166, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 274, "pan.___", state 166, "else"
+       line 279, "pan.___", state 175, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 279, "pan.___", state 175, "else"
+       line 401, "pan.___", state 194, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 208, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 226, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 240, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 258, "(1)"
+       line 243, "pan.___", state 266, "(1)"
+       line 247, "pan.___", state 278, "(1)"
+       line 251, "pan.___", state 286, "(1)"
+       line 405, "pan.___", state 321, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 339, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 353, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 243, "pan.___", state 379, "(1)"
+       line 247, "pan.___", state 391, "(1)"
+       line 251, "pan.___", state 399, "(1)"
+       line 405, "pan.___", state 441, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 459, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 473, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 243, "pan.___", state 499, "(1)"
+       line 247, "pan.___", state 511, "(1)"
+       line 251, "pan.___", state 519, "(1)"
+       line 405, "pan.___", state 552, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 570, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 584, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 243, "pan.___", state 610, "(1)"
+       line 247, "pan.___", state 622, "(1)"
+       line 251, "pan.___", state 630, "(1)"
+       line 405, "pan.___", state 665, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 683, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 697, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 243, "pan.___", state 723, "(1)"
+       line 247, "pan.___", state 735, "(1)"
+       line 251, "pan.___", state 743, "(1)"
+       line 262, "pan.___", state 796, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 805, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 843, "(1)"
+       line 243, "pan.___", state 851, "(1)"
+       line 247, "pan.___", state 863, "(1)"
+       line 251, "pan.___", state 871, "(1)"
+       line 262, "pan.___", state 902, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 911, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 924, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 933, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 949, "(1)"
+       line 243, "pan.___", state 957, "(1)"
+       line 247, "pan.___", state 969, "(1)"
+       line 251, "pan.___", state 977, "(1)"
+       line 266, "pan.___", state 1003, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1016, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1025, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1041, "(1)"
+       line 243, "pan.___", state 1049, "(1)"
+       line 247, "pan.___", state 1061, "(1)"
+       line 251, "pan.___", state 1069, "(1)"
+       line 262, "pan.___", state 1100, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 1109, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1122, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1131, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1147, "(1)"
+       line 243, "pan.___", state 1155, "(1)"
+       line 247, "pan.___", state 1167, "(1)"
+       line 251, "pan.___", state 1175, "(1)"
+       line 266, "pan.___", state 1201, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1214, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1223, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1239, "(1)"
+       line 243, "pan.___", state 1247, "(1)"
+       line 247, "pan.___", state 1259, "(1)"
+       line 251, "pan.___", state 1267, "(1)"
+       line 262, "pan.___", state 1298, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 1307, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1320, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1329, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1345, "(1)"
+       line 243, "pan.___", state 1353, "(1)"
+       line 247, "pan.___", state 1365, "(1)"
+       line 251, "pan.___", state 1373, "(1)"
+       line 266, "pan.___", state 1399, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1412, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1421, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1437, "(1)"
+       line 243, "pan.___", state 1445, "(1)"
+       line 247, "pan.___", state 1457, "(1)"
+       line 251, "pan.___", state 1465, "(1)"
+       line 262, "pan.___", state 1496, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 1505, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1518, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1527, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1543, "(1)"
+       line 243, "pan.___", state 1551, "(1)"
+       line 247, "pan.___", state 1563, "(1)"
+       line 251, "pan.___", state 1571, "(1)"
+       line 1214, "pan.___", state 1587, "-end-"
+       (109 of 1587 states)
+unreached in proctype :init:
+       line 1225, "pan.___", state 9, "((j<2))"
+       line 1225, "pan.___", state 9, "((j>=2))"
+       line 1226, "pan.___", state 20, "((j<2))"
+       line 1226, "pan.___", state 20, "((j>=2))"
+       line 1231, "pan.___", state 33, "((j<2))"
+       line 1231, "pan.___", state 33, "((j>=2))"
+       line 1229, "pan.___", state 43, "((i<1))"
+       line 1229, "pan.___", state 43, "((i>=1))"
+       line 1239, "pan.___", state 54, "((j<2))"
+       line 1239, "pan.___", state 54, "((j>=2))"
+       line 1243, "pan.___", state 67, "((j<2))"
+       line 1243, "pan.___", state 67, "((j>=2))"
+       (6 of 78 states)
+unreached in proctype :never:
+       line 1277, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 346 seconds
+pan: rate 36012.773 states/second
+pan: avg transition delay 2.7369e-06 usec
+cp .input.spin urcu_free_no_wmb.spin.input
+cp .input.spin.trail urcu_free_no_wmb.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow'
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_wmb.spin.input b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_wmb.spin.input
new file mode 100644 (file)
index 0000000..07b3110
--- /dev/null
@@ -0,0 +1,1250 @@
+#define NO_WMB
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered as a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. c.f.
+ * http://www.linuxjournal.com/article/8212)
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barriers dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader  smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+/*
+ * Models rcu_read_lock() as token-gated dataflow alternatives meant to be
+ * pasted into an if/fi: read the per-reader nesting counter, branch on
+ * whether this is an outermost lock, then either store the global GP
+ * counter snapshot (outermost) or increment the nesting count (nested).
+ * "base" shifts this instance's token bits, "consumetoken" gates the
+ * first read, and "producetoken" is emitted once the counter write has
+ * been issued.  Only data/control dependencies constrain ordering, so the
+ * verifier explores all permitted instruction interleavings.
+ */
+#define PROCEDURE_READ_LOCK(base, consumetoken, producetoken)                          \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, READ_PROD_A_READ << base) ->  \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base,                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_C_IF_TRUE_READ     /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+/*
+ * Models rcu_read_unlock(): read the per-reader nesting counter, then
+ * write it back decremented by one.  The token gating (WAR on the read)
+ * lets the two steps interleave with other reader-side instructions as
+ * the dependencies permit.
+ */
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);            \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);        \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/*
+ * Token bit layout for the reader process, allocated in program order:
+ * lock (bits 1-5), first mb (6), nested lock (7-11), pointer/data reads
+ * (12-13), nested unlock (14-15), second mb (16), unlock (17-18), then
+ * the unrolled second iteration (19-29).
+ */
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches (bits 0 to 29). */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+/*
+ * One complete read-side critical section, modeled as a token-gated
+ * dataflow graph: nested lock, pointer read, data access, nested unlock,
+ * unlock, then an unrolled second iteration of the same sequence.  Each
+ * alternative in the inner if/fi fires when its consumed tokens are
+ * present, so the verifier explores every instruction ordering the data
+ * and control dependencies allow.  With NO_MB or REMOTE_BARRIERS the mb
+ * tokens are pre-produced, turning the explicit barrier steps into
+ * no-ops; REMOTE_BARRIERS instead executes smp_mb_recv() at points
+ * matching program order (see the big disjunction below).
+ */
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                *
+                * Each disjunct below corresponds to one point in program
+                * order (before the lock, between lock and nested lock,
+                * ... after the last unlock) at which the IPI-driven
+                * barrier may be received.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB | READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation has the barrier
+                        * because the performance impact added by a branch in the common case does not
+                        * justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_UNLOCK_OUT             /* RAW */
+                                           | READ_PROC_SECOND_MB       /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB        /* mb() orders reads */
+                                           | READ_LOCK_NESTED_OUT      /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT,   /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event mb()s are removed (execution using signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * to spill its execution on other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+/*
+ * Reader process: after initialization completes, repeat read-side
+ * critical sections forever.  Progress is intentionally not asserted
+ * here (see comment below); only the writer's progress is checked.
+ */
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test reader's progress here, because we are mainly
+                * interested in writer's progress. The reader never blocks
+                * anyway. We have to test for reader/writer's progress
+                * separately, otherwise we could think the writer is doing
+                * progress when it's blocked by an always progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+/* Publish new data: write the slab entry, wmb, then xchg the pointer. */
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+/*
+ * All writer tokens except the transient wait-loop bits; used to detect
+ * completion of one grace-period iteration.
+ */
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+/* Must clear all writer tokens (bits 0 to 14), including wait loops. */
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existing dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we are always starting by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT         /* Control dependency : need to branch out of
+                                                                * the loop to execute the next flip (CHECK) */
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill on the next loop execution. Given the validation we do
+                * checks for the data entry read to be poisoned, it's ok if
+                * we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_wmb.spin.input.trail b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_no_wmb.spin.input.trail
new file mode 100644 (file)
index 0000000..8c8735f
--- /dev/null
@@ -0,0 +1,1390 @@
+-2:3:-2
+-4:-4:-4
+1:0:4176
+2:3:4096
+3:3:4099
+4:3:4099
+5:3:4102
+6:3:4110
+7:3:4110
+8:3:4113
+9:3:4119
+10:3:4123
+11:3:4123
+12:3:4126
+13:3:4136
+14:3:4144
+15:3:4144
+16:3:4147
+17:3:4153
+18:3:4157
+19:3:4157
+20:3:4160
+21:3:4166
+22:3:4170
+23:3:4171
+24:0:4176
+25:3:4173
+26:0:4176
+27:2:2511
+28:0:4176
+29:2:2517
+30:0:4176
+31:2:2518
+32:0:4176
+33:2:2520
+34:0:4176
+35:2:2521
+36:0:4176
+37:2:2522
+38:0:4176
+39:2:2523
+40:2:2524
+41:2:2528
+42:2:2529
+43:2:2537
+44:2:2538
+45:2:2542
+46:2:2543
+47:2:2551
+48:2:2556
+49:2:2560
+50:2:2561
+51:2:2569
+52:2:2570
+53:2:2574
+54:2:2575
+55:2:2569
+56:2:2570
+57:2:2574
+58:2:2575
+59:2:2583
+60:2:2588
+61:2:2595
+62:2:2596
+63:2:2603
+64:2:2608
+65:2:2615
+66:2:2616
+67:2:2615
+68:2:2616
+69:2:2623
+70:2:2633
+71:0:4176
+72:2:2522
+73:0:4176
+74:2:2685
+75:2:2686
+76:2:2687
+77:0:4176
+78:2:2522
+79:0:4176
+80:2:2692
+81:0:4176
+82:2:3305
+83:2:3306
+84:2:3310
+85:2:3314
+86:2:3315
+87:2:3319
+88:2:3324
+89:2:3332
+90:2:3336
+91:2:3337
+92:2:3332
+93:2:3333
+94:2:3341
+95:2:3348
+96:2:3355
+97:2:3356
+98:2:3363
+99:2:3368
+100:2:3375
+101:2:3376
+102:2:3375
+103:2:3376
+104:2:3383
+105:2:3387
+106:0:4176
+107:2:3392
+108:0:4176
+109:2:3393
+110:0:4176
+111:2:3394
+112:0:4176
+113:2:3395
+114:0:4176
+115:1:2
+116:0:4176
+117:2:3396
+118:0:4176
+119:1:8
+120:0:4176
+121:1:9
+122:0:4176
+123:2:3395
+124:0:4176
+125:1:10
+126:0:4176
+127:2:3396
+128:0:4176
+129:1:11
+130:0:4176
+131:2:3395
+132:0:4176
+133:1:12
+134:0:4176
+135:2:3396
+136:0:4176
+137:1:13
+138:0:4176
+139:2:3395
+140:0:4176
+141:1:14
+142:0:4176
+143:2:3396
+144:0:4176
+145:1:15
+146:0:4176
+147:1:16
+148:0:4176
+149:2:3395
+150:0:4176
+151:1:17
+152:0:4176
+153:2:3396
+154:0:4176
+155:1:26
+156:0:4176
+157:2:3395
+158:0:4176
+159:1:30
+160:1:31
+161:1:35
+162:1:39
+163:1:40
+164:1:44
+165:1:52
+166:1:53
+167:1:57
+168:1:61
+169:1:62
+170:1:57
+171:1:61
+172:1:62
+173:1:66
+174:1:73
+175:1:80
+176:1:81
+177:1:88
+178:1:93
+179:1:100
+180:1:101
+181:1:100
+182:1:101
+183:1:108
+184:1:112
+185:0:4176
+186:2:3396
+187:0:4176
+188:1:117
+189:0:4176
+190:2:3397
+191:0:4176
+192:2:3402
+193:0:4176
+194:2:3403
+195:0:4176
+196:2:3411
+197:2:3412
+198:2:3416
+199:2:3420
+200:2:3421
+201:2:3425
+202:2:3433
+203:2:3434
+204:2:3438
+205:2:3442
+206:2:3443
+207:2:3438
+208:2:3442
+209:2:3443
+210:2:3447
+211:2:3454
+212:2:3461
+213:2:3462
+214:2:3469
+215:2:3474
+216:2:3481
+217:2:3482
+218:2:3481
+219:2:3482
+220:2:3489
+221:2:3493
+222:0:4176
+223:2:2694
+224:2:3286
+225:0:4176
+226:2:2522
+227:0:4176
+228:2:2695
+229:0:4176
+230:2:2522
+231:0:4176
+232:2:2698
+233:2:2699
+234:2:2703
+235:2:2704
+236:2:2712
+237:2:2713
+238:2:2717
+239:2:2718
+240:2:2726
+241:2:2731
+242:2:2735
+243:2:2736
+244:2:2744
+245:2:2745
+246:2:2749
+247:2:2750
+248:2:2744
+249:2:2745
+250:2:2749
+251:2:2750
+252:2:2758
+253:2:2763
+254:2:2770
+255:2:2771
+256:2:2778
+257:2:2783
+258:2:2790
+259:2:2791
+260:2:2790
+261:2:2791
+262:2:2798
+263:2:2807
+264:0:4176
+265:2:2522
+266:0:4176
+267:2:2811
+268:2:2812
+269:2:2813
+270:2:2825
+271:2:2826
+272:2:2830
+273:2:2831
+274:2:2839
+275:2:2844
+276:2:2848
+277:2:2849
+278:2:2857
+279:2:2858
+280:2:2862
+281:2:2863
+282:2:2857
+283:2:2858
+284:2:2862
+285:2:2863
+286:2:2871
+287:2:2876
+288:2:2883
+289:2:2884
+290:2:2891
+291:2:2896
+292:2:2903
+293:2:2904
+294:2:2903
+295:2:2904
+296:2:2911
+297:2:2924
+298:2:2925
+299:0:4176
+300:2:2522
+301:0:4176
+302:2:2931
+303:2:2932
+304:2:2936
+305:2:2937
+306:2:2945
+307:2:2946
+308:2:2950
+309:2:2951
+310:2:2959
+311:2:2964
+312:2:2968
+313:2:2969
+314:2:2977
+315:2:2978
+316:2:2982
+317:2:2983
+318:2:2977
+319:2:2978
+320:2:2982
+321:2:2983
+322:2:2991
+323:2:2996
+324:2:3003
+325:2:3004
+326:2:3011
+327:2:3016
+328:2:3023
+329:2:3024
+330:2:3023
+331:2:3024
+332:2:3031
+333:0:4176
+334:2:2522
+335:0:4176
+336:2:3042
+337:2:3043
+338:2:3047
+339:2:3048
+340:2:3056
+341:2:3057
+342:2:3061
+343:2:3062
+344:2:3070
+345:2:3075
+346:2:3079
+347:2:3080
+348:2:3088
+349:2:3089
+350:2:3093
+351:2:3094
+352:2:3088
+353:2:3089
+354:2:3093
+355:2:3094
+356:2:3102
+357:2:3107
+358:2:3114
+359:2:3115
+360:2:3122
+361:2:3127
+362:2:3134
+363:2:3135
+364:2:3134
+365:2:3135
+366:2:3142
+367:2:3151
+368:0:4176
+369:2:2522
+370:0:4176
+371:2:3155
+372:2:3156
+373:2:3157
+374:2:3169
+375:2:3170
+376:2:3174
+377:2:3175
+378:2:3183
+379:2:3188
+380:2:3192
+381:2:3193
+382:2:3201
+383:2:3202
+384:2:3206
+385:2:3207
+386:2:3201
+387:2:3202
+388:2:3206
+389:2:3207
+390:2:3215
+391:2:3220
+392:2:3227
+393:2:3228
+394:2:3235
+395:2:3240
+396:2:3247
+397:2:3248
+398:2:3247
+399:2:3248
+400:2:3255
+401:2:3267
+402:2:3268
+403:0:4176
+404:2:2522
+405:0:4176
+406:2:3274
+407:0:4176
+408:2:3899
+409:2:3900
+410:2:3904
+411:2:3908
+412:2:3909
+413:2:3913
+414:2:3921
+415:2:3922
+416:2:3926
+417:2:3930
+418:2:3931
+419:2:3926
+420:2:3930
+421:2:3931
+422:2:3935
+423:2:3942
+424:2:3949
+425:2:3950
+426:2:3957
+427:2:3962
+428:2:3969
+429:2:3970
+430:2:3969
+431:2:3970
+432:2:3977
+433:2:3981
+434:0:4176
+435:2:3986
+436:0:4176
+437:2:3987
+438:0:4176
+439:2:3988
+440:0:4176
+441:2:3989
+442:0:4176
+443:1:26
+444:0:4176
+445:2:3990
+446:0:4176
+447:1:30
+448:1:31
+449:1:35
+450:1:39
+451:1:40
+452:1:44
+453:1:52
+454:1:53
+455:1:57
+456:1:61
+457:1:62
+458:1:57
+459:1:61
+460:1:62
+461:1:66
+462:1:73
+463:1:80
+464:1:81
+465:1:88
+466:1:93
+467:1:100
+468:1:101
+469:1:100
+470:1:101
+471:1:108
+472:1:112
+473:0:4176
+474:2:3989
+475:0:4176
+476:1:117
+477:0:4176
+478:2:3990
+479:0:4176
+480:2:3991
+481:0:4176
+482:2:3996
+483:0:4176
+484:2:3997
+485:0:4176
+486:2:4005
+487:2:4006
+488:2:4010
+489:2:4014
+490:2:4015
+491:2:4019
+492:2:4027
+493:2:4028
+494:2:4032
+495:2:4036
+496:2:4037
+497:2:4032
+498:2:4036
+499:2:4037
+500:2:4041
+501:2:4048
+502:2:4055
+503:2:4056
+504:2:4063
+505:2:4068
+506:2:4075
+507:2:4076
+508:2:4075
+509:2:4076
+510:2:4083
+511:2:4087
+512:0:4176
+513:2:3276
+514:2:3286
+515:0:4176
+516:2:2522
+517:0:4176
+518:2:3277
+519:2:3278
+520:0:4176
+521:2:2522
+522:0:4176
+523:2:3282
+524:0:4176
+525:2:3290
+526:0:4176
+527:2:2518
+528:0:4176
+529:2:2520
+530:0:4176
+531:2:2521
+532:0:4176
+533:2:2522
+534:0:4176
+535:2:2685
+536:2:2686
+537:2:2687
+538:0:4176
+539:2:2522
+540:0:4176
+541:2:2523
+542:2:2524
+543:2:2528
+544:2:2529
+545:2:2537
+546:2:2538
+547:2:2542
+548:2:2543
+549:2:2551
+550:2:2556
+551:2:2557
+552:2:2569
+553:2:2570
+554:2:2571
+555:2:2569
+556:2:2570
+557:2:2574
+558:2:2575
+559:2:2583
+560:2:2588
+561:2:2595
+562:2:2596
+563:2:2603
+564:2:2608
+565:2:2615
+566:2:2616
+567:2:2615
+568:2:2616
+569:2:2623
+570:2:2633
+571:0:4176
+572:2:2522
+573:0:4176
+574:2:2692
+575:0:4176
+576:2:3305
+577:2:3306
+578:2:3310
+579:2:3314
+580:2:3315
+581:2:3319
+582:2:3327
+583:2:3328
+584:2:3332
+585:2:3333
+586:2:3332
+587:2:3336
+588:2:3337
+589:2:3341
+590:2:3348
+591:2:3355
+592:2:3356
+593:2:3363
+594:2:3368
+595:2:3375
+596:2:3376
+597:2:3375
+598:2:3376
+599:2:3383
+600:2:3387
+601:0:4176
+602:2:3392
+603:0:4176
+604:2:3393
+605:0:4176
+606:2:3394
+607:0:4176
+608:2:3395
+609:0:4176
+610:1:26
+611:0:4176
+612:2:3396
+613:0:4176
+614:1:30
+615:1:31
+616:1:35
+617:1:39
+618:1:40
+619:1:44
+620:1:52
+621:1:53
+622:1:57
+623:1:61
+624:1:62
+625:1:57
+626:1:61
+627:1:62
+628:1:66
+629:1:73
+630:1:80
+631:1:81
+632:1:88
+633:1:93
+634:1:100
+635:1:101
+636:1:100
+637:1:101
+638:1:108
+639:1:112
+640:0:4176
+641:2:3395
+642:0:4176
+643:1:117
+644:0:4176
+645:2:3396
+646:0:4176
+647:2:3397
+648:0:4176
+649:2:3402
+650:0:4176
+651:2:3403
+652:0:4176
+653:2:3411
+654:2:3412
+655:2:3416
+656:2:3420
+657:2:3421
+658:2:3425
+659:2:3433
+660:2:3434
+661:2:3438
+662:2:3442
+663:2:3443
+664:2:3438
+665:2:3442
+666:2:3443
+667:2:3447
+668:2:3454
+669:2:3461
+670:2:3462
+671:2:3469
+672:2:3474
+673:2:3481
+674:2:3482
+675:2:3481
+676:2:3482
+677:2:3489
+678:2:3493
+679:0:4176
+680:2:2694
+681:2:3286
+682:0:4176
+683:2:2522
+684:0:4176
+685:2:2695
+686:0:4176
+687:2:2522
+688:0:4176
+689:2:2698
+690:2:2699
+691:2:2703
+692:2:2704
+693:2:2712
+694:2:2713
+695:2:2717
+696:2:2718
+697:2:2726
+698:2:2731
+699:2:2735
+700:2:2736
+701:2:2744
+702:2:2745
+703:2:2749
+704:2:2750
+705:2:2744
+706:2:2745
+707:2:2749
+708:2:2750
+709:2:2758
+710:2:2763
+711:2:2770
+712:2:2771
+713:2:2778
+714:2:2783
+715:2:2790
+716:2:2791
+717:2:2790
+718:2:2791
+719:2:2798
+720:2:2807
+721:0:4176
+722:2:2522
+723:0:4176
+724:2:2811
+725:2:2812
+726:2:2813
+727:2:2825
+728:2:2826
+729:2:2830
+730:2:2831
+731:2:2839
+732:2:2844
+733:2:2848
+734:2:2849
+735:2:2857
+736:2:2858
+737:2:2862
+738:2:2863
+739:2:2857
+740:2:2858
+741:2:2862
+742:2:2863
+743:2:2871
+744:2:2876
+745:2:2883
+746:2:2884
+747:2:2891
+748:2:2896
+749:2:2903
+750:2:2904
+751:2:2903
+752:2:2904
+753:2:2911
+754:2:2924
+755:2:2925
+756:0:4176
+757:2:2522
+758:0:4176
+759:2:2931
+760:2:2932
+761:2:2936
+762:2:2937
+763:2:2945
+764:2:2946
+765:2:2950
+766:2:2951
+767:2:2959
+768:2:2964
+769:2:2968
+770:2:2969
+771:2:2977
+772:2:2978
+773:2:2982
+774:2:2983
+775:2:2977
+776:2:2978
+777:2:2982
+778:2:2983
+779:2:2991
+780:2:2996
+781:2:3003
+782:2:3004
+783:2:3011
+784:2:3016
+785:2:3023
+786:2:3024
+787:2:3023
+788:2:3024
+789:2:3031
+790:0:4176
+791:2:2522
+792:0:4176
+793:2:3042
+794:2:3043
+795:2:3047
+796:2:3048
+797:2:3056
+798:2:3057
+799:2:3061
+800:2:3062
+801:2:3070
+802:2:3075
+803:2:3079
+804:2:3080
+805:2:3088
+806:2:3089
+807:2:3093
+808:2:3094
+809:2:3088
+810:2:3089
+811:2:3093
+812:2:3094
+813:2:3102
+814:2:3107
+815:2:3114
+816:2:3115
+817:2:3122
+818:2:3127
+819:2:3134
+820:2:3135
+821:2:3134
+822:2:3135
+823:2:3142
+824:2:3151
+825:0:4176
+826:2:2522
+827:0:4176
+828:2:3155
+829:2:3156
+830:2:3157
+831:2:3169
+832:2:3170
+833:2:3174
+834:2:3175
+835:2:3183
+836:2:3188
+837:2:3192
+838:2:3193
+839:2:3201
+840:2:3202
+841:2:3206
+842:2:3207
+843:2:3201
+844:2:3202
+845:2:3206
+846:2:3207
+847:2:3215
+848:2:3220
+849:2:3227
+850:2:3228
+851:2:3235
+852:2:3240
+853:2:3247
+854:2:3248
+855:2:3247
+856:2:3248
+857:2:3255
+858:2:3267
+859:2:3268
+860:0:4176
+861:2:2522
+862:0:4176
+863:2:3274
+864:0:4176
+865:2:3899
+866:2:3900
+867:2:3904
+868:2:3908
+869:2:3909
+870:2:3913
+871:2:3921
+872:2:3922
+873:2:3926
+874:2:3930
+875:2:3931
+876:2:3926
+877:2:3930
+878:2:3931
+879:2:3935
+880:2:3942
+881:2:3949
+882:2:3950
+883:2:3957
+884:2:3962
+885:2:3969
+886:2:3970
+887:2:3969
+888:2:3970
+889:2:3977
+890:2:3981
+891:0:4176
+892:2:3986
+893:0:4176
+894:2:3987
+895:0:4176
+896:2:3988
+897:0:4176
+898:2:3989
+899:0:4176
+900:1:26
+901:0:4176
+902:2:3990
+903:0:4176
+904:1:30
+905:1:31
+906:1:35
+907:1:39
+908:1:40
+909:1:44
+910:1:52
+911:1:53
+912:1:57
+913:1:61
+914:1:62
+915:1:57
+916:1:61
+917:1:62
+918:1:66
+919:1:73
+920:1:80
+921:1:81
+922:1:88
+923:1:93
+924:1:100
+925:1:101
+926:1:100
+927:1:101
+928:1:108
+929:1:112
+930:0:4176
+931:2:3989
+932:0:4176
+933:1:117
+934:0:4176
+935:2:3990
+936:0:4176
+937:2:3991
+938:0:4176
+939:2:3996
+940:0:4176
+941:2:3997
+942:0:4176
+943:2:4005
+944:2:4006
+945:2:4010
+946:2:4014
+947:2:4015
+948:2:4019
+949:2:4027
+950:2:4028
+951:2:4032
+952:2:4036
+953:2:4037
+954:2:4032
+955:2:4036
+956:2:4037
+957:2:4041
+958:2:4048
+959:2:4055
+960:2:4056
+961:2:4063
+962:2:4068
+963:2:4075
+964:2:4076
+965:2:4075
+966:2:4076
+967:2:4083
+968:2:4087
+969:0:4176
+970:2:3276
+971:2:3286
+972:0:4176
+973:2:2522
+974:0:4176
+975:2:3277
+976:2:3278
+977:0:4176
+978:2:2522
+979:0:4176
+980:2:3282
+981:0:4176
+982:2:3290
+983:0:4176
+984:2:2518
+985:0:4176
+986:2:2520
+987:0:4176
+988:2:2521
+989:0:4176
+990:2:2522
+991:0:4176
+992:2:2523
+993:2:2524
+994:2:2528
+995:2:2529
+996:2:2537
+997:2:2538
+998:2:2542
+999:2:2543
+1000:2:2551
+1001:2:2556
+1002:2:2560
+1003:2:2561
+1004:2:2569
+1005:2:2570
+1006:2:2574
+1007:2:2575
+1008:2:2569
+1009:2:2570
+1010:2:2571
+1011:2:2583
+1012:2:2588
+1013:2:2595
+1014:2:2596
+1015:2:2603
+1016:2:2608
+1017:2:2615
+1018:2:2616
+1019:2:2615
+1020:2:2616
+1021:2:2623
+1022:2:2633
+1023:0:4176
+1024:2:2522
+1025:0:4176
+1026:2:2685
+1027:2:2686
+1028:2:2687
+1029:0:4176
+1030:2:2522
+1031:0:4176
+1032:2:2692
+1033:0:4176
+1034:1:118
+1035:0:4176
+1036:1:120
+1037:0:4176
+1038:1:19
+1039:0:4176
+1040:1:126
+1041:1:127
+1042:1:131
+1043:1:132
+1044:1:140
+1045:1:141
+1046:1:145
+1047:1:146
+1048:1:154
+1049:1:159
+1050:1:163
+1051:1:164
+1052:1:172
+1053:1:173
+1054:1:177
+1055:1:178
+1056:1:172
+1057:1:173
+1058:1:177
+1059:1:178
+1060:1:186
+1061:1:191
+1062:1:198
+1063:1:199
+1064:1:206
+1065:1:211
+1066:1:218
+1067:1:219
+1068:1:218
+1069:1:219
+1070:1:226
+1071:0:4176
+1072:1:15
+1073:0:4176
+1074:1:16
+1075:0:4176
+1076:1:17
+1077:0:4176
+1078:1:118
+1079:0:4176
+1080:1:120
+1081:0:4176
+1082:1:19
+1083:0:4176
+1084:1:237
+1085:1:238
+1086:0:4176
+1087:1:15
+1088:0:4176
+1089:1:16
+1090:0:4176
+1091:1:17
+1092:0:4176
+1093:1:118
+1094:0:4176
+1095:1:120
+1096:0:4176
+1097:1:19
+1098:0:4176
+1099:1:244
+1100:1:245
+1101:1:249
+1102:1:250
+1103:1:258
+1104:1:259
+1105:1:263
+1106:1:264
+1107:1:272
+1108:1:277
+1109:1:281
+1110:1:282
+1111:1:290
+1112:1:291
+1113:1:295
+1114:1:296
+1115:1:290
+1116:1:291
+1117:1:295
+1118:1:296
+1119:1:304
+1120:1:309
+1121:1:316
+1122:1:317
+1123:1:324
+1124:1:329
+1125:1:336
+1126:1:337
+1127:1:336
+1128:1:337
+1129:1:344
+1130:0:4176
+1131:1:15
+1132:0:4176
+1133:1:16
+1134:0:4176
+1135:1:17
+1136:0:4176
+1137:1:118
+1138:0:4176
+1139:1:120
+1140:0:4176
+1141:1:19
+1142:0:4176
+1143:1:355
+1144:1:356
+1145:1:360
+1146:1:361
+1147:1:369
+1148:1:370
+1149:1:374
+1150:1:375
+1151:1:383
+1152:1:388
+1153:1:392
+1154:1:393
+1155:1:401
+1156:1:402
+1157:1:406
+1158:1:407
+1159:1:401
+1160:1:402
+1161:1:406
+1162:1:407
+1163:1:415
+1164:1:420
+1165:1:427
+1166:1:428
+1167:1:435
+1168:1:440
+1169:1:447
+1170:1:448
+1171:1:447
+1172:1:448
+1173:1:455
+1174:1:464
+1175:0:4176
+1176:1:15
+1177:0:4176
+1178:1:16
+1179:0:4176
+1180:1:17
+1181:0:4176
+1182:1:118
+1183:0:4176
+1184:1:120
+1185:0:4176
+1186:1:19
+1187:0:4176
+1188:1:584
+1189:1:585
+1190:1:589
+1191:1:590
+1192:1:598
+1193:1:599
+1194:1:600
+1195:1:612
+1196:1:617
+1197:1:621
+1198:1:622
+1199:1:630
+1200:1:631
+1201:1:635
+1202:1:636
+1203:1:630
+1204:1:631
+1205:1:635
+1206:1:636
+1207:1:644
+1208:1:649
+1209:1:656
+1210:1:657
+1211:1:664
+1212:1:669
+1213:1:676
+1214:1:677
+1215:1:676
+1216:1:677
+1217:1:684
+1218:0:4176
+1219:1:15
+1220:0:4176
+1221:1:16
+1222:0:4176
+1223:1:17
+1224:0:4176
+1225:1:118
+1226:0:4176
+1227:1:120
+1228:0:4176
+1229:1:19
+1230:0:4176
+1231:1:695
+1232:1:698
+1233:1:699
+1234:0:4176
+1235:1:15
+1236:0:4176
+1237:1:16
+1238:0:4176
+1239:1:17
+1240:0:4176
+1241:1:118
+1242:0:4176
+1243:1:120
+1244:0:4176
+1245:1:19
+1246:0:4176
+1247:1:926
+1248:1:927
+1249:1:931
+1250:1:932
+1251:1:940
+1252:1:941
+1253:1:945
+1254:1:946
+1255:1:954
+1256:1:959
+1257:1:963
+1258:1:964
+1259:1:972
+1260:1:973
+1261:1:977
+1262:1:978
+1263:1:972
+1264:1:973
+1265:1:977
+1266:1:978
+1267:1:986
+1268:1:991
+1269:1:998
+1270:1:999
+1271:1:1006
+1272:1:1011
+1273:1:1018
+1274:1:1019
+1275:1:1018
+1276:1:1019
+1277:1:1026
+1278:1:1035
+1279:1:1039
+1280:0:4176
+1281:1:15
+1282:0:4176
+1283:1:16
+1284:0:4176
+1285:1:17
+1286:0:4176
+1287:1:118
+1288:0:4176
+1289:1:120
+1290:0:4176
+1291:1:19
+1292:0:4176
+1293:1:1040
+1294:1:1041
+1295:1:1045
+1296:1:1046
+1297:1:1054
+1298:1:1055
+1299:1:1056
+1300:1:1068
+1301:1:1073
+1302:1:1077
+1303:1:1078
+1304:1:1086
+1305:1:1087
+1306:1:1091
+1307:1:1092
+1308:1:1086
+1309:1:1087
+1310:1:1091
+1311:1:1092
+1312:1:1100
+1313:1:1105
+1314:1:1112
+1315:1:1113
+1316:1:1120
+1317:1:1125
+1318:1:1132
+1319:1:1133
+1320:1:1132
+1321:1:1133
+1322:1:1140
+1323:0:4176
+1324:1:15
+1325:0:4176
+1326:1:16
+1327:0:4176
+1328:1:17
+1329:0:4176
+1330:1:118
+1331:0:4176
+1332:1:120
+1333:0:4176
+1334:1:19
+1335:0:4176
+1336:1:1151
+1337:0:4176
+1338:1:2417
+1339:1:2424
+1340:1:2425
+1341:1:2432
+1342:1:2437
+1343:1:2444
+1344:1:2445
+1345:1:2444
+1346:1:2445
+1347:1:2452
+1348:1:2456
+1349:0:4176
+1350:2:3305
+1351:2:3306
+1352:2:3310
+1353:2:3314
+1354:2:3315
+1355:2:3319
+1356:2:3324
+1357:2:3332
+1358:2:3336
+1359:2:3337
+1360:2:3332
+1361:2:3333
+1362:2:3341
+1363:2:3348
+1364:2:3355
+1365:2:3356
+1366:2:3363
+1367:2:3368
+1368:2:3375
+1369:2:3376
+1370:2:3375
+1371:2:3376
+1372:2:3383
+1373:2:3387
+1374:0:4176
+1375:2:3392
+1376:0:4176
+1377:2:3393
+1378:0:4176
+1379:2:3394
+1380:0:4176
+1381:2:3395
+1382:0:4176
+1383:1:1153
+1384:1:1154
+1385:0:4174
+1386:2:3396
+1387:0:4180
+1388:1:2165
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_single_flip.define b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_single_flip.define
new file mode 100644 (file)
index 0000000..5e642ef
--- /dev/null
@@ -0,0 +1 @@
+#define SINGLE_FLIP
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_single_flip.log b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_single_flip.log
new file mode 100644 (file)
index 0000000..96aeb49
--- /dev/null
@@ -0,0 +1,678 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define >> pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_free.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_free_single_flip.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1272)
+Depth=    6479 States=    1e+06 Transitions= 6.49e+06 Memory=   550.432        t=     16 R=   6e+04
+Depth=    6479 States=    2e+06 Transitions= 1.29e+07 Memory=   634.318        t=   32.2 R=   6e+04
+Depth=    6479 States=    3e+06 Transitions= 2.07e+07 Memory=   718.303        t=     53 R=   6e+04
+pan: resizing hashtable to -w22..  done
+Depth=    6479 States=    4e+06 Transitions= 2.91e+07 Memory=   833.311        t=   75.3 R=   5e+04
+Depth=    6479 States=    5e+06 Transitions= 3.67e+07 Memory=   917.295        t=   95.1 R=   5e+04
+pan: claim violated! (at depth 1254)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness disabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 6479, errors: 1
+  5383072 states, stored
+ 34108932 states, matched
+ 39492004 transitions (= stored+matched)
+5.5430124e+08 atomic steps
+hash conflicts:  29860557 (resolved)
+
+Stats on memory usage (in Megabytes):
+  595.509      equivalent memory usage for states (stored*(State-vector + overhead))
+  460.005      actual memory usage for states (compression: 77.25%)
+               state-vector as stored = 62 byte + 28 byte overhead
+   32.000      memory used for hash table (-w22)
+  457.764      memory used for DFS stack (-m10000000)
+  949.424      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 262, "pan.___", state 30, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 52, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 61, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 77, "(1)"
+       line 243, "pan.___", state 85, "(1)"
+       line 247, "pan.___", state 97, "(1)"
+       line 251, "pan.___", state 105, "(1)"
+       line 401, "pan.___", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 163, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 177, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 195, "(1)"
+       line 247, "pan.___", state 215, "(1)"
+       line 251, "pan.___", state 223, "(1)"
+       line 680, "pan.___", state 242, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 401, "pan.___", state 249, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 281, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 295, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 313, "(1)"
+       line 247, "pan.___", state 333, "(1)"
+       line 251, "pan.___", state 341, "(1)"
+       line 401, "pan.___", state 360, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 392, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 406, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 424, "(1)"
+       line 247, "pan.___", state 444, "(1)"
+       line 251, "pan.___", state 452, "(1)"
+       line 401, "pan.___", state 473, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 475, "(1)"
+       line 401, "pan.___", state 476, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 476, "else"
+       line 401, "pan.___", state 479, "(1)"
+       line 405, "pan.___", state 487, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 489, "(1)"
+       line 405, "pan.___", state 490, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 490, "else"
+       line 405, "pan.___", state 493, "(1)"
+       line 405, "pan.___", state 494, "(1)"
+       line 405, "pan.___", state 494, "(1)"
+       line 403, "pan.___", state 499, "((i<1))"
+       line 403, "pan.___", state 499, "((i>=1))"
+       line 410, "pan.___", state 505, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 507, "(1)"
+       line 410, "pan.___", state 508, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 508, "else"
+       line 410, "pan.___", state 511, "(1)"
+       line 410, "pan.___", state 512, "(1)"
+       line 410, "pan.___", state 512, "(1)"
+       line 414, "pan.___", state 519, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 521, "(1)"
+       line 414, "pan.___", state 522, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 522, "else"
+       line 414, "pan.___", state 525, "(1)"
+       line 414, "pan.___", state 526, "(1)"
+       line 414, "pan.___", state 526, "(1)"
+       line 412, "pan.___", state 531, "((i<2))"
+       line 412, "pan.___", state 531, "((i>=2))"
+       line 239, "pan.___", state 537, "(1)"
+       line 243, "pan.___", state 545, "(1)"
+       line 243, "pan.___", state 546, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 546, "else"
+       line 241, "pan.___", state 551, "((i<1))"
+       line 241, "pan.___", state 551, "((i>=1))"
+       line 247, "pan.___", state 557, "(1)"
+       line 247, "pan.___", state 558, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 558, "else"
+       line 251, "pan.___", state 565, "(1)"
+       line 251, "pan.___", state 566, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 566, "else"
+       line 249, "pan.___", state 571, "((i<2))"
+       line 249, "pan.___", state 571, "((i>=2))"
+       line 256, "pan.___", state 575, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 575, "else"
+       line 421, "pan.___", state 577, "(1)"
+       line 421, "pan.___", state 577, "(1)"
+       line 680, "pan.___", state 580, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 680, "pan.___", state 581, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 680, "pan.___", state 582, "(1)"
+       line 401, "pan.___", state 589, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 621, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 635, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 653, "(1)"
+       line 247, "pan.___", state 673, "(1)"
+       line 251, "pan.___", state 681, "(1)"
+       line 401, "pan.___", state 707, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 709, "(1)"
+       line 401, "pan.___", state 710, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 710, "else"
+       line 401, "pan.___", state 713, "(1)"
+       line 405, "pan.___", state 721, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 723, "(1)"
+       line 405, "pan.___", state 724, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 724, "else"
+       line 405, "pan.___", state 727, "(1)"
+       line 405, "pan.___", state 728, "(1)"
+       line 405, "pan.___", state 728, "(1)"
+       line 403, "pan.___", state 733, "((i<1))"
+       line 403, "pan.___", state 733, "((i>=1))"
+       line 410, "pan.___", state 739, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 741, "(1)"
+       line 410, "pan.___", state 742, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 742, "else"
+       line 410, "pan.___", state 745, "(1)"
+       line 410, "pan.___", state 746, "(1)"
+       line 410, "pan.___", state 746, "(1)"
+       line 414, "pan.___", state 753, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 755, "(1)"
+       line 414, "pan.___", state 756, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 756, "else"
+       line 414, "pan.___", state 759, "(1)"
+       line 414, "pan.___", state 760, "(1)"
+       line 414, "pan.___", state 760, "(1)"
+       line 412, "pan.___", state 765, "((i<2))"
+       line 412, "pan.___", state 765, "((i>=2))"
+       line 239, "pan.___", state 771, "(1)"
+       line 243, "pan.___", state 779, "(1)"
+       line 243, "pan.___", state 780, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 780, "else"
+       line 241, "pan.___", state 785, "((i<1))"
+       line 241, "pan.___", state 785, "((i>=1))"
+       line 247, "pan.___", state 791, "(1)"
+       line 247, "pan.___", state 792, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 792, "else"
+       line 251, "pan.___", state 799, "(1)"
+       line 251, "pan.___", state 800, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 800, "else"
+       line 256, "pan.___", state 809, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 809, "else"
+       line 421, "pan.___", state 811, "(1)"
+       line 421, "pan.___", state 811, "(1)"
+       line 401, "pan.___", state 818, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 820, "(1)"
+       line 401, "pan.___", state 821, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 821, "else"
+       line 401, "pan.___", state 824, "(1)"
+       line 405, "pan.___", state 832, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 834, "(1)"
+       line 405, "pan.___", state 835, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 835, "else"
+       line 405, "pan.___", state 838, "(1)"
+       line 405, "pan.___", state 839, "(1)"
+       line 405, "pan.___", state 839, "(1)"
+       line 403, "pan.___", state 844, "((i<1))"
+       line 403, "pan.___", state 844, "((i>=1))"
+       line 410, "pan.___", state 850, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 852, "(1)"
+       line 410, "pan.___", state 853, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 853, "else"
+       line 410, "pan.___", state 856, "(1)"
+       line 410, "pan.___", state 857, "(1)"
+       line 410, "pan.___", state 857, "(1)"
+       line 414, "pan.___", state 864, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 866, "(1)"
+       line 414, "pan.___", state 867, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 867, "else"
+       line 414, "pan.___", state 870, "(1)"
+       line 414, "pan.___", state 871, "(1)"
+       line 414, "pan.___", state 871, "(1)"
+       line 412, "pan.___", state 876, "((i<2))"
+       line 412, "pan.___", state 876, "((i>=2))"
+       line 239, "pan.___", state 882, "(1)"
+       line 243, "pan.___", state 890, "(1)"
+       line 243, "pan.___", state 891, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 891, "else"
+       line 241, "pan.___", state 896, "((i<1))"
+       line 241, "pan.___", state 896, "((i>=1))"
+       line 247, "pan.___", state 902, "(1)"
+       line 247, "pan.___", state 903, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 903, "else"
+       line 251, "pan.___", state 910, "(1)"
+       line 251, "pan.___", state 911, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 911, "else"
+       line 249, "pan.___", state 916, "((i<2))"
+       line 249, "pan.___", state 916, "((i>=2))"
+       line 256, "pan.___", state 920, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 920, "else"
+       line 421, "pan.___", state 922, "(1)"
+       line 421, "pan.___", state 922, "(1)"
+       line 688, "pan.___", state 926, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 401, "pan.___", state 931, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 963, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 977, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 995, "(1)"
+       line 247, "pan.___", state 1015, "(1)"
+       line 251, "pan.___", state 1023, "(1)"
+       line 401, "pan.___", state 1045, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1077, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1091, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1109, "(1)"
+       line 247, "pan.___", state 1129, "(1)"
+       line 251, "pan.___", state 1137, "(1)"
+       line 401, "pan.___", state 1160, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1192, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1206, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1224, "(1)"
+       line 247, "pan.___", state 1244, "(1)"
+       line 251, "pan.___", state 1252, "(1)"
+       line 401, "pan.___", state 1271, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1303, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1317, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1335, "(1)"
+       line 247, "pan.___", state 1355, "(1)"
+       line 251, "pan.___", state 1363, "(1)"
+       line 401, "pan.___", state 1387, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1419, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1433, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1451, "(1)"
+       line 247, "pan.___", state 1471, "(1)"
+       line 251, "pan.___", state 1479, "(1)"
+       line 401, "pan.___", state 1498, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1530, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1544, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1562, "(1)"
+       line 247, "pan.___", state 1582, "(1)"
+       line 251, "pan.___", state 1590, "(1)"
+       line 401, "pan.___", state 1612, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1644, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1658, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1676, "(1)"
+       line 247, "pan.___", state 1696, "(1)"
+       line 251, "pan.___", state 1704, "(1)"
+       line 727, "pan.___", state 1723, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 401, "pan.___", state 1730, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1762, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1776, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1794, "(1)"
+       line 247, "pan.___", state 1814, "(1)"
+       line 251, "pan.___", state 1822, "(1)"
+       line 401, "pan.___", state 1841, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1873, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1887, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1905, "(1)"
+       line 247, "pan.___", state 1925, "(1)"
+       line 251, "pan.___", state 1933, "(1)"
+       line 401, "pan.___", state 1954, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 1956, "(1)"
+       line 401, "pan.___", state 1957, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 1957, "else"
+       line 401, "pan.___", state 1960, "(1)"
+       line 405, "pan.___", state 1968, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 1970, "(1)"
+       line 405, "pan.___", state 1971, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 1971, "else"
+       line 405, "pan.___", state 1974, "(1)"
+       line 405, "pan.___", state 1975, "(1)"
+       line 405, "pan.___", state 1975, "(1)"
+       line 403, "pan.___", state 1980, "((i<1))"
+       line 403, "pan.___", state 1980, "((i>=1))"
+       line 410, "pan.___", state 1986, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1988, "(1)"
+       line 410, "pan.___", state 1989, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 1989, "else"
+       line 410, "pan.___", state 1992, "(1)"
+       line 410, "pan.___", state 1993, "(1)"
+       line 410, "pan.___", state 1993, "(1)"
+       line 414, "pan.___", state 2000, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2002, "(1)"
+       line 414, "pan.___", state 2003, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 2003, "else"
+       line 414, "pan.___", state 2006, "(1)"
+       line 414, "pan.___", state 2007, "(1)"
+       line 414, "pan.___", state 2007, "(1)"
+       line 412, "pan.___", state 2012, "((i<2))"
+       line 412, "pan.___", state 2012, "((i>=2))"
+       line 239, "pan.___", state 2018, "(1)"
+       line 243, "pan.___", state 2026, "(1)"
+       line 243, "pan.___", state 2027, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 2027, "else"
+       line 241, "pan.___", state 2032, "((i<1))"
+       line 241, "pan.___", state 2032, "((i>=1))"
+       line 247, "pan.___", state 2038, "(1)"
+       line 247, "pan.___", state 2039, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 2039, "else"
+       line 251, "pan.___", state 2046, "(1)"
+       line 251, "pan.___", state 2047, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 2047, "else"
+       line 249, "pan.___", state 2052, "((i<2))"
+       line 249, "pan.___", state 2052, "((i>=2))"
+       line 256, "pan.___", state 2056, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 2056, "else"
+       line 421, "pan.___", state 2058, "(1)"
+       line 421, "pan.___", state 2058, "(1)"
+       line 727, "pan.___", state 2061, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 727, "pan.___", state 2062, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 727, "pan.___", state 2063, "(1)"
+       line 401, "pan.___", state 2070, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 2102, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2116, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 2134, "(1)"
+       line 247, "pan.___", state 2154, "(1)"
+       line 251, "pan.___", state 2162, "(1)"
+       line 401, "pan.___", state 2187, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 2219, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2233, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 2251, "(1)"
+       line 247, "pan.___", state 2271, "(1)"
+       line 251, "pan.___", state 2279, "(1)"
+       line 401, "pan.___", state 2298, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 2330, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2344, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 2362, "(1)"
+       line 247, "pan.___", state 2382, "(1)"
+       line 251, "pan.___", state 2390, "(1)"
+       line 239, "pan.___", state 2421, "(1)"
+       line 247, "pan.___", state 2441, "(1)"
+       line 251, "pan.___", state 2449, "(1)"
+       line 239, "pan.___", state 2464, "(1)"
+       line 247, "pan.___", state 2484, "(1)"
+       line 251, "pan.___", state 2492, "(1)"
+       line 887, "pan.___", state 2509, "-end-"
+       (246 of 2509 states)
+unreached in proctype urcu_writer
+       line 401, "pan.___", state 22, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 36, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 54, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 86, "(1)"
+       line 243, "pan.___", state 94, "(1)"
+       line 247, "pan.___", state 106, "(1)"
+       line 262, "pan.___", state 135, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 144, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 157, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 197, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 211, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 229, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 243, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 261, "(1)"
+       line 243, "pan.___", state 269, "(1)"
+       line 247, "pan.___", state 281, "(1)"
+       line 251, "pan.___", state 289, "(1)"
+       line 405, "pan.___", state 324, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 342, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 356, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 243, "pan.___", state 382, "(1)"
+       line 247, "pan.___", state 394, "(1)"
+       line 251, "pan.___", state 402, "(1)"
+       line 401, "pan.___", state 429, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 431, "(1)"
+       line 401, "pan.___", state 432, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 432, "else"
+       line 401, "pan.___", state 435, "(1)"
+       line 405, "pan.___", state 443, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 445, "(1)"
+       line 405, "pan.___", state 446, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 446, "else"
+       line 405, "pan.___", state 449, "(1)"
+       line 405, "pan.___", state 450, "(1)"
+       line 405, "pan.___", state 450, "(1)"
+       line 403, "pan.___", state 455, "((i<1))"
+       line 403, "pan.___", state 455, "((i>=1))"
+       line 410, "pan.___", state 461, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 463, "(1)"
+       line 410, "pan.___", state 464, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 464, "else"
+       line 410, "pan.___", state 467, "(1)"
+       line 410, "pan.___", state 468, "(1)"
+       line 410, "pan.___", state 468, "(1)"
+       line 414, "pan.___", state 475, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 477, "(1)"
+       line 414, "pan.___", state 478, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 478, "else"
+       line 414, "pan.___", state 481, "(1)"
+       line 414, "pan.___", state 482, "(1)"
+       line 414, "pan.___", state 482, "(1)"
+       line 412, "pan.___", state 487, "((i<2))"
+       line 412, "pan.___", state 487, "((i>=2))"
+       line 239, "pan.___", state 493, "(1)"
+       line 243, "pan.___", state 501, "(1)"
+       line 243, "pan.___", state 502, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 502, "else"
+       line 241, "pan.___", state 507, "((i<1))"
+       line 241, "pan.___", state 507, "((i>=1))"
+       line 247, "pan.___", state 513, "(1)"
+       line 247, "pan.___", state 514, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 514, "else"
+       line 251, "pan.___", state 521, "(1)"
+       line 251, "pan.___", state 522, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 522, "else"
+       line 256, "pan.___", state 531, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 531, "else"
+       line 421, "pan.___", state 533, "(1)"
+       line 421, "pan.___", state 533, "(1)"
+       line 401, "pan.___", state 540, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 542, "(1)"
+       line 401, "pan.___", state 543, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 543, "else"
+       line 401, "pan.___", state 546, "(1)"
+       line 405, "pan.___", state 554, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 556, "(1)"
+       line 405, "pan.___", state 557, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 557, "else"
+       line 405, "pan.___", state 560, "(1)"
+       line 405, "pan.___", state 561, "(1)"
+       line 405, "pan.___", state 561, "(1)"
+       line 403, "pan.___", state 566, "((i<1))"
+       line 403, "pan.___", state 566, "((i>=1))"
+       line 410, "pan.___", state 572, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 574, "(1)"
+       line 410, "pan.___", state 575, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 575, "else"
+       line 410, "pan.___", state 578, "(1)"
+       line 410, "pan.___", state 579, "(1)"
+       line 410, "pan.___", state 579, "(1)"
+       line 414, "pan.___", state 586, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 588, "(1)"
+       line 414, "pan.___", state 589, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 589, "else"
+       line 414, "pan.___", state 592, "(1)"
+       line 414, "pan.___", state 593, "(1)"
+       line 414, "pan.___", state 593, "(1)"
+       line 412, "pan.___", state 598, "((i<2))"
+       line 412, "pan.___", state 598, "((i>=2))"
+       line 239, "pan.___", state 604, "(1)"
+       line 243, "pan.___", state 612, "(1)"
+       line 243, "pan.___", state 613, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 613, "else"
+       line 241, "pan.___", state 618, "((i<1))"
+       line 241, "pan.___", state 618, "((i>=1))"
+       line 247, "pan.___", state 624, "(1)"
+       line 247, "pan.___", state 625, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 625, "else"
+       line 251, "pan.___", state 632, "(1)"
+       line 251, "pan.___", state 633, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 633, "else"
+       line 249, "pan.___", state 638, "((i<2))"
+       line 249, "pan.___", state 638, "((i>=2))"
+       line 256, "pan.___", state 642, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 642, "else"
+       line 421, "pan.___", state 644, "(1)"
+       line 421, "pan.___", state 644, "(1)"
+       line 1095, "pan.___", state 648, "_proc_urcu_writer = (_proc_urcu_writer|(1<<10))"
+       line 401, "pan.___", state 653, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 655, "(1)"
+       line 401, "pan.___", state 656, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 656, "else"
+       line 401, "pan.___", state 659, "(1)"
+       line 405, "pan.___", state 667, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 669, "(1)"
+       line 405, "pan.___", state 670, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 670, "else"
+       line 405, "pan.___", state 673, "(1)"
+       line 405, "pan.___", state 674, "(1)"
+       line 405, "pan.___", state 674, "(1)"
+       line 403, "pan.___", state 679, "((i<1))"
+       line 403, "pan.___", state 679, "((i>=1))"
+       line 410, "pan.___", state 685, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 687, "(1)"
+       line 410, "pan.___", state 688, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 688, "else"
+       line 410, "pan.___", state 691, "(1)"
+       line 410, "pan.___", state 692, "(1)"
+       line 410, "pan.___", state 692, "(1)"
+       line 414, "pan.___", state 699, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 701, "(1)"
+       line 414, "pan.___", state 702, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 702, "else"
+       line 414, "pan.___", state 705, "(1)"
+       line 414, "pan.___", state 706, "(1)"
+       line 414, "pan.___", state 706, "(1)"
+       line 412, "pan.___", state 711, "((i<2))"
+       line 412, "pan.___", state 711, "((i>=2))"
+       line 239, "pan.___", state 717, "(1)"
+       line 243, "pan.___", state 725, "(1)"
+       line 243, "pan.___", state 726, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 726, "else"
+       line 241, "pan.___", state 731, "((i<1))"
+       line 241, "pan.___", state 731, "((i>=1))"
+       line 247, "pan.___", state 737, "(1)"
+       line 247, "pan.___", state 738, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 738, "else"
+       line 251, "pan.___", state 745, "(1)"
+       line 251, "pan.___", state 746, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 746, "else"
+       line 249, "pan.___", state 751, "((i<2))"
+       line 249, "pan.___", state 751, "((i>=2))"
+       line 256, "pan.___", state 755, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 755, "else"
+       line 421, "pan.___", state 757, "(1)"
+       line 421, "pan.___", state 757, "(1)"
+       line 1110, "pan.___", state 762, "_proc_urcu_writer = (_proc_urcu_writer|(1<<11))"
+       line 1105, "pan.___", state 763, "(((tmp2&((1<<7)-1))&&((tmp2^0)&(1<<7))))"
+       line 1105, "pan.___", state 763, "else"
+       line 1130, "pan.___", state 767, "_proc_urcu_writer = (_proc_urcu_writer&~(((1<<12)|(1<<11))))"
+       line 262, "pan.___", state 798, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 807, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 822, "(1)"
+       line 274, "pan.___", state 829, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 845, "(1)"
+       line 243, "pan.___", state 853, "(1)"
+       line 247, "pan.___", state 865, "(1)"
+       line 251, "pan.___", state 873, "(1)"
+       line 262, "pan.___", state 904, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 913, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 926, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 935, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 951, "(1)"
+       line 243, "pan.___", state 959, "(1)"
+       line 247, "pan.___", state 971, "(1)"
+       line 251, "pan.___", state 979, "(1)"
+       line 266, "pan.___", state 1005, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1018, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1027, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1043, "(1)"
+       line 243, "pan.___", state 1051, "(1)"
+       line 247, "pan.___", state 1063, "(1)"
+       line 251, "pan.___", state 1071, "(1)"
+       line 262, "pan.___", state 1102, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 1111, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1124, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1133, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1149, "(1)"
+       line 243, "pan.___", state 1157, "(1)"
+       line 247, "pan.___", state 1169, "(1)"
+       line 251, "pan.___", state 1177, "(1)"
+       line 262, "pan.___", state 1194, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 262, "pan.___", state 1196, "(1)"
+       line 266, "pan.___", state 1203, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 1205, "(1)"
+       line 266, "pan.___", state 1206, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 266, "pan.___", state 1206, "else"
+       line 264, "pan.___", state 1211, "((i<1))"
+       line 264, "pan.___", state 1211, "((i>=1))"
+       line 270, "pan.___", state 1216, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1218, "(1)"
+       line 270, "pan.___", state 1219, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 270, "pan.___", state 1219, "else"
+       line 274, "pan.___", state 1225, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1227, "(1)"
+       line 274, "pan.___", state 1228, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 274, "pan.___", state 1228, "else"
+       line 272, "pan.___", state 1233, "((i<2))"
+       line 272, "pan.___", state 1233, "((i>=2))"
+       line 239, "pan.___", state 1241, "(1)"
+       line 243, "pan.___", state 1249, "(1)"
+       line 243, "pan.___", state 1250, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 1250, "else"
+       line 241, "pan.___", state 1255, "((i<1))"
+       line 241, "pan.___", state 1255, "((i>=1))"
+       line 247, "pan.___", state 1261, "(1)"
+       line 247, "pan.___", state 1262, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 1262, "else"
+       line 251, "pan.___", state 1269, "(1)"
+       line 251, "pan.___", state 1270, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 1270, "else"
+       line 256, "pan.___", state 1279, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 1279, "else"
+       line 1206, "pan.___", state 1282, "i = 0"
+       line 1206, "pan.___", state 1284, "reader_barrier = 1"
+       line 1206, "pan.___", state 1295, "((i<1))"
+       line 1206, "pan.___", state 1295, "((i>=1))"
+       line 262, "pan.___", state 1300, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 262, "pan.___", state 1302, "(1)"
+       line 266, "pan.___", state 1309, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 1311, "(1)"
+       line 266, "pan.___", state 1312, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 266, "pan.___", state 1312, "else"
+       line 264, "pan.___", state 1317, "((i<1))"
+       line 264, "pan.___", state 1317, "((i>=1))"
+       line 270, "pan.___", state 1322, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1324, "(1)"
+       line 270, "pan.___", state 1325, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 270, "pan.___", state 1325, "else"
+       line 274, "pan.___", state 1331, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1333, "(1)"
+       line 274, "pan.___", state 1334, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 274, "pan.___", state 1334, "else"
+       line 272, "pan.___", state 1339, "((i<2))"
+       line 272, "pan.___", state 1339, "((i>=2))"
+       line 239, "pan.___", state 1347, "(1)"
+       line 243, "pan.___", state 1355, "(1)"
+       line 243, "pan.___", state 1356, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 1356, "else"
+       line 241, "pan.___", state 1361, "((i<1))"
+       line 241, "pan.___", state 1361, "((i>=1))"
+       line 247, "pan.___", state 1367, "(1)"
+       line 247, "pan.___", state 1368, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 1368, "else"
+       line 251, "pan.___", state 1375, "(1)"
+       line 251, "pan.___", state 1376, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 1376, "else"
+       line 256, "pan.___", state 1385, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 1385, "else"
+       line 289, "pan.___", state 1387, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 289, "pan.___", state 1387, "else"
+       line 1206, "pan.___", state 1388, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 1206, "pan.___", state 1388, "else"
+       line 266, "pan.___", state 1401, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1414, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1423, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1439, "(1)"
+       line 243, "pan.___", state 1447, "(1)"
+       line 247, "pan.___", state 1459, "(1)"
+       line 251, "pan.___", state 1467, "(1)"
+       line 262, "pan.___", state 1498, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 1507, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1520, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1529, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1545, "(1)"
+       line 243, "pan.___", state 1553, "(1)"
+       line 247, "pan.___", state 1565, "(1)"
+       line 251, "pan.___", state 1573, "(1)"
+       line 1214, "pan.___", state 1589, "-end-"
+       (218 of 1589 states)
+unreached in proctype :init:
+       line 1225, "pan.___", state 9, "((j<2))"
+       line 1225, "pan.___", state 9, "((j>=2))"
+       line 1226, "pan.___", state 20, "((j<2))"
+       line 1226, "pan.___", state 20, "((j>=2))"
+       line 1231, "pan.___", state 33, "((j<2))"
+       line 1231, "pan.___", state 33, "((j>=2))"
+       line 1229, "pan.___", state 43, "((i<1))"
+       line 1229, "pan.___", state 43, "((i>=1))"
+       line 1239, "pan.___", state 54, "((j<2))"
+       line 1239, "pan.___", state 54, "((j>=2))"
+       line 1243, "pan.___", state 67, "((j<2))"
+       line 1243, "pan.___", state 67, "((j>=2))"
+       (6 of 78 states)
+unreached in proctype :never:
+       line 1277, "pan.___", state 8, "-end-"
+       (1 of 8 states)
+
+pan: elapsed time 102 seconds
+pan: rate 52558.797 states/second
+pan: avg transition delay 2.5934e-06 usec
+cp .input.spin urcu_free_single_flip.spin.input
+cp .input.spin.trail urcu_free_single_flip.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow'
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_single_flip.spin.input b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_single_flip.spin.input
new file mode 100644 (file)
index 0000000..240a2fd
--- /dev/null
@@ -0,0 +1,1250 @@
+/* Verification-run configuration: writer performs a single gp-counter flip. */
+#define SINGLE_FLIP
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+// Number of slots in the modeled slab of RCU-protected data
+#define SLAB_SIZE 2
+
+// Safety predicate: true when a reader observed poisoned (freed) data.
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+// Grace-period phase bit and reader nesting-count mask
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+// NOTE(review): despite the "disabled" note above, REMOTE_BARRIERS IS defined
+// for this run (read-side barriers modeled as remote/IPI requests) — confirm
+// the note refers to local read-side mb() being disabled.
+#define REMOTE_BARRIERS
+
+// Target architecture selection (exactly one should be defined)
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+/* True when all "bits" tokens are present in "state" and none of "notbits". */
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+/* Mark the one-hot "bits" tokens as produced in "state". */
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+/* Remove the "bits" tokens from "state" (e.g. when restarting a loop). */
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered as a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. c.f.
+ * http://www.linuxjournal.com/article/8212)
+ */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process have its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
+ * both.
+ */
+
+/* Per-process cached copy of a byte-typed shared variable. */
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+/* Per-process cached copy of a bit-typed shared variable. */
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+/* One dirty bit per process: set when its cached copy is newer than memory. */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+/* Declare shared variable x: its memory copy, per-process cached copies,
+ * and a per-process dirty bitfield. */
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+/* Initialize memory, mark all caches clean, and seed every cached copy. */
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+/* True when process "id" holds an unflushed write to x in its cache. */
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+/* Reads always hit the current process's cached copy. */
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+/* Writes go to the current process's cache and mark it dirty. */
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+/* Flush process "id"'s dirty cached write of x to memory, clearing the bit. */
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+/* Refresh process "id"'s cached copy of x from memory, unless dirty
+ * (a dirty cache must not lose its pending write). */
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ * Nondeterministic flush: models the cache writing back at any time.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+/* Nondeterministic refresh: models the cache reading memory at any time. */
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
+/*
+ * Read barrier model. Must consume all prior read tokens; all subsequent
+ * reads depend on it. In one atomic step, refreshes this process's cached
+ * copy of every shared variable (urcu_gp_ctr, urcu_active_readers[],
+ * rcu_ptr, rcu_data[]) from memory.
+ */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/*
+ * Write barrier model. Must consume all prior write tokens; all subsequent
+ * writes depend on it. In one atomic step, flushes this process's dirty
+ * cached writes of every shared variable to memory.
+ */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it.
+ * Full barrier modeled as wmb followed by rmb within a single atomic step. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barriers dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ * Nondeterministically either executes a full smp_mb() on the writer's
+ * behalf and acknowledges the request, or stops servicing and lets the
+ * reader resume normal execution.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+/* Progress-label generator for the writer's barrier-wait busy loop. */
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+/*
+ * Writer-side remote (IPI-style) barrier: execute a local smp_mb(), post a
+ * barrier request to every reader and busy-wait for each acknowledgment
+ * (the wait carries a progress label because readers may legitimately never
+ * service the request), then execute a final local smp_mb().
+ */
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+/*
+ * Without REMOTE_BARRIERS, reader-side barriers are real local smp_mb()s
+ * and there are no remote requests to receive.
+ */
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+/*
+ * Fix: take the same (i, j) parameter list as the REMOTE_BARRIERS variant
+ * above; without it, a call site "smp_mb_reader(i, j)" would expand to
+ * "smp_mb(i)(i, j)" and fail to parse.
+ */
+#define smp_mb_reader(i, j)    smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+/* Busy-wait until the init process sets init_done (globals initialized). */
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+/*
+ * Models out-of-order memory effects between core-synchronizing points:
+ * each shared variable's cached write may (or may not) be flushed to
+ * memory. On architectures with out-of-order cache-bank loads
+ * (HAVE_OOO_CACHE_READ, i.e. Alpha) cached reads may also be refreshed
+ * nondeterministically; otherwise dependent reads stay ordered, modeled
+ * here by an unconditional smp_rmb().
+ */
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/*
+ * One-hot tokens for the body of PROCEDURE_READ_LOCK:
+ * A reads the reader's nesting counter, B is the branch decision on the
+ * nesting mask, C reads the global gp counter on the outermost path.
+ */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+/*
+ * Read-lock procedure with explicit instruction-level data/control
+ * dependencies: each ":: CONSUME_TOKENS(...)" guard is one instruction,
+ * runnable as soon as its input tokens are available, which lets the model
+ * explore out-of-order instruction scheduling.
+ */
+#define PROCEDURE_READ_LOCK(base, consumetoken, producetoken)                          \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, READ_PROD_A_READ << base) ->  \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base,                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_C_IF_TRUE_READ     /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+/*
+ * Models rcu_read_unlock(): read the per-reader active counter, then write
+ * it back decremented.  The read and the write are separate weak-memory
+ * steps (each preceded by ooo_mem()); the WAR dependency between them is
+ * expressed through the READ_PROC_READ_UNLOCK token shifted by "base".
+ */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);            \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);        \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+/*
+ * Reader token bit allocation.
+ * Each PROCEDURE_READ_LOCK/PROCEDURE_READ_UNLOCK expansion consumes a
+ * contiguous range of bits starting at its *_BASE; the matching *_OUT bit
+ * marks completion of that procedure.  The standalone READ_PROC_* bits mark
+ * completion of individual instructions (memory barriers, pointer read,
+ * data access), both for the first pass and for the unrolled second pass.
+ */
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base = << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+/* Mask of every bit used above: bits 0 to 29 inclusive. */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+/*
+ * One full reader pass: two nested rcu_read_lock/unlock pairs, then an
+ * unrolled second lock/unlock, with the RCU pointer read and the pointed-to
+ * data access performed inside each critical section.
+ *
+ * Instruction scheduling is modeled as a token dependency graph:
+ * CONSUME_TOKENS guards an instruction on the completion bits of its
+ * dependencies, and PRODUCE_TOKENS publishes its own completion bit, so the
+ * nondeterministic if/fi below lets SPIN explore every execution order the
+ * dependency tokens permit (modeling out-of-order execution).
+ */
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+/* With NO_MB, pretend all four barriers already executed (barriers removed). */
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+/* With REMOTE_BARRIERS, barriers are issued by smp_mb_recv() below instead. */
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                *
+                * The disjunction below enumerates every prefix of the
+                * program-order instruction sequence: smp_mb_recv() may run
+                * only at a point where all earlier instructions of the
+                * prefix have completed and none of the later ones have.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               /* Outside the atomic block: the barrier itself may interleave. */
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB | READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation has the barrier
+                        * because the performance impact added by a branch in the common case does not
+                        * justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_UNLOCK_OUT             /* RAW */
+                                           | READ_PROC_SECOND_MB       /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB        /* mb() orders reads */
+                                           | READ_LOCK_NESTED_OUT      /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT,   /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       /* All instructions retired : reset tokens, end the pass. */
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event mb()s are removed (execution using signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * to spill its execution on other loop's execution.
+        */
+       goto end;
+/* Out-of-line read barriers: jumped to from inside the atomic block above. */
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+/*
+ * Reader process: after initialization completes, loop forever performing
+ * one full read-side pass (urcu_one_read) per iteration.
+ */
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test reader's progress here, because we are mainly
+                * interested in writer's progress. The reader never blocks
+                * anyway. We have to test for reader/writer's progress
+                * separately, otherwise we could think the writer is doing
+                * progress when it's blocked by an always progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+/* Writer token word: one instruction-completion bit per define below. */
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+/*
+ * Tokens held by a completed writer pass.  The *_WAIT_LOOP branch bits
+ * (bits 8 and 12) are deliberately absent from this set.
+ */
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+/* Mask of every writer bit used above: bits 0 to 14 inclusive. */
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existing dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we are always starting by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT         /* Control dependency : need to branch out of
+                                                                * the loop to execute the next flip (CHECK) */
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill on the next loop execution. Given the validation we do
+                * checks for the data entry read to be poisoned, it's ok if
+                * we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_single_flip.spin.input.trail b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_free_single_flip.spin.input.trail
new file mode 100644 (file)
index 0000000..f8289ee
--- /dev/null
@@ -0,0 +1,1257 @@
+-2:3:-2
+-4:-4:-4
+1:0:4178
+2:3:4098
+3:3:4101
+4:3:4101
+5:3:4104
+6:3:4112
+7:3:4112
+8:3:4115
+9:3:4121
+10:3:4125
+11:3:4125
+12:3:4128
+13:3:4138
+14:3:4146
+15:3:4146
+16:3:4149
+17:3:4155
+18:3:4159
+19:3:4159
+20:3:4162
+21:3:4168
+22:3:4172
+23:3:4173
+24:0:4178
+25:3:4175
+26:0:4178
+27:2:2511
+28:0:4178
+29:2:2517
+30:0:4178
+31:2:2518
+32:0:4178
+33:2:2520
+34:0:4178
+35:2:2521
+36:0:4178
+37:2:2522
+38:0:4178
+39:2:2523
+40:0:4178
+41:2:2524
+42:0:4178
+43:2:2525
+44:0:4178
+45:2:2526
+46:2:2527
+47:2:2531
+48:2:2532
+49:2:2540
+50:2:2541
+51:2:2545
+52:2:2546
+53:2:2554
+54:2:2559
+55:2:2563
+56:2:2564
+57:2:2572
+58:2:2573
+59:2:2577
+60:2:2578
+61:2:2572
+62:2:2573
+63:2:2577
+64:2:2578
+65:2:2586
+66:2:2591
+67:2:2598
+68:2:2599
+69:2:2606
+70:2:2611
+71:2:2618
+72:2:2619
+73:2:2618
+74:2:2619
+75:2:2626
+76:2:2636
+77:0:4178
+78:2:2525
+79:0:4178
+80:2:2640
+81:2:2644
+82:2:2645
+83:2:2649
+84:2:2653
+85:2:2654
+86:2:2658
+87:2:2666
+88:2:2667
+89:2:2671
+90:2:2675
+91:2:2676
+92:2:2671
+93:2:2672
+94:2:2680
+95:0:4178
+96:2:2525
+97:0:4178
+98:2:2688
+99:2:2689
+100:2:2690
+101:0:4178
+102:2:2525
+103:0:4178
+104:2:2695
+105:0:4178
+106:2:3307
+107:2:3308
+108:2:3312
+109:2:3316
+110:2:3317
+111:2:3321
+112:2:3326
+113:2:3334
+114:2:3338
+115:2:3339
+116:2:3334
+117:2:3338
+118:2:3339
+119:2:3343
+120:2:3350
+121:2:3357
+122:2:3358
+123:2:3365
+124:2:3370
+125:2:3377
+126:2:3378
+127:2:3377
+128:2:3378
+129:2:3385
+130:2:3389
+131:0:4178
+132:2:3394
+133:0:4178
+134:2:3395
+135:0:4178
+136:2:3396
+137:0:4178
+138:2:3397
+139:0:4178
+140:1:2
+141:0:4178
+142:2:3398
+143:0:4178
+144:1:8
+145:0:4178
+146:1:9
+147:0:4178
+148:2:3397
+149:0:4178
+150:1:10
+151:0:4178
+152:2:3398
+153:0:4178
+154:1:11
+155:0:4178
+156:2:3397
+157:0:4178
+158:1:12
+159:0:4178
+160:2:3398
+161:0:4178
+162:1:13
+163:0:4178
+164:2:3397
+165:0:4178
+166:1:14
+167:0:4178
+168:2:3398
+169:0:4178
+170:1:15
+171:0:4178
+172:1:16
+173:0:4178
+174:2:3397
+175:0:4178
+176:1:17
+177:0:4178
+178:2:3398
+179:0:4178
+180:1:26
+181:0:4178
+182:2:3397
+183:0:4178
+184:1:30
+185:1:31
+186:1:35
+187:1:39
+188:1:40
+189:1:44
+190:1:52
+191:1:53
+192:1:57
+193:1:61
+194:1:62
+195:1:57
+196:1:61
+197:1:62
+198:1:66
+199:1:73
+200:1:80
+201:1:81
+202:1:88
+203:1:93
+204:1:100
+205:1:101
+206:1:100
+207:1:101
+208:1:108
+209:1:112
+210:0:4178
+211:2:3398
+212:0:4178
+213:1:117
+214:0:4178
+215:2:3399
+216:0:4178
+217:2:3404
+218:0:4178
+219:2:3405
+220:0:4178
+221:2:3413
+222:2:3414
+223:2:3418
+224:2:3422
+225:2:3423
+226:2:3427
+227:2:3435
+228:2:3436
+229:2:3440
+230:2:3444
+231:2:3445
+232:2:3440
+233:2:3444
+234:2:3445
+235:2:3449
+236:2:3456
+237:2:3463
+238:2:3464
+239:2:3471
+240:2:3476
+241:2:3483
+242:2:3484
+243:2:3483
+244:2:3484
+245:2:3491
+246:2:3495
+247:0:4178
+248:2:2697
+249:2:3288
+250:0:4178
+251:2:2525
+252:0:4178
+253:2:2698
+254:0:4178
+255:2:2525
+256:0:4178
+257:2:2701
+258:2:2702
+259:2:2706
+260:2:2707
+261:2:2715
+262:2:2716
+263:2:2720
+264:2:2721
+265:2:2729
+266:2:2734
+267:2:2738
+268:2:2739
+269:2:2747
+270:2:2748
+271:2:2752
+272:2:2753
+273:2:2747
+274:2:2748
+275:2:2752
+276:2:2753
+277:2:2761
+278:2:2766
+279:2:2773
+280:2:2774
+281:2:2781
+282:2:2786
+283:2:2793
+284:2:2794
+285:2:2793
+286:2:2794
+287:2:2801
+288:2:2810
+289:0:4178
+290:2:2525
+291:0:4178
+292:2:2814
+293:2:2823
+294:2:2824
+295:2:2828
+296:2:2829
+297:2:2833
+298:2:2834
+299:2:2842
+300:2:2847
+301:2:2851
+302:2:2852
+303:2:2860
+304:2:2861
+305:2:2865
+306:2:2866
+307:2:2860
+308:2:2861
+309:2:2865
+310:2:2866
+311:2:2874
+312:2:2881
+313:2:2882
+314:2:2886
+315:2:2887
+316:2:2894
+317:2:2899
+318:2:2906
+319:2:2907
+320:2:2906
+321:2:2907
+322:2:2914
+323:2:2926
+324:2:2927
+325:0:4178
+326:2:2525
+327:0:4178
+328:2:3276
+329:0:4178
+330:1:118
+331:0:4178
+332:1:120
+333:0:4178
+334:1:19
+335:0:4178
+336:1:126
+337:1:127
+338:1:131
+339:1:132
+340:1:140
+341:1:141
+342:1:145
+343:1:146
+344:1:154
+345:1:159
+346:1:163
+347:1:164
+348:1:172
+349:1:173
+350:1:177
+351:1:178
+352:1:172
+353:1:173
+354:1:177
+355:1:178
+356:1:186
+357:1:191
+358:1:198
+359:1:199
+360:1:206
+361:1:211
+362:1:218
+363:1:219
+364:1:218
+365:1:219
+366:1:226
+367:0:4178
+368:1:15
+369:0:4178
+370:1:16
+371:0:4178
+372:1:17
+373:0:4178
+374:1:118
+375:0:4178
+376:1:120
+377:0:4178
+378:1:19
+379:0:4178
+380:1:237
+381:1:238
+382:0:4178
+383:1:15
+384:0:4178
+385:1:16
+386:0:4178
+387:1:17
+388:0:4178
+389:1:118
+390:0:4178
+391:1:120
+392:0:4178
+393:1:19
+394:0:4178
+395:1:244
+396:1:245
+397:1:249
+398:1:250
+399:1:258
+400:1:259
+401:1:263
+402:1:264
+403:1:272
+404:1:277
+405:1:281
+406:1:282
+407:1:290
+408:1:291
+409:1:295
+410:1:296
+411:1:290
+412:1:291
+413:1:295
+414:1:296
+415:1:304
+416:1:309
+417:1:316
+418:1:317
+419:1:324
+420:1:329
+421:1:336
+422:1:337
+423:1:336
+424:1:337
+425:1:344
+426:0:4178
+427:1:15
+428:0:4178
+429:1:16
+430:0:4178
+431:2:3898
+432:2:3906
+433:2:3910
+434:2:3911
+435:2:3915
+436:2:3923
+437:2:3924
+438:2:3928
+439:2:3932
+440:2:3933
+441:2:3928
+442:2:3932
+443:2:3933
+444:2:3937
+445:2:3944
+446:2:3951
+447:2:3952
+448:2:3959
+449:2:3964
+450:2:3971
+451:2:3972
+452:2:3971
+453:2:3972
+454:2:3979
+455:2:3983
+456:0:4178
+457:2:3988
+458:0:4178
+459:2:3989
+460:0:4178
+461:2:3990
+462:0:4178
+463:2:3991
+464:0:4178
+465:1:17
+466:0:4178
+467:2:3992
+468:0:4178
+469:1:26
+470:0:4178
+471:2:3991
+472:0:4178
+473:1:30
+474:1:31
+475:1:35
+476:1:39
+477:1:40
+478:1:44
+479:1:52
+480:1:53
+481:1:57
+482:1:61
+483:1:62
+484:1:57
+485:1:61
+486:1:62
+487:1:66
+488:1:73
+489:1:80
+490:1:81
+491:1:88
+492:1:93
+493:1:100
+494:1:101
+495:1:100
+496:1:101
+497:1:108
+498:1:112
+499:0:4178
+500:2:3992
+501:0:4178
+502:1:117
+503:0:4178
+504:2:3993
+505:0:4178
+506:2:3998
+507:0:4178
+508:2:3999
+509:0:4178
+510:2:4007
+511:2:4008
+512:2:4012
+513:2:4016
+514:2:4017
+515:2:4021
+516:2:4029
+517:2:4030
+518:2:4034
+519:2:4038
+520:2:4039
+521:2:4034
+522:2:4038
+523:2:4039
+524:2:4043
+525:2:4050
+526:2:4057
+527:2:4058
+528:2:4065
+529:2:4070
+530:2:4077
+531:2:4078
+532:2:4077
+533:2:4078
+534:2:4085
+535:2:4089
+536:0:4178
+537:2:3278
+538:2:3288
+539:0:4178
+540:2:2525
+541:0:4178
+542:2:3279
+543:2:3280
+544:0:4178
+545:2:2525
+546:0:4178
+547:2:3284
+548:0:4178
+549:2:3292
+550:0:4178
+551:2:2518
+552:0:4178
+553:2:2520
+554:0:4178
+555:2:2521
+556:0:4178
+557:2:2522
+558:0:4178
+559:2:2523
+560:0:4178
+561:2:2524
+562:0:4178
+563:2:2525
+564:0:4178
+565:2:2526
+566:2:2527
+567:2:2531
+568:2:2532
+569:2:2540
+570:2:2541
+571:2:2545
+572:2:2546
+573:2:2554
+574:2:2559
+575:2:2563
+576:2:2564
+577:2:2572
+578:2:2573
+579:2:2574
+580:2:2572
+581:2:2573
+582:2:2577
+583:2:2578
+584:2:2586
+585:2:2591
+586:2:2598
+587:2:2599
+588:2:2606
+589:2:2611
+590:2:2618
+591:2:2619
+592:2:2618
+593:2:2619
+594:2:2626
+595:2:2636
+596:0:4178
+597:2:2525
+598:0:4178
+599:2:2640
+600:2:2644
+601:2:2645
+602:2:2649
+603:2:2653
+604:2:2654
+605:2:2658
+606:2:2666
+607:2:2667
+608:2:2671
+609:2:2672
+610:2:2671
+611:2:2675
+612:2:2676
+613:2:2680
+614:0:4178
+615:2:2525
+616:0:4178
+617:2:2688
+618:2:2689
+619:2:2690
+620:0:4178
+621:2:2525
+622:0:4178
+623:2:2695
+624:0:4178
+625:1:118
+626:0:4178
+627:1:120
+628:0:4178
+629:1:19
+630:0:4178
+631:1:355
+632:1:356
+633:1:360
+634:1:361
+635:1:369
+636:1:370
+637:1:374
+638:1:375
+639:1:383
+640:1:388
+641:1:392
+642:1:393
+643:1:401
+644:1:402
+645:1:406
+646:1:407
+647:1:401
+648:1:402
+649:1:406
+650:1:407
+651:1:415
+652:1:420
+653:1:427
+654:1:428
+655:1:435
+656:1:440
+657:1:447
+658:1:448
+659:1:447
+660:1:448
+661:1:455
+662:1:464
+663:0:4178
+664:1:15
+665:0:4178
+666:1:16
+667:0:4178
+668:1:17
+669:0:4178
+670:1:118
+671:0:4178
+672:1:120
+673:0:4178
+674:1:19
+675:0:4178
+676:1:584
+677:1:585
+678:1:589
+679:1:590
+680:1:598
+681:1:599
+682:1:600
+683:1:612
+684:1:617
+685:1:621
+686:1:622
+687:1:630
+688:1:631
+689:1:635
+690:1:636
+691:1:630
+692:1:631
+693:1:635
+694:1:636
+695:1:644
+696:1:649
+697:1:656
+698:1:657
+699:1:664
+700:1:669
+701:1:676
+702:1:677
+703:1:676
+704:1:677
+705:1:684
+706:0:4178
+707:1:15
+708:0:4178
+709:1:16
+710:0:4178
+711:1:17
+712:0:4178
+713:1:118
+714:0:4178
+715:1:120
+716:0:4178
+717:1:19
+718:0:4178
+719:1:695
+720:1:698
+721:1:699
+722:0:4178
+723:1:15
+724:0:4178
+725:1:16
+726:0:4178
+727:1:17
+728:0:4178
+729:1:118
+730:0:4178
+731:1:120
+732:0:4178
+733:1:19
+734:0:4178
+735:1:926
+736:1:927
+737:1:931
+738:1:932
+739:1:940
+740:1:941
+741:1:945
+742:1:946
+743:1:954
+744:1:959
+745:1:963
+746:1:964
+747:1:972
+748:1:973
+749:1:977
+750:1:978
+751:1:972
+752:1:973
+753:1:977
+754:1:978
+755:1:986
+756:1:991
+757:1:998
+758:1:999
+759:1:1006
+760:1:1011
+761:1:1018
+762:1:1019
+763:1:1018
+764:1:1019
+765:1:1026
+766:1:1035
+767:1:1039
+768:0:4178
+769:1:15
+770:0:4178
+771:1:16
+772:0:4178
+773:1:17
+774:0:4178
+775:1:118
+776:0:4178
+777:1:120
+778:0:4178
+779:1:19
+780:0:4178
+781:1:1040
+782:1:1041
+783:1:1045
+784:1:1046
+785:1:1054
+786:1:1055
+787:1:1056
+788:1:1068
+789:1:1073
+790:1:1077
+791:1:1078
+792:1:1086
+793:1:1087
+794:1:1091
+795:1:1092
+796:1:1086
+797:1:1087
+798:1:1091
+799:1:1092
+800:1:1100
+801:1:1105
+802:1:1112
+803:1:1113
+804:1:1120
+805:1:1125
+806:1:1132
+807:1:1133
+808:1:1132
+809:1:1133
+810:1:1140
+811:0:4178
+812:1:15
+813:0:4178
+814:1:16
+815:0:4178
+816:2:3307
+817:2:3308
+818:2:3312
+819:2:3316
+820:2:3317
+821:2:3321
+822:2:3326
+823:2:3334
+824:2:3338
+825:2:3339
+826:2:3334
+827:2:3338
+828:2:3339
+829:2:3343
+830:2:3350
+831:2:3357
+832:2:3358
+833:2:3365
+834:2:3370
+835:2:3377
+836:2:3378
+837:2:3377
+838:2:3378
+839:2:3385
+840:2:3389
+841:0:4178
+842:2:3394
+843:0:4178
+844:2:3395
+845:0:4178
+846:2:3396
+847:0:4178
+848:2:3397
+849:0:4178
+850:1:17
+851:0:4178
+852:2:3398
+853:0:4178
+854:1:26
+855:0:4178
+856:2:3397
+857:0:4178
+858:1:30
+859:1:31
+860:1:35
+861:1:39
+862:1:40
+863:1:44
+864:1:52
+865:1:53
+866:1:57
+867:1:61
+868:1:62
+869:1:57
+870:1:61
+871:1:62
+872:1:66
+873:1:73
+874:1:80
+875:1:81
+876:1:88
+877:1:93
+878:1:100
+879:1:101
+880:1:100
+881:1:101
+882:1:108
+883:1:112
+884:0:4178
+885:2:3398
+886:0:4178
+887:1:117
+888:0:4178
+889:2:3399
+890:0:4178
+891:2:3404
+892:0:4178
+893:2:3405
+894:0:4178
+895:2:3413
+896:2:3414
+897:2:3418
+898:2:3422
+899:2:3423
+900:2:3427
+901:2:3435
+902:2:3436
+903:2:3440
+904:2:3444
+905:2:3445
+906:2:3440
+907:2:3444
+908:2:3445
+909:2:3449
+910:2:3456
+911:2:3463
+912:2:3464
+913:2:3471
+914:2:3476
+915:2:3483
+916:2:3484
+917:2:3483
+918:2:3484
+919:2:3491
+920:2:3495
+921:0:4178
+922:2:2697
+923:2:3288
+924:0:4178
+925:2:2525
+926:0:4178
+927:2:2698
+928:0:4178
+929:2:2525
+930:0:4178
+931:2:2701
+932:2:2702
+933:2:2706
+934:2:2707
+935:2:2715
+936:2:2716
+937:2:2720
+938:2:2721
+939:2:2729
+940:2:2734
+941:2:2738
+942:2:2739
+943:2:2747
+944:2:2748
+945:2:2752
+946:2:2753
+947:2:2747
+948:2:2748
+949:2:2752
+950:2:2753
+951:2:2761
+952:2:2766
+953:2:2773
+954:2:2774
+955:2:2781
+956:2:2786
+957:2:2793
+958:2:2794
+959:2:2793
+960:2:2794
+961:2:2801
+962:2:2810
+963:0:4178
+964:2:2525
+965:0:4178
+966:2:2814
+967:2:2815
+968:2:2816
+969:2:2828
+970:2:2829
+971:2:2833
+972:2:2834
+973:2:2842
+974:2:2847
+975:2:2851
+976:2:2852
+977:2:2860
+978:2:2861
+979:2:2865
+980:2:2866
+981:2:2860
+982:2:2861
+983:2:2865
+984:2:2866
+985:2:2874
+986:2:2879
+987:2:2886
+988:2:2887
+989:2:2894
+990:2:2899
+991:2:2906
+992:2:2907
+993:2:2906
+994:2:2907
+995:2:2914
+996:2:2926
+997:2:2927
+998:0:4178
+999:2:2525
+1000:0:4178
+1001:2:3276
+1002:0:4178
+1003:2:3901
+1004:2:3902
+1005:2:3906
+1006:2:3910
+1007:2:3911
+1008:2:3915
+1009:2:3923
+1010:2:3924
+1011:2:3928
+1012:2:3932
+1013:2:3933
+1014:2:3928
+1015:2:3932
+1016:2:3933
+1017:2:3937
+1018:2:3944
+1019:2:3951
+1020:2:3952
+1021:2:3959
+1022:2:3964
+1023:2:3971
+1024:2:3972
+1025:2:3971
+1026:2:3972
+1027:2:3979
+1028:2:3983
+1029:0:4178
+1030:2:3988
+1031:0:4178
+1032:2:3989
+1033:0:4178
+1034:2:3990
+1035:0:4178
+1036:2:3991
+1037:0:4178
+1038:1:26
+1039:0:4178
+1040:2:3992
+1041:0:4178
+1042:1:30
+1043:1:31
+1044:1:35
+1045:1:39
+1046:1:40
+1047:1:44
+1048:1:52
+1049:1:53
+1050:1:57
+1051:1:61
+1052:1:62
+1053:1:57
+1054:1:61
+1055:1:62
+1056:1:66
+1057:1:73
+1058:1:80
+1059:1:81
+1060:1:88
+1061:1:93
+1062:1:100
+1063:1:101
+1064:1:100
+1065:1:101
+1066:1:108
+1067:1:112
+1068:0:4178
+1069:2:3991
+1070:0:4178
+1071:1:117
+1072:0:4178
+1073:2:3992
+1074:0:4178
+1075:2:3993
+1076:0:4178
+1077:2:3998
+1078:0:4178
+1079:2:3999
+1080:0:4178
+1081:2:4007
+1082:2:4008
+1083:2:4012
+1084:2:4016
+1085:2:4017
+1086:2:4021
+1087:2:4029
+1088:2:4030
+1089:2:4034
+1090:2:4038
+1091:2:4039
+1092:2:4034
+1093:2:4038
+1094:2:4039
+1095:2:4043
+1096:2:4050
+1097:2:4057
+1098:2:4058
+1099:2:4065
+1100:2:4070
+1101:2:4077
+1102:2:4078
+1103:2:4077
+1104:2:4078
+1105:2:4085
+1106:2:4089
+1107:0:4178
+1108:2:3278
+1109:2:3288
+1110:0:4178
+1111:2:2525
+1112:0:4178
+1113:2:3279
+1114:2:3280
+1115:0:4178
+1116:2:2525
+1117:0:4178
+1118:2:3284
+1119:0:4178
+1120:2:3292
+1121:0:4178
+1122:2:2518
+1123:0:4178
+1124:2:2520
+1125:0:4178
+1126:2:2521
+1127:0:4178
+1128:2:2522
+1129:0:4178
+1130:2:2523
+1131:0:4178
+1132:2:2524
+1133:0:4178
+1134:2:2525
+1135:0:4178
+1136:2:2526
+1137:2:2527
+1138:2:2531
+1139:2:2532
+1140:2:2540
+1141:2:2541
+1142:2:2545
+1143:2:2546
+1144:2:2554
+1145:2:2559
+1146:2:2563
+1147:2:2564
+1148:2:2572
+1149:2:2573
+1150:2:2577
+1151:2:2578
+1152:2:2572
+1153:2:2573
+1154:2:2574
+1155:2:2586
+1156:2:2591
+1157:2:2598
+1158:2:2599
+1159:2:2606
+1160:2:2611
+1161:2:2618
+1162:2:2619
+1163:2:2618
+1164:2:2619
+1165:2:2626
+1166:2:2636
+1167:0:4178
+1168:2:2525
+1169:0:4178
+1170:1:118
+1171:0:4178
+1172:1:120
+1173:0:4178
+1174:1:19
+1175:0:4178
+1176:1:1151
+1177:0:4178
+1178:1:2417
+1179:1:2424
+1180:1:2425
+1181:1:2432
+1182:1:2437
+1183:1:2444
+1184:1:2445
+1185:1:2444
+1186:1:2445
+1187:1:2452
+1188:1:2456
+1189:0:4178
+1190:2:2640
+1191:2:2644
+1192:2:2645
+1193:2:2649
+1194:2:2653
+1195:2:2654
+1196:2:2658
+1197:2:2666
+1198:2:2667
+1199:2:2671
+1200:2:2675
+1201:2:2676
+1202:2:2671
+1203:2:2672
+1204:2:2680
+1205:0:4178
+1206:2:2525
+1207:0:4178
+1208:2:2688
+1209:2:2689
+1210:2:2690
+1211:0:4178
+1212:2:2525
+1213:0:4178
+1214:2:2695
+1215:0:4178
+1216:2:3307
+1217:2:3308
+1218:2:3312
+1219:2:3316
+1220:2:3317
+1221:2:3321
+1222:2:3326
+1223:2:3334
+1224:2:3338
+1225:2:3339
+1226:2:3334
+1227:2:3338
+1228:2:3339
+1229:2:3343
+1230:2:3350
+1231:2:3357
+1232:2:3358
+1233:2:3365
+1234:2:3370
+1235:2:3377
+1236:2:3378
+1237:2:3377
+1238:2:3378
+1239:2:3385
+1240:2:3389
+1241:0:4178
+1242:2:3394
+1243:0:4178
+1244:2:3395
+1245:0:4178
+1246:2:3396
+1247:0:4178
+1248:2:3397
+1249:0:4178
+1250:1:1153
+1251:1:1154
+1252:0:4176
+1253:2:3398
+1254:0:4182
+1255:1:2145
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress.ltl b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress.ltl
new file mode 100644 (file)
index 0000000..8718641
--- /dev/null
@@ -0,0 +1 @@
+([] <> !np_)
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_reader.define b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_reader.define
new file mode 100644 (file)
index 0000000..ff3f783
--- /dev/null
@@ -0,0 +1 @@
+#define READER_PROGRESS
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_reader.log b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_reader.log
new file mode 100644 (file)
index 0000000..9fd20b0
--- /dev/null
@@ -0,0 +1,535 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define > pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_progress_reader.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -f -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1272)
+depth 23: Claim reached state 9 (line 1277)
+depth 132: Claim reached state 9 (line 1276)
+Depth=   29926 States=    1e+06 Transitions= 7.64e+06 Memory=   520.256        t=   19.9 R=   5e+04
+Depth=   33506 States=    2e+06 Transitions= 1.57e+07 Memory=   571.818        t=   41.2 R=   5e+04
+Depth=   33506 States=    3e+06 Transitions= 2.45e+07 Memory=   624.943        t=   65.4 R=   5e+04
+pan: resizing hashtable to -w22..  done
+Depth=   33506 States=    4e+06 Transitions= 3.46e+07 Memory=   709.873        t=   93.2 R=   4e+04
+Depth=   33506 States=    5e+06 Transitions= 4.22e+07 Memory=   764.756        t=    113 R=   4e+04
+Depth=   33506 States=    6e+06 Transitions= 6.02e+07 Memory=   815.830        t=    165 R=   4e+04
+Depth=   33506 States=    7e+06 Transitions= 7.41e+07 Memory=   867.002        t=    205 R=   3e+04
+Depth=   33506 States=    8e+06 Transitions= 9.02e+07 Memory=   917.197        t=    251 R=   3e+04
+Depth=   33506 States=    9e+06 Transitions= 1.01e+08 Memory=   967.881        t=    283 R=   3e+04
+pan: resizing hashtable to -w24..  done
+Depth=   33506 States=    1e+07 Transitions= 1.17e+08 Memory=  1146.467        t=    327 R=   3e+04
+Depth=   33506 States=  1.1e+07 Transitions= 1.53e+08 Memory=  1201.154        t=    438 R=   3e+04
+Depth=   33506 States=  1.2e+07 Transitions= 1.72e+08 Memory=  1252.717        t=    492 R=   2e+04
+Depth=   33506 States=  1.3e+07 Transitions= 1.84e+08 Memory=  1303.401        t=    525 R=   2e+04
+Depth=   33506 States=  1.4e+07 Transitions= 2.05e+08 Memory=  1354.377        t=    586 R=   2e+04
+Depth=   33506 States=  1.5e+07 Transitions= 2.19e+08 Memory=  1404.572        t=    627 R=   2e+04
+Depth=   33506 States=  1.6e+07 Transitions= 2.33e+08 Memory=  1455.451        t=    666 R=   2e+04
+Depth=   33506 States=  1.7e+07 Transitions= 2.45e+08 Memory=  1506.428        t=    700 R=   2e+04
+Depth=   33506 States=  1.8e+07 Transitions= 2.57e+08 Memory=  1557.502        t=    733 R=   2e+04
+Depth=   33506 States=  1.9e+07 Transitions= 2.74e+08 Memory=  1607.697        t=    784 R=   2e+04
+Depth=   33506 States=    2e+07 Transitions= 2.86e+08 Memory=  1659.358        t=    818 R=   2e+04
+Depth=   33506 States=  2.1e+07 Transitions= 2.98e+08 Memory=  1710.920        t=    850 R=   2e+04
+Depth=   33506 States=  2.2e+07 Transitions=  3.1e+08 Memory=  1763.166        t=    883 R=   2e+04
+Depth=   33506 States=  2.3e+07 Transitions= 3.25e+08 Memory=  1814.143        t=    926 R=   2e+04
+Depth=   33506 States=  2.4e+07 Transitions= 3.44e+08 Memory=  1864.631        t=    983 R=   2e+04
+Depth=   33506 States=  2.5e+07 Transitions= 3.62e+08 Memory=  1913.264        t= 1.03e+03 R=   2e+04
+Depth=   33506 States=  2.6e+07 Transitions= 3.75e+08 Memory=  1964.045        t= 1.07e+03 R=   2e+04
+Depth=   33506 States=  2.7e+07 Transitions= 3.86e+08 Memory=  2016.291        t= 1.1e+03 R=   2e+04
+Depth=   33506 States=  2.8e+07 Transitions= 4.23e+08 Memory=  2074.592        t= 1.21e+03 R=   2e+04
+Depth=   33506 States=  2.9e+07 Transitions= 4.42e+08 Memory=  2123.225        t= 1.27e+03 R=   2e+04
+Depth=   33506 States=    3e+07 Transitions= 4.58e+08 Memory=  2173.029        t= 1.32e+03 R=   2e+04
+Depth=   33506 States=  3.1e+07 Transitions= 4.74e+08 Memory=  2224.397        t= 1.36e+03 R=   2e+04
+Depth=   33506 States=  3.2e+07 Transitions= 4.94e+08 Memory=  2276.252        t= 1.42e+03 R=   2e+04
+Depth=   33506 States=  3.3e+07 Transitions= 5.06e+08 Memory=  2325.276        t= 1.46e+03 R=   2e+04
+Depth=   33506 States=  3.4e+07 Transitions= 5.18e+08 Memory=  2376.838        t= 1.49e+03 R=   2e+04
+pan: resizing hashtable to -w26..  done
+Depth=   33506 States=  3.5e+07 Transitions= 5.31e+08 Memory=  2923.408        t= 1.53e+03 R=   2e+04
+Depth=   33506 States=  3.6e+07 Transitions= 5.46e+08 Memory=  2976.143        t= 1.58e+03 R=   2e+04
+Depth=   33506 States=  3.7e+07 Transitions= 5.58e+08 Memory=  3026.338        t= 1.61e+03 R=   2e+04
+Depth=   33506 States=  3.8e+07 Transitions=  5.7e+08 Memory=  3079.170        t= 1.64e+03 R=   2e+04
+Depth=   33506 States=  3.9e+07 Transitions= 5.81e+08 Memory=  3130.537        t= 1.67e+03 R=   2e+04
+Depth=   33506 States=    4e+07 Transitions=    6e+08 Memory=  3181.318        t= 1.73e+03 R=   2e+04
+Depth=   33506 States=  4.1e+07 Transitions= 6.17e+08 Memory=  3231.611        t= 1.78e+03 R=   2e+04
+Depth=   33506 States=  4.2e+07 Transitions= 6.34e+08 Memory=  3280.244        t= 1.83e+03 R=   2e+04
+Depth=   33506 States=  4.3e+07 Transitions= 6.52e+08 Memory=  3329.268        t= 1.88e+03 R=   2e+04
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness enabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 33506, errors: 0
+ 26582171 states, stored (4.34241e+07 visited)
+6.159304e+08 states, matched
+6.5935446e+08 transitions (= visited+matched)
+1.0143566e+10 atomic steps
+hash conflicts: 2.9064765e+08 (resolved)
+
+Stats on memory usage (in Megabytes):
+ 2940.685      equivalent memory usage for states (stored*(State-vector + overhead))
+ 2381.281      actual memory usage for states (compression: 80.98%)
+               state-vector as stored = 66 byte + 28 byte overhead
+  512.000      memory used for hash table (-w26)
+  457.764      memory used for DFS stack (-m10000000)
+    1.367      memory lost to fragmentation
+ 3349.678      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 262, "pan.___", state 30, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 52, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 61, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 77, "(1)"
+       line 243, "pan.___", state 85, "(1)"
+       line 247, "pan.___", state 97, "(1)"
+       line 251, "pan.___", state 105, "(1)"
+       line 401, "pan.___", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 163, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 177, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 195, "(1)"
+       line 247, "pan.___", state 215, "(1)"
+       line 251, "pan.___", state 223, "(1)"
+       line 680, "pan.___", state 242, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 401, "pan.___", state 249, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 281, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 295, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 313, "(1)"
+       line 247, "pan.___", state 333, "(1)"
+       line 251, "pan.___", state 341, "(1)"
+       line 401, "pan.___", state 360, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 392, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 406, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 424, "(1)"
+       line 247, "pan.___", state 444, "(1)"
+       line 251, "pan.___", state 452, "(1)"
+       line 401, "pan.___", state 473, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 475, "(1)"
+       line 401, "pan.___", state 476, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 476, "else"
+       line 401, "pan.___", state 479, "(1)"
+       line 405, "pan.___", state 487, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 489, "(1)"
+       line 405, "pan.___", state 490, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 490, "else"
+       line 405, "pan.___", state 493, "(1)"
+       line 405, "pan.___", state 494, "(1)"
+       line 405, "pan.___", state 494, "(1)"
+       line 403, "pan.___", state 499, "((i<1))"
+       line 403, "pan.___", state 499, "((i>=1))"
+       line 410, "pan.___", state 505, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 507, "(1)"
+       line 410, "pan.___", state 508, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 508, "else"
+       line 410, "pan.___", state 511, "(1)"
+       line 410, "pan.___", state 512, "(1)"
+       line 410, "pan.___", state 512, "(1)"
+       line 414, "pan.___", state 519, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 521, "(1)"
+       line 414, "pan.___", state 522, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 522, "else"
+       line 414, "pan.___", state 525, "(1)"
+       line 414, "pan.___", state 526, "(1)"
+       line 414, "pan.___", state 526, "(1)"
+       line 412, "pan.___", state 531, "((i<2))"
+       line 412, "pan.___", state 531, "((i>=2))"
+       line 239, "pan.___", state 537, "(1)"
+       line 243, "pan.___", state 545, "(1)"
+       line 243, "pan.___", state 546, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 546, "else"
+       line 241, "pan.___", state 551, "((i<1))"
+       line 241, "pan.___", state 551, "((i>=1))"
+       line 247, "pan.___", state 557, "(1)"
+       line 247, "pan.___", state 558, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 558, "else"
+       line 251, "pan.___", state 565, "(1)"
+       line 251, "pan.___", state 566, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 566, "else"
+       line 249, "pan.___", state 571, "((i<2))"
+       line 249, "pan.___", state 571, "((i>=2))"
+       line 256, "pan.___", state 575, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 575, "else"
+       line 421, "pan.___", state 577, "(1)"
+       line 421, "pan.___", state 577, "(1)"
+       line 680, "pan.___", state 580, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 680, "pan.___", state 581, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 680, "pan.___", state 582, "(1)"
+       line 401, "pan.___", state 589, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 621, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 635, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 653, "(1)"
+       line 247, "pan.___", state 673, "(1)"
+       line 251, "pan.___", state 681, "(1)"
+       line 401, "pan.___", state 707, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 709, "(1)"
+       line 401, "pan.___", state 710, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 710, "else"
+       line 401, "pan.___", state 713, "(1)"
+       line 405, "pan.___", state 721, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 723, "(1)"
+       line 405, "pan.___", state 724, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 724, "else"
+       line 405, "pan.___", state 727, "(1)"
+       line 405, "pan.___", state 728, "(1)"
+       line 405, "pan.___", state 728, "(1)"
+       line 403, "pan.___", state 733, "((i<1))"
+       line 403, "pan.___", state 733, "((i>=1))"
+       line 410, "pan.___", state 739, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 741, "(1)"
+       line 410, "pan.___", state 742, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 742, "else"
+       line 410, "pan.___", state 745, "(1)"
+       line 410, "pan.___", state 746, "(1)"
+       line 410, "pan.___", state 746, "(1)"
+       line 414, "pan.___", state 753, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 755, "(1)"
+       line 414, "pan.___", state 756, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 756, "else"
+       line 414, "pan.___", state 759, "(1)"
+       line 414, "pan.___", state 760, "(1)"
+       line 414, "pan.___", state 760, "(1)"
+       line 412, "pan.___", state 765, "((i<2))"
+       line 412, "pan.___", state 765, "((i>=2))"
+       line 239, "pan.___", state 771, "(1)"
+       line 243, "pan.___", state 779, "(1)"
+       line 243, "pan.___", state 780, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 780, "else"
+       line 241, "pan.___", state 785, "((i<1))"
+       line 241, "pan.___", state 785, "((i>=1))"
+       line 247, "pan.___", state 791, "(1)"
+       line 247, "pan.___", state 792, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 792, "else"
+       line 251, "pan.___", state 799, "(1)"
+       line 251, "pan.___", state 800, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 800, "else"
+       line 256, "pan.___", state 809, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 809, "else"
+       line 421, "pan.___", state 811, "(1)"
+       line 421, "pan.___", state 811, "(1)"
+       line 401, "pan.___", state 818, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 820, "(1)"
+       line 401, "pan.___", state 821, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 821, "else"
+       line 401, "pan.___", state 824, "(1)"
+       line 405, "pan.___", state 832, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 834, "(1)"
+       line 405, "pan.___", state 835, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 835, "else"
+       line 405, "pan.___", state 838, "(1)"
+       line 405, "pan.___", state 839, "(1)"
+       line 405, "pan.___", state 839, "(1)"
+       line 403, "pan.___", state 844, "((i<1))"
+       line 403, "pan.___", state 844, "((i>=1))"
+       line 410, "pan.___", state 850, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 852, "(1)"
+       line 410, "pan.___", state 853, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 853, "else"
+       line 410, "pan.___", state 856, "(1)"
+       line 410, "pan.___", state 857, "(1)"
+       line 410, "pan.___", state 857, "(1)"
+       line 414, "pan.___", state 864, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 866, "(1)"
+       line 414, "pan.___", state 867, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 867, "else"
+       line 414, "pan.___", state 870, "(1)"
+       line 414, "pan.___", state 871, "(1)"
+       line 414, "pan.___", state 871, "(1)"
+       line 412, "pan.___", state 876, "((i<2))"
+       line 412, "pan.___", state 876, "((i>=2))"
+       line 239, "pan.___", state 882, "(1)"
+       line 243, "pan.___", state 890, "(1)"
+       line 243, "pan.___", state 891, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 891, "else"
+       line 241, "pan.___", state 896, "((i<1))"
+       line 241, "pan.___", state 896, "((i>=1))"
+       line 247, "pan.___", state 902, "(1)"
+       line 247, "pan.___", state 903, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 903, "else"
+       line 251, "pan.___", state 910, "(1)"
+       line 251, "pan.___", state 911, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 911, "else"
+       line 249, "pan.___", state 916, "((i<2))"
+       line 249, "pan.___", state 916, "((i>=2))"
+       line 256, "pan.___", state 920, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 920, "else"
+       line 421, "pan.___", state 922, "(1)"
+       line 421, "pan.___", state 922, "(1)"
+       line 688, "pan.___", state 926, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 401, "pan.___", state 931, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 963, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 977, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 995, "(1)"
+       line 247, "pan.___", state 1015, "(1)"
+       line 251, "pan.___", state 1023, "(1)"
+       line 401, "pan.___", state 1045, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1077, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1091, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1109, "(1)"
+       line 247, "pan.___", state 1129, "(1)"
+       line 251, "pan.___", state 1137, "(1)"
+       line 401, "pan.___", state 1160, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1192, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1206, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1224, "(1)"
+       line 247, "pan.___", state 1244, "(1)"
+       line 251, "pan.___", state 1252, "(1)"
+       line 401, "pan.___", state 1271, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1303, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1317, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1335, "(1)"
+       line 247, "pan.___", state 1355, "(1)"
+       line 251, "pan.___", state 1363, "(1)"
+       line 401, "pan.___", state 1387, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1419, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1433, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1451, "(1)"
+       line 247, "pan.___", state 1471, "(1)"
+       line 251, "pan.___", state 1479, "(1)"
+       line 401, "pan.___", state 1498, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1530, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1544, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1562, "(1)"
+       line 247, "pan.___", state 1582, "(1)"
+       line 251, "pan.___", state 1590, "(1)"
+       line 401, "pan.___", state 1612, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1644, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1658, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1676, "(1)"
+       line 247, "pan.___", state 1696, "(1)"
+       line 251, "pan.___", state 1704, "(1)"
+       line 727, "pan.___", state 1723, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 401, "pan.___", state 1730, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1762, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1776, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1794, "(1)"
+       line 247, "pan.___", state 1814, "(1)"
+       line 251, "pan.___", state 1822, "(1)"
+       line 401, "pan.___", state 1841, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1873, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1887, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1905, "(1)"
+       line 247, "pan.___", state 1925, "(1)"
+       line 251, "pan.___", state 1933, "(1)"
+       line 401, "pan.___", state 1954, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 1956, "(1)"
+       line 401, "pan.___", state 1957, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 1957, "else"
+       line 401, "pan.___", state 1960, "(1)"
+       line 405, "pan.___", state 1968, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 1970, "(1)"
+       line 405, "pan.___", state 1971, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 1971, "else"
+       line 405, "pan.___", state 1974, "(1)"
+       line 405, "pan.___", state 1975, "(1)"
+       line 405, "pan.___", state 1975, "(1)"
+       line 403, "pan.___", state 1980, "((i<1))"
+       line 403, "pan.___", state 1980, "((i>=1))"
+       line 410, "pan.___", state 1986, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1988, "(1)"
+       line 410, "pan.___", state 1989, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 1989, "else"
+       line 410, "pan.___", state 1992, "(1)"
+       line 410, "pan.___", state 1993, "(1)"
+       line 410, "pan.___", state 1993, "(1)"
+       line 414, "pan.___", state 2000, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2002, "(1)"
+       line 414, "pan.___", state 2003, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 2003, "else"
+       line 414, "pan.___", state 2006, "(1)"
+       line 414, "pan.___", state 2007, "(1)"
+       line 414, "pan.___", state 2007, "(1)"
+       line 412, "pan.___", state 2012, "((i<2))"
+       line 412, "pan.___", state 2012, "((i>=2))"
+       line 239, "pan.___", state 2018, "(1)"
+       line 243, "pan.___", state 2026, "(1)"
+       line 243, "pan.___", state 2027, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 2027, "else"
+       line 241, "pan.___", state 2032, "((i<1))"
+       line 241, "pan.___", state 2032, "((i>=1))"
+       line 247, "pan.___", state 2038, "(1)"
+       line 247, "pan.___", state 2039, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 2039, "else"
+       line 251, "pan.___", state 2046, "(1)"
+       line 251, "pan.___", state 2047, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 2047, "else"
+       line 249, "pan.___", state 2052, "((i<2))"
+       line 249, "pan.___", state 2052, "((i>=2))"
+       line 256, "pan.___", state 2056, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 2056, "else"
+       line 421, "pan.___", state 2058, "(1)"
+       line 421, "pan.___", state 2058, "(1)"
+       line 727, "pan.___", state 2061, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 727, "pan.___", state 2062, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 727, "pan.___", state 2063, "(1)"
+       line 401, "pan.___", state 2070, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 2102, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2116, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 2134, "(1)"
+       line 247, "pan.___", state 2154, "(1)"
+       line 251, "pan.___", state 2162, "(1)"
+       line 401, "pan.___", state 2187, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 2219, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2233, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 2251, "(1)"
+       line 247, "pan.___", state 2271, "(1)"
+       line 251, "pan.___", state 2279, "(1)"
+       line 401, "pan.___", state 2298, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 2330, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2344, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 2362, "(1)"
+       line 247, "pan.___", state 2382, "(1)"
+       line 251, "pan.___", state 2390, "(1)"
+       line 239, "pan.___", state 2421, "(1)"
+       line 247, "pan.___", state 2441, "(1)"
+       line 251, "pan.___", state 2449, "(1)"
+       line 239, "pan.___", state 2464, "(1)"
+       line 247, "pan.___", state 2484, "(1)"
+       line 251, "pan.___", state 2492, "(1)"
+       line 887, "pan.___", state 2509, "-end-"
+       (246 of 2509 states)
+unreached in proctype urcu_writer
+       line 401, "pan.___", state 18, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 32, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 50, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 82, "(1)"
+       line 243, "pan.___", state 90, "(1)"
+       line 247, "pan.___", state 102, "(1)"
+       line 262, "pan.___", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 140, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 153, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 193, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 207, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 225, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 239, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 257, "(1)"
+       line 243, "pan.___", state 265, "(1)"
+       line 247, "pan.___", state 277, "(1)"
+       line 251, "pan.___", state 285, "(1)"
+       line 405, "pan.___", state 320, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 338, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 352, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 243, "pan.___", state 378, "(1)"
+       line 247, "pan.___", state 390, "(1)"
+       line 251, "pan.___", state 398, "(1)"
+       line 405, "pan.___", state 440, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 458, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 472, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 243, "pan.___", state 498, "(1)"
+       line 247, "pan.___", state 510, "(1)"
+       line 251, "pan.___", state 518, "(1)"
+       line 405, "pan.___", state 551, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 569, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 583, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 243, "pan.___", state 609, "(1)"
+       line 247, "pan.___", state 621, "(1)"
+       line 251, "pan.___", state 629, "(1)"
+       line 405, "pan.___", state 664, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 682, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 696, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 243, "pan.___", state 722, "(1)"
+       line 247, "pan.___", state 734, "(1)"
+       line 251, "pan.___", state 742, "(1)"
+       line 262, "pan.___", state 790, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 799, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 812, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 837, "(1)"
+       line 243, "pan.___", state 845, "(1)"
+       line 247, "pan.___", state 857, "(1)"
+       line 251, "pan.___", state 865, "(1)"
+       line 262, "pan.___", state 896, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 905, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 918, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 927, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 943, "(1)"
+       line 243, "pan.___", state 951, "(1)"
+       line 247, "pan.___", state 963, "(1)"
+       line 251, "pan.___", state 971, "(1)"
+       line 262, "pan.___", state 992, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 1001, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1016, "(1)"
+       line 274, "pan.___", state 1023, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1039, "(1)"
+       line 243, "pan.___", state 1047, "(1)"
+       line 247, "pan.___", state 1059, "(1)"
+       line 251, "pan.___", state 1067, "(1)"
+       line 262, "pan.___", state 1098, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 1107, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1120, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1129, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1145, "(1)"
+       line 243, "pan.___", state 1153, "(1)"
+       line 247, "pan.___", state 1165, "(1)"
+       line 251, "pan.___", state 1173, "(1)"
+       line 266, "pan.___", state 1199, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1212, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1221, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1237, "(1)"
+       line 243, "pan.___", state 1245, "(1)"
+       line 247, "pan.___", state 1257, "(1)"
+       line 251, "pan.___", state 1265, "(1)"
+       line 262, "pan.___", state 1296, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 1305, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1318, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1327, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1343, "(1)"
+       line 243, "pan.___", state 1351, "(1)"
+       line 247, "pan.___", state 1363, "(1)"
+       line 251, "pan.___", state 1371, "(1)"
+       line 266, "pan.___", state 1397, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1410, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1419, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1435, "(1)"
+       line 243, "pan.___", state 1443, "(1)"
+       line 247, "pan.___", state 1455, "(1)"
+       line 251, "pan.___", state 1463, "(1)"
+       line 262, "pan.___", state 1494, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 1503, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1516, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1525, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1541, "(1)"
+       line 243, "pan.___", state 1549, "(1)"
+       line 247, "pan.___", state 1561, "(1)"
+       line 251, "pan.___", state 1569, "(1)"
+       line 266, "pan.___", state 1595, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1608, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1617, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1633, "(1)"
+       line 243, "pan.___", state 1641, "(1)"
+       line 247, "pan.___", state 1653, "(1)"
+       line 251, "pan.___", state 1661, "(1)"
+       line 262, "pan.___", state 1692, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 1701, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1714, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1723, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1739, "(1)"
+       line 243, "pan.___", state 1747, "(1)"
+       line 247, "pan.___", state 1759, "(1)"
+       line 251, "pan.___", state 1767, "(1)"
+       line 1214, "pan.___", state 1783, "-end-"
+       (118 of 1783 states)
+unreached in proctype :init:
+       (0 of 78 states)
+unreached in proctype :never:
+       line 1279, "pan.___", state 11, "-end-"
+       (1 of 11 states)
+
+pan: elapsed time 1.9e+03 seconds
+pan: rate 22877.047 states/second
+pan: avg transition delay 2.8788e-06 usec
+cp .input.spin urcu_progress_reader.spin.input
+cp .input.spin.trail urcu_progress_reader.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow'
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_reader.spin.input b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_reader.spin.input
new file mode 100644 (file)
index 0000000..34cee26
--- /dev/null
@@ -0,0 +1,1250 @@
+#define READER_PROGRESS
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered as a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. c.f.
+ * http://www.linuxjournal.com/article/8212) */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process have its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barriers dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader  smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, producetoken)                          \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, READ_PROD_A_READ << base) ->  \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base,                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_C_IF_TRUE_READ     /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);            \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);        \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB | READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation has the barrier
+                        * because the performance impact added by a branch in the common case does not
+                        * justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_UNLOCK_OUT             /* RAW */
+                                           | READ_PROC_SECOND_MB       /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB        /* mb() orders reads */
+                                           | READ_LOCK_NESTED_OUT      /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT,   /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event mb()s are removed (execution using signal
+        * handler to promote barrier() -> smp_mb()), nothing prevents one loop
+        * to spill its execution on other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test reader's progress here, because we are mainly
+                * interested in writer's progress. The reader never blocks
+                * anyway. We have to test for reader/writer's progress
+                * separately, otherwise we could think the writer is doing
+                * progress when it's blocked by an always progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existing dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we are always starting by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT         /* Control dependency : need to branch out of
+                                                                * the loop to execute the next flip (CHECK) */
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill on the next loop execution. Given that the validation
+                * checks that the data entry read is poisoned, it's ok if
+                * we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_writer.define b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_writer.define
new file mode 100644 (file)
index 0000000..1e4417f
--- /dev/null
@@ -0,0 +1 @@
+#define WRITER_PROGRESS
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_writer.log b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_writer.log
new file mode 100644 (file)
index 0000000..2b68dab
--- /dev/null
@@ -0,0 +1,519 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define > pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_progress_writer.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -f -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1272)
+depth 23: Claim reached state 9 (line 1277)
+depth 51: Claim reached state 9 (line 1276)
+Depth=    5851 States=    1e+06 Transitions= 7.93e+06 Memory=   516.154        t=   20.4 R=   5e+04
+Depth=    6593 States=    2e+06 Transitions=  1.7e+07 Memory=   566.545        t=   44.5 R=   4e+04
+Depth=    6593 States=    3e+06 Transitions= 2.76e+07 Memory=   618.205        t=   73.5 R=   4e+04
+pan: resizing hashtable to -w22..  done
+Depth=    6593 States=    4e+06 Transitions= 3.59e+07 Memory=   700.791        t=   95.6 R=   4e+04
+Depth=    6593 States=    5e+06 Transitions= 5.42e+07 Memory=   752.647        t=    148 R=   3e+04
+Depth=    6593 States=    6e+06 Transitions= 7.04e+07 Memory=   801.768        t=    194 R=   3e+04
+Depth=    6593 States=    7e+06 Transitions= 8.54e+07 Memory=   850.693        t=    237 R=   3e+04
+Depth=    6593 States=    8e+06 Transitions= 9.51e+07 Memory=   904.307        t=    264 R=   3e+04
+Depth=    6593 States=    9e+06 Transitions= 1.26e+08 Memory=   962.217        t=    356 R=   3e+04
+pan: resizing hashtable to -w24..  done
+Depth=    6593 States=    1e+07 Transitions=  1.5e+08 Memory=  1135.627        t=    426 R=   2e+04
+Depth=    6593 States=  1.1e+07 Transitions= 1.66e+08 Memory=  1185.725        t=    473 R=   2e+04
+Depth=    6593 States=  1.2e+07 Transitions= 1.84e+08 Memory=  1237.385        t=    525 R=   2e+04
+Depth=    6593 States=  1.3e+07 Transitions= 2.03e+08 Memory=  1288.654        t=    579 R=   2e+04
+Depth=    6593 States=  1.4e+07 Transitions= 2.15e+08 Memory=  1339.631        t=    613 R=   2e+04
+Depth=    6593 States=  1.5e+07 Transitions= 2.28e+08 Memory=  1390.901        t=    646 R=   2e+04
+Depth=    6593 States=  1.6e+07 Transitions= 2.41e+08 Memory=  1442.756        t=    684 R=   2e+04
+Depth=    6593 States=  1.7e+07 Transitions= 2.55e+08 Memory=  1493.244        t=    724 R=   2e+04
+Depth=    6593 States=  1.8e+07 Transitions=  2.7e+08 Memory=  1545.197        t=    765 R=   2e+04
+Depth=    6593 States=  1.9e+07 Transitions= 2.82e+08 Memory=  1597.346        t=    798 R=   2e+04
+Depth=    6593 States=    2e+07 Transitions= 2.94e+08 Memory=  1649.690        t=    832 R=   2e+04
+Depth=    6593 States=  2.1e+07 Transitions= 3.04e+08 Memory=  1700.959        t=    860 R=   2e+04
+Depth=    6593 States=  2.2e+07 Transitions= 3.24e+08 Memory=  1752.815        t=    917 R=   2e+04
+Depth=    6593 States=  2.3e+07 Transitions= 3.43e+08 Memory=  1801.447        t=    973 R=   2e+04
+Depth=    6593 States=  2.4e+07 Transitions= 3.58e+08 Memory=  1850.568        t= 1.02e+03 R=   2e+04
+Depth=    6593 States=  2.5e+07 Transitions= 3.69e+08 Memory=  1903.596        t= 1.05e+03 R=   2e+04
+Depth=    6593 States=  2.6e+07 Transitions= 3.93e+08 Memory=  1960.041        t= 1.12e+03 R=   2e+04
+Depth=    6593 States=  2.7e+07 Transitions= 4.23e+08 Memory=  2011.506        t= 1.21e+03 R=   2e+04
+Depth=    6593 States=  2.8e+07 Transitions= 4.42e+08 Memory=  2062.776        t= 1.26e+03 R=   2e+04
+Depth=    6593 States=  2.9e+07 Transitions= 4.55e+08 Memory=  2113.264        t= 1.3e+03 R=   2e+04
+Depth=    6593 States=    3e+07 Transitions= 4.75e+08 Memory=  2164.240        t= 1.36e+03 R=   2e+04
+Depth=    6593 States=  3.1e+07 Transitions= 4.88e+08 Memory=  2215.315        t= 1.39e+03 R=   2e+04
+Depth=    6593 States=  3.2e+07 Transitions= 5.02e+08 Memory=  2266.193        t= 1.43e+03 R=   2e+04
+Depth=    6593 States=  3.3e+07 Transitions= 5.14e+08 Memory=  2318.147        t= 1.47e+03 R=   2e+04
+Depth=    6593 States=  3.4e+07 Transitions= 5.28e+08 Memory=  2369.416        t= 1.5e+03 R=   2e+04
+pan: resizing hashtable to -w26..  done
+Depth=    6593 States=  3.5e+07 Transitions= 5.41e+08 Memory=  2917.744        t= 1.55e+03 R=   2e+04
+Depth=    6593 States=  3.6e+07 Transitions= 5.53e+08 Memory=  2969.990        t= 1.58e+03 R=   2e+04
+Depth=    6593 States=  3.7e+07 Transitions= 5.65e+08 Memory=  3022.236        t= 1.61e+03 R=   2e+04
+Depth=    6593 States=  3.8e+07 Transitions= 5.76e+08 Memory=  3073.604        t= 1.64e+03 R=   2e+04
+Depth=    6593 States=  3.9e+07 Transitions= 5.94e+08 Memory=  3125.850        t= 1.69e+03 R=   2e+04
+Depth=    6593 States=    4e+07 Transitions= 6.13e+08 Memory=  3173.994        t= 1.75e+03 R=   2e+04
+Depth=    6593 States=  4.1e+07 Transitions= 6.32e+08 Memory=  3223.115        t= 1.8e+03 R=   2e+04
+Depth=    6593 States=  4.2e+07 Transitions= 6.49e+08 Memory=  3272.236        t= 1.85e+03 R=   2e+04
+
+(Spin Version 5.1.7 -- 23 December 2008)
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness enabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 6593, errors: 0
+ 25668735 states, stored (4.20113e+07 visited)
+6.0674803e+08 states, matched
+6.4875934e+08 transitions (= visited+matched)
+9.990628e+09 atomic steps
+hash conflicts: 2.634972e+08 (resolved)
+
+Stats on memory usage (in Megabytes):
+ 2839.635      equivalent memory usage for states (stored*(State-vector + overhead))
+ 2304.486      actual memory usage for states (compression: 81.15%)
+               state-vector as stored = 66 byte + 28 byte overhead
+  512.000      memory used for hash table (-w26)
+  457.764      memory used for DFS stack (-m10000000)
+    1.330      memory lost to fragmentation
+ 3272.920      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 262, "pan.___", state 30, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 52, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 61, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 77, "(1)"
+       line 243, "pan.___", state 85, "(1)"
+       line 247, "pan.___", state 97, "(1)"
+       line 251, "pan.___", state 105, "(1)"
+       line 401, "pan.___", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 163, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 177, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 195, "(1)"
+       line 247, "pan.___", state 215, "(1)"
+       line 251, "pan.___", state 223, "(1)"
+       line 680, "pan.___", state 242, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 401, "pan.___", state 249, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 281, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 295, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 313, "(1)"
+       line 247, "pan.___", state 333, "(1)"
+       line 251, "pan.___", state 341, "(1)"
+       line 401, "pan.___", state 360, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 392, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 406, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 424, "(1)"
+       line 247, "pan.___", state 444, "(1)"
+       line 251, "pan.___", state 452, "(1)"
+       line 401, "pan.___", state 473, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 475, "(1)"
+       line 401, "pan.___", state 476, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 476, "else"
+       line 401, "pan.___", state 479, "(1)"
+       line 405, "pan.___", state 487, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 489, "(1)"
+       line 405, "pan.___", state 490, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 490, "else"
+       line 405, "pan.___", state 493, "(1)"
+       line 405, "pan.___", state 494, "(1)"
+       line 405, "pan.___", state 494, "(1)"
+       line 403, "pan.___", state 499, "((i<1))"
+       line 403, "pan.___", state 499, "((i>=1))"
+       line 410, "pan.___", state 505, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 507, "(1)"
+       line 410, "pan.___", state 508, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 508, "else"
+       line 410, "pan.___", state 511, "(1)"
+       line 410, "pan.___", state 512, "(1)"
+       line 410, "pan.___", state 512, "(1)"
+       line 414, "pan.___", state 519, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 521, "(1)"
+       line 414, "pan.___", state 522, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 522, "else"
+       line 414, "pan.___", state 525, "(1)"
+       line 414, "pan.___", state 526, "(1)"
+       line 414, "pan.___", state 526, "(1)"
+       line 412, "pan.___", state 531, "((i<2))"
+       line 412, "pan.___", state 531, "((i>=2))"
+       line 239, "pan.___", state 537, "(1)"
+       line 243, "pan.___", state 545, "(1)"
+       line 243, "pan.___", state 546, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 546, "else"
+       line 241, "pan.___", state 551, "((i<1))"
+       line 241, "pan.___", state 551, "((i>=1))"
+       line 247, "pan.___", state 557, "(1)"
+       line 247, "pan.___", state 558, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 558, "else"
+       line 251, "pan.___", state 565, "(1)"
+       line 251, "pan.___", state 566, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 566, "else"
+       line 249, "pan.___", state 571, "((i<2))"
+       line 249, "pan.___", state 571, "((i>=2))"
+       line 256, "pan.___", state 575, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 575, "else"
+       line 421, "pan.___", state 577, "(1)"
+       line 421, "pan.___", state 577, "(1)"
+       line 680, "pan.___", state 580, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 680, "pan.___", state 581, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 680, "pan.___", state 582, "(1)"
+       line 401, "pan.___", state 589, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 621, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 635, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 653, "(1)"
+       line 247, "pan.___", state 673, "(1)"
+       line 251, "pan.___", state 681, "(1)"
+       line 401, "pan.___", state 707, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 709, "(1)"
+       line 401, "pan.___", state 710, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 710, "else"
+       line 401, "pan.___", state 713, "(1)"
+       line 405, "pan.___", state 721, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 723, "(1)"
+       line 405, "pan.___", state 724, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 724, "else"
+       line 405, "pan.___", state 727, "(1)"
+       line 405, "pan.___", state 728, "(1)"
+       line 405, "pan.___", state 728, "(1)"
+       line 403, "pan.___", state 733, "((i<1))"
+       line 403, "pan.___", state 733, "((i>=1))"
+       line 410, "pan.___", state 739, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 741, "(1)"
+       line 410, "pan.___", state 742, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 742, "else"
+       line 410, "pan.___", state 745, "(1)"
+       line 410, "pan.___", state 746, "(1)"
+       line 410, "pan.___", state 746, "(1)"
+       line 414, "pan.___", state 753, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 755, "(1)"
+       line 414, "pan.___", state 756, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 756, "else"
+       line 414, "pan.___", state 759, "(1)"
+       line 414, "pan.___", state 760, "(1)"
+       line 414, "pan.___", state 760, "(1)"
+       line 412, "pan.___", state 765, "((i<2))"
+       line 412, "pan.___", state 765, "((i>=2))"
+       line 239, "pan.___", state 771, "(1)"
+       line 243, "pan.___", state 779, "(1)"
+       line 243, "pan.___", state 780, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 780, "else"
+       line 241, "pan.___", state 785, "((i<1))"
+       line 241, "pan.___", state 785, "((i>=1))"
+       line 247, "pan.___", state 791, "(1)"
+       line 247, "pan.___", state 792, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 792, "else"
+       line 251, "pan.___", state 799, "(1)"
+       line 251, "pan.___", state 800, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 800, "else"
+       line 256, "pan.___", state 809, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 809, "else"
+       line 421, "pan.___", state 811, "(1)"
+       line 421, "pan.___", state 811, "(1)"
+       line 401, "pan.___", state 818, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 820, "(1)"
+       line 401, "pan.___", state 821, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 821, "else"
+       line 401, "pan.___", state 824, "(1)"
+       line 405, "pan.___", state 832, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 834, "(1)"
+       line 405, "pan.___", state 835, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 835, "else"
+       line 405, "pan.___", state 838, "(1)"
+       line 405, "pan.___", state 839, "(1)"
+       line 405, "pan.___", state 839, "(1)"
+       line 403, "pan.___", state 844, "((i<1))"
+       line 403, "pan.___", state 844, "((i>=1))"
+       line 410, "pan.___", state 850, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 852, "(1)"
+       line 410, "pan.___", state 853, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 853, "else"
+       line 410, "pan.___", state 856, "(1)"
+       line 410, "pan.___", state 857, "(1)"
+       line 410, "pan.___", state 857, "(1)"
+       line 414, "pan.___", state 864, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 866, "(1)"
+       line 414, "pan.___", state 867, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 867, "else"
+       line 414, "pan.___", state 870, "(1)"
+       line 414, "pan.___", state 871, "(1)"
+       line 414, "pan.___", state 871, "(1)"
+       line 412, "pan.___", state 876, "((i<2))"
+       line 412, "pan.___", state 876, "((i>=2))"
+       line 239, "pan.___", state 882, "(1)"
+       line 243, "pan.___", state 890, "(1)"
+       line 243, "pan.___", state 891, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 891, "else"
+       line 241, "pan.___", state 896, "((i<1))"
+       line 241, "pan.___", state 896, "((i>=1))"
+       line 247, "pan.___", state 902, "(1)"
+       line 247, "pan.___", state 903, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 903, "else"
+       line 251, "pan.___", state 910, "(1)"
+       line 251, "pan.___", state 911, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 911, "else"
+       line 249, "pan.___", state 916, "((i<2))"
+       line 249, "pan.___", state 916, "((i>=2))"
+       line 256, "pan.___", state 920, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 920, "else"
+       line 421, "pan.___", state 922, "(1)"
+       line 421, "pan.___", state 922, "(1)"
+       line 688, "pan.___", state 926, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 401, "pan.___", state 931, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 963, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 977, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 995, "(1)"
+       line 247, "pan.___", state 1015, "(1)"
+       line 251, "pan.___", state 1023, "(1)"
+       line 401, "pan.___", state 1045, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1077, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1091, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1109, "(1)"
+       line 247, "pan.___", state 1129, "(1)"
+       line 251, "pan.___", state 1137, "(1)"
+       line 401, "pan.___", state 1160, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1192, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1206, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1224, "(1)"
+       line 247, "pan.___", state 1244, "(1)"
+       line 251, "pan.___", state 1252, "(1)"
+       line 401, "pan.___", state 1271, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1303, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1317, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1335, "(1)"
+       line 247, "pan.___", state 1355, "(1)"
+       line 251, "pan.___", state 1363, "(1)"
+       line 401, "pan.___", state 1387, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1419, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1433, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1451, "(1)"
+       line 247, "pan.___", state 1471, "(1)"
+       line 251, "pan.___", state 1479, "(1)"
+       line 401, "pan.___", state 1498, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1530, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1544, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1562, "(1)"
+       line 247, "pan.___", state 1582, "(1)"
+       line 251, "pan.___", state 1590, "(1)"
+       line 401, "pan.___", state 1612, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1644, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1658, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1676, "(1)"
+       line 247, "pan.___", state 1696, "(1)"
+       line 251, "pan.___", state 1704, "(1)"
+       line 727, "pan.___", state 1723, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 401, "pan.___", state 1730, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1762, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1776, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1794, "(1)"
+       line 247, "pan.___", state 1814, "(1)"
+       line 251, "pan.___", state 1822, "(1)"
+       line 401, "pan.___", state 1841, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1873, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 1887, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1905, "(1)"
+       line 247, "pan.___", state 1925, "(1)"
+       line 251, "pan.___", state 1933, "(1)"
+       line 401, "pan.___", state 1954, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 1956, "(1)"
+       line 401, "pan.___", state 1957, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 401, "pan.___", state 1957, "else"
+       line 401, "pan.___", state 1960, "(1)"
+       line 405, "pan.___", state 1968, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 1970, "(1)"
+       line 405, "pan.___", state 1971, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 405, "pan.___", state 1971, "else"
+       line 405, "pan.___", state 1974, "(1)"
+       line 405, "pan.___", state 1975, "(1)"
+       line 405, "pan.___", state 1975, "(1)"
+       line 403, "pan.___", state 1980, "((i<1))"
+       line 403, "pan.___", state 1980, "((i>=1))"
+       line 410, "pan.___", state 1986, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 1988, "(1)"
+       line 410, "pan.___", state 1989, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 410, "pan.___", state 1989, "else"
+       line 410, "pan.___", state 1992, "(1)"
+       line 410, "pan.___", state 1993, "(1)"
+       line 410, "pan.___", state 1993, "(1)"
+       line 414, "pan.___", state 2000, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2002, "(1)"
+       line 414, "pan.___", state 2003, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 414, "pan.___", state 2003, "else"
+       line 414, "pan.___", state 2006, "(1)"
+       line 414, "pan.___", state 2007, "(1)"
+       line 414, "pan.___", state 2007, "(1)"
+       line 412, "pan.___", state 2012, "((i<2))"
+       line 412, "pan.___", state 2012, "((i>=2))"
+       line 239, "pan.___", state 2018, "(1)"
+       line 243, "pan.___", state 2026, "(1)"
+       line 243, "pan.___", state 2027, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 243, "pan.___", state 2027, "else"
+       line 241, "pan.___", state 2032, "((i<1))"
+       line 241, "pan.___", state 2032, "((i>=1))"
+       line 247, "pan.___", state 2038, "(1)"
+       line 247, "pan.___", state 2039, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 247, "pan.___", state 2039, "else"
+       line 251, "pan.___", state 2046, "(1)"
+       line 251, "pan.___", state 2047, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 251, "pan.___", state 2047, "else"
+       line 249, "pan.___", state 2052, "((i<2))"
+       line 249, "pan.___", state 2052, "((i>=2))"
+       line 256, "pan.___", state 2056, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 256, "pan.___", state 2056, "else"
+       line 421, "pan.___", state 2058, "(1)"
+       line 421, "pan.___", state 2058, "(1)"
+       line 727, "pan.___", state 2061, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 727, "pan.___", state 2062, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 727, "pan.___", state 2063, "(1)"
+       line 401, "pan.___", state 2070, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 2102, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2116, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 2134, "(1)"
+       line 247, "pan.___", state 2154, "(1)"
+       line 251, "pan.___", state 2162, "(1)"
+       line 401, "pan.___", state 2187, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 2219, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2233, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 2251, "(1)"
+       line 247, "pan.___", state 2271, "(1)"
+       line 251, "pan.___", state 2279, "(1)"
+       line 401, "pan.___", state 2298, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 2330, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 2344, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 2362, "(1)"
+       line 247, "pan.___", state 2382, "(1)"
+       line 251, "pan.___", state 2390, "(1)"
+       line 239, "pan.___", state 2421, "(1)"
+       line 247, "pan.___", state 2441, "(1)"
+       line 251, "pan.___", state 2449, "(1)"
+       line 239, "pan.___", state 2464, "(1)"
+       line 247, "pan.___", state 2484, "(1)"
+       line 251, "pan.___", state 2492, "(1)"
+       line 887, "pan.___", state 2509, "-end-"
+       (246 of 2509 states)
+unreached in proctype urcu_writer
+       line 401, "pan.___", state 18, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 32, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 50, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 82, "(1)"
+       line 243, "pan.___", state 90, "(1)"
+       line 247, "pan.___", state 102, "(1)"
+       line 262, "pan.___", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 140, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 153, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 401, "pan.___", state 193, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 405, "pan.___", state 207, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 225, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 239, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 257, "(1)"
+       line 243, "pan.___", state 265, "(1)"
+       line 247, "pan.___", state 277, "(1)"
+       line 251, "pan.___", state 285, "(1)"
+       line 405, "pan.___", state 320, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 338, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 352, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 243, "pan.___", state 378, "(1)"
+       line 247, "pan.___", state 390, "(1)"
+       line 251, "pan.___", state 398, "(1)"
+       line 405, "pan.___", state 440, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 458, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 472, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 243, "pan.___", state 498, "(1)"
+       line 247, "pan.___", state 510, "(1)"
+       line 251, "pan.___", state 518, "(1)"
+       line 405, "pan.___", state 551, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 569, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 583, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 243, "pan.___", state 609, "(1)"
+       line 247, "pan.___", state 621, "(1)"
+       line 251, "pan.___", state 629, "(1)"
+       line 405, "pan.___", state 664, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 410, "pan.___", state 682, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 414, "pan.___", state 696, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 243, "pan.___", state 722, "(1)"
+       line 247, "pan.___", state 734, "(1)"
+       line 251, "pan.___", state 742, "(1)"
+       line 262, "pan.___", state 795, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 804, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 819, "(1)"
+       line 274, "pan.___", state 826, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 842, "(1)"
+       line 243, "pan.___", state 850, "(1)"
+       line 247, "pan.___", state 862, "(1)"
+       line 251, "pan.___", state 870, "(1)"
+       line 262, "pan.___", state 901, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 910, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 923, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 932, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 948, "(1)"
+       line 243, "pan.___", state 956, "(1)"
+       line 247, "pan.___", state 968, "(1)"
+       line 251, "pan.___", state 976, "(1)"
+       line 266, "pan.___", state 1002, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1015, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1024, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1040, "(1)"
+       line 243, "pan.___", state 1048, "(1)"
+       line 247, "pan.___", state 1060, "(1)"
+       line 251, "pan.___", state 1068, "(1)"
+       line 262, "pan.___", state 1099, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 1108, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1121, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1130, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1146, "(1)"
+       line 243, "pan.___", state 1154, "(1)"
+       line 247, "pan.___", state 1166, "(1)"
+       line 251, "pan.___", state 1174, "(1)"
+       line 266, "pan.___", state 1200, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1213, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1222, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1238, "(1)"
+       line 243, "pan.___", state 1246, "(1)"
+       line 247, "pan.___", state 1258, "(1)"
+       line 251, "pan.___", state 1266, "(1)"
+       line 262, "pan.___", state 1297, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 1306, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1319, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1328, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1344, "(1)"
+       line 243, "pan.___", state 1352, "(1)"
+       line 247, "pan.___", state 1364, "(1)"
+       line 251, "pan.___", state 1372, "(1)"
+       line 266, "pan.___", state 1398, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1411, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1420, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1436, "(1)"
+       line 243, "pan.___", state 1444, "(1)"
+       line 247, "pan.___", state 1456, "(1)"
+       line 251, "pan.___", state 1464, "(1)"
+       line 262, "pan.___", state 1495, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 266, "pan.___", state 1504, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 270, "pan.___", state 1517, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 274, "pan.___", state 1526, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 239, "pan.___", state 1542, "(1)"
+       line 243, "pan.___", state 1550, "(1)"
+       line 247, "pan.___", state 1562, "(1)"
+       line 251, "pan.___", state 1570, "(1)"
+       line 1214, "pan.___", state 1586, "-end-"
+       (103 of 1586 states)
+unreached in proctype :init:
+       (0 of 78 states)
+unreached in proctype :never:
+       line 1279, "pan.___", state 11, "-end-"
+       (1 of 11 states)
+
+pan: elapsed time 1.85e+03 seconds
+pan: rate 22712.991 states/second
+pan: avg transition delay 2.8511e-06 usec
+cp .input.spin urcu_progress_writer.spin.input
+cp .input.spin.trail urcu_progress_writer.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow'
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_writer.spin.input b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_writer.spin.input
new file mode 100644 (file)
index 0000000..6e6d67f
--- /dev/null
@@ -0,0 +1,1250 @@
+#define WRITER_PROGRESS
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered as a depencency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. c.f.
+ * http://www.linuxjournal.com/article/8212)
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process have its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barriers dependencies in place in REMOTE_BARRIERS mode
+ * because they would add unexisting core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader  smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, producetoken)                          \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, READ_PROD_A_READ << base) ->  \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base,                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_C_IF_TRUE_READ     /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);            \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);        \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB | READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation has the barrier
+                        * because the performance impact added by a branch in the common case does not
+                        * justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_UNLOCK_OUT             /* RAW */
+                                           | READ_PROC_SECOND_MB       /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB        /* mb() orders reads */
+                                           | READ_LOCK_NESTED_OUT      /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT,   /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event mb()s are removed (execution using signal
+        * handler to promote barrier()() -> smp_mb()), nothing prevents one loop
+        * to spill its execution on other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test reader's progress here, because we are mainly
+                * interested in writer's progress. The reader never blocks
+                * anyway. We have to test for reader/writer's progress
+                * separately, otherwise we could think the writer is doing
+                * progress when it's blocked by an always progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existing dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we are always starting by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT         /* Control dependency : need to branch out of
+                                                                * the loop to execute the next flip (CHECK) */
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill on the next loop execution. Given the validation we do
+                * checks for the data entry read to be poisoned, it's ok if
+                * we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_writer_error.define b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_writer_error.define
new file mode 100644 (file)
index 0000000..8d304f5
--- /dev/null
@@ -0,0 +1,2 @@
+#define WRITER_PROGRESS
+#define GEN_ERROR_WRITER_PROGRESS
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_writer_error.log b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_writer_error.log
new file mode 100644 (file)
index 0000000..a80f97b
--- /dev/null
@@ -0,0 +1,498 @@
+make[1]: Entering directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow'
+rm -f pan* trail.out .input.spin* *.spin.trail .input.define
+touch .input.define
+cat .input.define > pan.ltl
+cat DEFINES >> pan.ltl
+spin -f "!(`cat urcu_progress.ltl | grep -v ^//`)" >> pan.ltl
+cp urcu_progress_writer_error.define .input.define
+cat .input.define > .input.spin
+cat DEFINES >> .input.spin
+cat urcu.spin >> .input.spin
+rm -f .input.spin.trail
+spin -a -X -N pan.ltl .input.spin
+Exit-Status 0
+gcc -O2 -w -DHASH64 -o pan pan.c
+./pan -a -f -v -c1 -X -m10000000 -w20
+warning: for p.o. reduction to be valid the never claim must be stutter-invariant
+(never claims generated from LTL formulae are stutter-invariant)
+depth 0: Claim reached state 5 (line 1273)
+depth 23: Claim reached state 9 (line 1278)
+depth 51: Claim reached state 9 (line 1277)
+pan: acceptance cycle (at depth 1559)
+pan: wrote .input.spin.trail
+
+(Spin Version 5.1.7 -- 23 December 2008)
+Warning: Search not completed
+       + Partial Order Reduction
+
+Full statespace search for:
+       never claim             +
+       assertion violations    + (if within scope of claim)
+       acceptance   cycles     + (fairness enabled)
+       invalid end states      - (disabled by never claim)
+
+State-vector 88 byte, depth reached 4532, errors: 1
+   437005 states, stored (735994 visited)
+  5102692 states, matched
+  5838686 transitions (= visited+matched)
+ 78711771 atomic steps
+hash conflicts:    422011 (resolved)
+
+Stats on memory usage (in Megabytes):
+   48.344      equivalent memory usage for states (stored*(State-vector + overhead))
+   37.421      actual memory usage for states (compression: 77.41%)
+               state-vector as stored = 62 byte + 28 byte overhead
+    8.000      memory used for hash table (-w20)
+  457.764      memory used for DFS stack (-m10000000)
+  503.068      total actual memory usage
+
+unreached in proctype urcu_reader
+       line 263, "pan.___", state 30, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 271, "pan.___", state 52, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 275, "pan.___", state 61, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 240, "pan.___", state 77, "(1)"
+       line 244, "pan.___", state 85, "(1)"
+       line 248, "pan.___", state 97, "(1)"
+       line 252, "pan.___", state 105, "(1)"
+       line 402, "pan.___", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 163, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 177, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 240, "pan.___", state 195, "(1)"
+       line 248, "pan.___", state 215, "(1)"
+       line 252, "pan.___", state 223, "(1)"
+       line 681, "pan.___", state 242, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<1))"
+       line 402, "pan.___", state 249, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 281, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 295, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 240, "pan.___", state 313, "(1)"
+       line 248, "pan.___", state 333, "(1)"
+       line 252, "pan.___", state 341, "(1)"
+       line 402, "pan.___", state 360, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 392, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 406, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 240, "pan.___", state 424, "(1)"
+       line 248, "pan.___", state 444, "(1)"
+       line 252, "pan.___", state 452, "(1)"
+       line 402, "pan.___", state 473, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 402, "pan.___", state 475, "(1)"
+       line 402, "pan.___", state 476, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 402, "pan.___", state 476, "else"
+       line 402, "pan.___", state 479, "(1)"
+       line 406, "pan.___", state 487, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 406, "pan.___", state 489, "(1)"
+       line 406, "pan.___", state 490, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 406, "pan.___", state 490, "else"
+       line 406, "pan.___", state 493, "(1)"
+       line 406, "pan.___", state 494, "(1)"
+       line 406, "pan.___", state 494, "(1)"
+       line 404, "pan.___", state 499, "((i<1))"
+       line 404, "pan.___", state 499, "((i>=1))"
+       line 411, "pan.___", state 505, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 507, "(1)"
+       line 411, "pan.___", state 508, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 508, "else"
+       line 411, "pan.___", state 511, "(1)"
+       line 411, "pan.___", state 512, "(1)"
+       line 411, "pan.___", state 512, "(1)"
+       line 415, "pan.___", state 519, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 521, "(1)"
+       line 415, "pan.___", state 522, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 522, "else"
+       line 415, "pan.___", state 525, "(1)"
+       line 415, "pan.___", state 526, "(1)"
+       line 415, "pan.___", state 526, "(1)"
+       line 413, "pan.___", state 531, "((i<2))"
+       line 413, "pan.___", state 531, "((i>=2))"
+       line 240, "pan.___", state 537, "(1)"
+       line 244, "pan.___", state 545, "(1)"
+       line 244, "pan.___", state 546, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 244, "pan.___", state 546, "else"
+       line 242, "pan.___", state 551, "((i<1))"
+       line 242, "pan.___", state 551, "((i>=1))"
+       line 248, "pan.___", state 557, "(1)"
+       line 248, "pan.___", state 558, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 248, "pan.___", state 558, "else"
+       line 252, "pan.___", state 565, "(1)"
+       line 252, "pan.___", state 566, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 252, "pan.___", state 566, "else"
+       line 250, "pan.___", state 571, "((i<2))"
+       line 250, "pan.___", state 571, "((i>=2))"
+       line 257, "pan.___", state 575, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 575, "else"
+       line 422, "pan.___", state 577, "(1)"
+       line 422, "pan.___", state 577, "(1)"
+       line 681, "pan.___", state 580, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 681, "pan.___", state 581, "_proc_urcu_reader = (_proc_urcu_reader|(1<<5))"
+       line 681, "pan.___", state 582, "(1)"
+       line 402, "pan.___", state 589, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 621, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 635, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 240, "pan.___", state 653, "(1)"
+       line 248, "pan.___", state 673, "(1)"
+       line 252, "pan.___", state 681, "(1)"
+       line 402, "pan.___", state 707, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 402, "pan.___", state 709, "(1)"
+       line 402, "pan.___", state 710, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 402, "pan.___", state 710, "else"
+       line 402, "pan.___", state 713, "(1)"
+       line 406, "pan.___", state 721, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 406, "pan.___", state 723, "(1)"
+       line 406, "pan.___", state 724, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 406, "pan.___", state 724, "else"
+       line 406, "pan.___", state 727, "(1)"
+       line 406, "pan.___", state 728, "(1)"
+       line 406, "pan.___", state 728, "(1)"
+       line 404, "pan.___", state 733, "((i<1))"
+       line 404, "pan.___", state 733, "((i>=1))"
+       line 411, "pan.___", state 739, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 741, "(1)"
+       line 411, "pan.___", state 742, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 742, "else"
+       line 411, "pan.___", state 745, "(1)"
+       line 411, "pan.___", state 746, "(1)"
+       line 411, "pan.___", state 746, "(1)"
+       line 415, "pan.___", state 753, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 755, "(1)"
+       line 415, "pan.___", state 756, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 756, "else"
+       line 415, "pan.___", state 759, "(1)"
+       line 415, "pan.___", state 760, "(1)"
+       line 415, "pan.___", state 760, "(1)"
+       line 413, "pan.___", state 765, "((i<2))"
+       line 413, "pan.___", state 765, "((i>=2))"
+       line 240, "pan.___", state 771, "(1)"
+       line 244, "pan.___", state 779, "(1)"
+       line 244, "pan.___", state 780, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 244, "pan.___", state 780, "else"
+       line 242, "pan.___", state 785, "((i<1))"
+       line 242, "pan.___", state 785, "((i>=1))"
+       line 248, "pan.___", state 791, "(1)"
+       line 248, "pan.___", state 792, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 248, "pan.___", state 792, "else"
+       line 252, "pan.___", state 799, "(1)"
+       line 252, "pan.___", state 800, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 252, "pan.___", state 800, "else"
+       line 257, "pan.___", state 809, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 809, "else"
+       line 422, "pan.___", state 811, "(1)"
+       line 422, "pan.___", state 811, "(1)"
+       line 402, "pan.___", state 818, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 402, "pan.___", state 820, "(1)"
+       line 402, "pan.___", state 821, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 402, "pan.___", state 821, "else"
+       line 402, "pan.___", state 824, "(1)"
+       line 406, "pan.___", state 832, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 406, "pan.___", state 834, "(1)"
+       line 406, "pan.___", state 835, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 406, "pan.___", state 835, "else"
+       line 406, "pan.___", state 838, "(1)"
+       line 406, "pan.___", state 839, "(1)"
+       line 406, "pan.___", state 839, "(1)"
+       line 404, "pan.___", state 844, "((i<1))"
+       line 404, "pan.___", state 844, "((i>=1))"
+       line 411, "pan.___", state 850, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 852, "(1)"
+       line 411, "pan.___", state 853, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 853, "else"
+       line 411, "pan.___", state 856, "(1)"
+       line 411, "pan.___", state 857, "(1)"
+       line 411, "pan.___", state 857, "(1)"
+       line 415, "pan.___", state 864, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 866, "(1)"
+       line 415, "pan.___", state 867, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 867, "else"
+       line 415, "pan.___", state 870, "(1)"
+       line 415, "pan.___", state 871, "(1)"
+       line 415, "pan.___", state 871, "(1)"
+       line 413, "pan.___", state 876, "((i<2))"
+       line 413, "pan.___", state 876, "((i>=2))"
+       line 240, "pan.___", state 882, "(1)"
+       line 244, "pan.___", state 890, "(1)"
+       line 244, "pan.___", state 891, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 244, "pan.___", state 891, "else"
+       line 242, "pan.___", state 896, "((i<1))"
+       line 242, "pan.___", state 896, "((i>=1))"
+       line 248, "pan.___", state 902, "(1)"
+       line 248, "pan.___", state 903, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 248, "pan.___", state 903, "else"
+       line 252, "pan.___", state 910, "(1)"
+       line 252, "pan.___", state 911, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 252, "pan.___", state 911, "else"
+       line 250, "pan.___", state 916, "((i<2))"
+       line 250, "pan.___", state 916, "((i>=2))"
+       line 257, "pan.___", state 920, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 920, "else"
+       line 422, "pan.___", state 922, "(1)"
+       line 422, "pan.___", state 922, "(1)"
+       line 689, "pan.___", state 926, "_proc_urcu_reader = (_proc_urcu_reader|(1<<11))"
+       line 402, "pan.___", state 931, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 963, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 977, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 240, "pan.___", state 995, "(1)"
+       line 248, "pan.___", state 1015, "(1)"
+       line 252, "pan.___", state 1023, "(1)"
+       line 402, "pan.___", state 1045, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 1077, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1091, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 240, "pan.___", state 1109, "(1)"
+       line 248, "pan.___", state 1129, "(1)"
+       line 252, "pan.___", state 1137, "(1)"
+       line 402, "pan.___", state 1160, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 1192, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1206, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 240, "pan.___", state 1224, "(1)"
+       line 248, "pan.___", state 1244, "(1)"
+       line 252, "pan.___", state 1252, "(1)"
+       line 402, "pan.___", state 1271, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 1303, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1317, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 240, "pan.___", state 1335, "(1)"
+       line 248, "pan.___", state 1355, "(1)"
+       line 252, "pan.___", state 1363, "(1)"
+       line 402, "pan.___", state 1387, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 1419, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1433, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 240, "pan.___", state 1451, "(1)"
+       line 248, "pan.___", state 1471, "(1)"
+       line 252, "pan.___", state 1479, "(1)"
+       line 402, "pan.___", state 1498, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 1530, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1544, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 240, "pan.___", state 1562, "(1)"
+       line 248, "pan.___", state 1582, "(1)"
+       line 252, "pan.___", state 1590, "(1)"
+       line 402, "pan.___", state 1612, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 1644, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1658, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 240, "pan.___", state 1676, "(1)"
+       line 248, "pan.___", state 1696, "(1)"
+       line 252, "pan.___", state 1704, "(1)"
+       line 728, "pan.___", state 1723, "_proc_urcu_reader = (_proc_urcu_reader|((1<<2)<<19))"
+       line 402, "pan.___", state 1730, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 1762, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1776, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 240, "pan.___", state 1794, "(1)"
+       line 248, "pan.___", state 1814, "(1)"
+       line 252, "pan.___", state 1822, "(1)"
+       line 402, "pan.___", state 1841, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 1873, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 1887, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 240, "pan.___", state 1905, "(1)"
+       line 248, "pan.___", state 1925, "(1)"
+       line 252, "pan.___", state 1933, "(1)"
+       line 402, "pan.___", state 1954, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 402, "pan.___", state 1956, "(1)"
+       line 402, "pan.___", state 1957, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 402, "pan.___", state 1957, "else"
+       line 402, "pan.___", state 1960, "(1)"
+       line 406, "pan.___", state 1968, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 406, "pan.___", state 1970, "(1)"
+       line 406, "pan.___", state 1971, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 406, "pan.___", state 1971, "else"
+       line 406, "pan.___", state 1974, "(1)"
+       line 406, "pan.___", state 1975, "(1)"
+       line 406, "pan.___", state 1975, "(1)"
+       line 404, "pan.___", state 1980, "((i<1))"
+       line 404, "pan.___", state 1980, "((i>=1))"
+       line 411, "pan.___", state 1986, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 1988, "(1)"
+       line 411, "pan.___", state 1989, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 1989, "else"
+       line 411, "pan.___", state 1992, "(1)"
+       line 411, "pan.___", state 1993, "(1)"
+       line 411, "pan.___", state 1993, "(1)"
+       line 415, "pan.___", state 2000, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2002, "(1)"
+       line 415, "pan.___", state 2003, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 2003, "else"
+       line 415, "pan.___", state 2006, "(1)"
+       line 415, "pan.___", state 2007, "(1)"
+       line 415, "pan.___", state 2007, "(1)"
+       line 413, "pan.___", state 2012, "((i<2))"
+       line 413, "pan.___", state 2012, "((i>=2))"
+       line 240, "pan.___", state 2018, "(1)"
+       line 244, "pan.___", state 2026, "(1)"
+       line 244, "pan.___", state 2027, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 244, "pan.___", state 2027, "else"
+       line 242, "pan.___", state 2032, "((i<1))"
+       line 242, "pan.___", state 2032, "((i>=1))"
+       line 248, "pan.___", state 2038, "(1)"
+       line 248, "pan.___", state 2039, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 248, "pan.___", state 2039, "else"
+       line 252, "pan.___", state 2046, "(1)"
+       line 252, "pan.___", state 2047, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 252, "pan.___", state 2047, "else"
+       line 250, "pan.___", state 2052, "((i<2))"
+       line 250, "pan.___", state 2052, "((i>=2))"
+       line 257, "pan.___", state 2056, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 2056, "else"
+       line 422, "pan.___", state 2058, "(1)"
+       line 422, "pan.___", state 2058, "(1)"
+       line 728, "pan.___", state 2061, "cached_urcu_active_readers.val[_pid] = (tmp+1)"
+       line 728, "pan.___", state 2062, "_proc_urcu_reader = (_proc_urcu_reader|(1<<23))"
+       line 728, "pan.___", state 2063, "(1)"
+       line 402, "pan.___", state 2070, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 2102, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2116, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 240, "pan.___", state 2134, "(1)"
+       line 248, "pan.___", state 2154, "(1)"
+       line 252, "pan.___", state 2162, "(1)"
+       line 402, "pan.___", state 2187, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 2219, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2233, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 240, "pan.___", state 2251, "(1)"
+       line 248, "pan.___", state 2271, "(1)"
+       line 252, "pan.___", state 2279, "(1)"
+       line 402, "pan.___", state 2298, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 2330, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 2344, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 240, "pan.___", state 2362, "(1)"
+       line 248, "pan.___", state 2382, "(1)"
+       line 252, "pan.___", state 2390, "(1)"
+       line 240, "pan.___", state 2421, "(1)"
+       line 248, "pan.___", state 2441, "(1)"
+       line 252, "pan.___", state 2449, "(1)"
+       line 240, "pan.___", state 2464, "(1)"
+       line 248, "pan.___", state 2484, "(1)"
+       line 252, "pan.___", state 2492, "(1)"
+       line 888, "pan.___", state 2509, "-end-"
+       (246 of 2509 states)
+unreached in proctype urcu_writer
+       line 402, "pan.___", state 18, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 406, "pan.___", state 32, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 50, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 240, "pan.___", state 82, "(1)"
+       line 244, "pan.___", state 90, "(1)"
+       line 248, "pan.___", state 102, "(1)"
+       line 263, "pan.___", state 131, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 267, "pan.___", state 140, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 271, "pan.___", state 153, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 402, "pan.___", state 193, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 406, "pan.___", state 207, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 225, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 239, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 240, "pan.___", state 257, "(1)"
+       line 244, "pan.___", state 265, "(1)"
+       line 248, "pan.___", state 277, "(1)"
+       line 252, "pan.___", state 285, "(1)"
+       line 406, "pan.___", state 320, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 338, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 352, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 244, "pan.___", state 378, "(1)"
+       line 248, "pan.___", state 390, "(1)"
+       line 252, "pan.___", state 398, "(1)"
+       line 402, "pan.___", state 423, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 402, "pan.___", state 425, "(1)"
+       line 402, "pan.___", state 426, "((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid)))"
+       line 402, "pan.___", state 426, "else"
+       line 402, "pan.___", state 429, "(1)"
+       line 406, "pan.___", state 437, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 406, "pan.___", state 439, "(1)"
+       line 406, "pan.___", state 440, "((cache_dirty_urcu_active_readers.bitfield&(1<<_pid)))"
+       line 406, "pan.___", state 440, "else"
+       line 406, "pan.___", state 443, "(1)"
+       line 406, "pan.___", state 444, "(1)"
+       line 406, "pan.___", state 444, "(1)"
+       line 404, "pan.___", state 449, "((i<1))"
+       line 404, "pan.___", state 449, "((i>=1))"
+       line 411, "pan.___", state 455, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 457, "(1)"
+       line 411, "pan.___", state 458, "((cache_dirty_rcu_ptr.bitfield&(1<<_pid)))"
+       line 411, "pan.___", state 458, "else"
+       line 411, "pan.___", state 461, "(1)"
+       line 411, "pan.___", state 462, "(1)"
+       line 411, "pan.___", state 462, "(1)"
+       line 415, "pan.___", state 469, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 471, "(1)"
+       line 415, "pan.___", state 472, "((cache_dirty_rcu_data[i].bitfield&(1<<_pid)))"
+       line 415, "pan.___", state 472, "else"
+       line 415, "pan.___", state 475, "(1)"
+       line 415, "pan.___", state 476, "(1)"
+       line 415, "pan.___", state 476, "(1)"
+       line 413, "pan.___", state 481, "((i<2))"
+       line 413, "pan.___", state 481, "((i>=2))"
+       line 240, "pan.___", state 487, "(1)"
+       line 244, "pan.___", state 495, "(1)"
+       line 244, "pan.___", state 496, "(!((cache_dirty_urcu_active_readers.bitfield&(1<<_pid))))"
+       line 244, "pan.___", state 496, "else"
+       line 242, "pan.___", state 501, "((i<1))"
+       line 242, "pan.___", state 501, "((i>=1))"
+       line 248, "pan.___", state 507, "(1)"
+       line 248, "pan.___", state 508, "(!((cache_dirty_rcu_ptr.bitfield&(1<<_pid))))"
+       line 248, "pan.___", state 508, "else"
+       line 252, "pan.___", state 515, "(1)"
+       line 252, "pan.___", state 516, "(!((cache_dirty_rcu_data[i].bitfield&(1<<_pid))))"
+       line 252, "pan.___", state 516, "else"
+       line 257, "pan.___", state 525, "(!((cache_dirty_urcu_gp_ctr.bitfield&(1<<_pid))))"
+       line 257, "pan.___", state 525, "else"
+       line 422, "pan.___", state 527, "(1)"
+       line 422, "pan.___", state 527, "(1)"
+       line 406, "pan.___", state 547, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 565, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 579, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 244, "pan.___", state 605, "(1)"
+       line 248, "pan.___", state 617, "(1)"
+       line 252, "pan.___", state 625, "(1)"
+       line 406, "pan.___", state 658, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 676, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 690, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 244, "pan.___", state 716, "(1)"
+       line 248, "pan.___", state 728, "(1)"
+       line 252, "pan.___", state 736, "(1)"
+       line 406, "pan.___", state 771, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 789, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 803, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 244, "pan.___", state 829, "(1)"
+       line 248, "pan.___", state 841, "(1)"
+       line 252, "pan.___", state 849, "(1)"
+       line 406, "pan.___", state 887, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 411, "pan.___", state 905, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 415, "pan.___", state 919, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 244, "pan.___", state 945, "(1)"
+       line 248, "pan.___", state 957, "(1)"
+       line 252, "pan.___", state 965, "(1)"
+       line 263, "pan.___", state 1009, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 267, "pan.___", state 1018, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 271, "pan.___", state 1033, "(1)"
+       line 275, "pan.___", state 1040, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 240, "pan.___", state 1056, "(1)"
+       line 244, "pan.___", state 1064, "(1)"
+       line 248, "pan.___", state 1076, "(1)"
+       line 252, "pan.___", state 1084, "(1)"
+       line 263, "pan.___", state 1115, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 267, "pan.___", state 1124, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 271, "pan.___", state 1137, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 275, "pan.___", state 1146, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 240, "pan.___", state 1162, "(1)"
+       line 244, "pan.___", state 1170, "(1)"
+       line 248, "pan.___", state 1182, "(1)"
+       line 252, "pan.___", state 1190, "(1)"
+       line 267, "pan.___", state 1216, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 271, "pan.___", state 1229, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 275, "pan.___", state 1238, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 240, "pan.___", state 1254, "(1)"
+       line 244, "pan.___", state 1262, "(1)"
+       line 248, "pan.___", state 1274, "(1)"
+       line 252, "pan.___", state 1282, "(1)"
+       line 263, "pan.___", state 1313, "cache_dirty_urcu_gp_ctr.bitfield = (cache_dirty_urcu_gp_ctr.bitfield&~((1<<_pid)))"
+       line 267, "pan.___", state 1322, "cache_dirty_urcu_active_readers.bitfield = (cache_dirty_urcu_active_readers.bitfield&~((1<<_pid)))"
+       line 271, "pan.___", state 1335, "cache_dirty_rcu_ptr.bitfield = (cache_dirty_rcu_ptr.bitfield&~((1<<_pid)))"
+       line 275, "pan.___", state 1344, "cache_dirty_rcu_data[i].bitfield = (cache_dirty_rcu_data[i].bitfield&~((1<<_pid)))"
+       line 240, "pan.___", state 1360, "(1)"
+       line 244, "pan.___", state 1368, "(1)"
+       line 248, "pan.___", state 1380, "(1)"
+       line 252, "pan.___", state 1388, "(1)"
+       line 1215, "pan.___", state 1404, "-end-"
+       (110 of 1404 states)
+unreached in proctype :init:
+       (0 of 78 states)
+unreached in proctype :never:
+       line 1280, "pan.___", state 11, "-end-"
+       (1 of 11 states)
+
+pan: elapsed time 14.8 seconds
+pan: rate 49695.746 states/second
+pan: avg transition delay 2.5365e-06 usec
+cp .input.spin urcu_progress_writer_error.spin.input
+cp .input.spin.trail urcu_progress_writer_error.spin.input.trail
+make[1]: Leaving directory `/home/compudj/doc/userspace-rcu/formal-model/urcu-controldataflow'
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_writer_error.spin.input b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_writer_error.spin.input
new file mode 100644 (file)
index 0000000..7161faf
--- /dev/null
@@ -0,0 +1,1251 @@
+#define WRITER_PROGRESS
+#define GEN_ERROR_WRITER_PROGRESS
+
+// Poison value for freed memory
+#define POISON 1
+// Memory with correct data
+#define WINE 0
+#define SLAB_SIZE 2
+
+#define read_poison    (data_read_first[0] == POISON || data_read_second[0] == POISON)
+
+#define RCU_GP_CTR_BIT (1 << 7)
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
+
+//disabled
+#define REMOTE_BARRIERS
+
+//#define ARCH_ALPHA
+#define ARCH_INTEL
+//#define ARCH_POWERPC
+/*
+ * mem.spin: Promela code to validate memory barriers with OOO memory
+ * and out-of-order instruction scheduling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers
+ */
+
+/* Promela validation variables. */
+
+/* specific defines "included" here */
+/* DEFINES file "included" here */
+
+#define NR_READERS 1
+#define NR_WRITERS 1
+
+#define NR_PROCS 2
+
+#define get_pid()      (_pid)
+
+#define get_readerid() (get_pid())
+
+/*
+ * Produced process control and data flow. Updated after each instruction to
+ * show which variables are ready. Using one-hot bit encoding per variable to
+ * save state space. Used as triggers to execute the instructions having those
+ * variables as input. Leaving bits active to inhibit instruction execution.
+ * Scheme used to make instruction disabling and automatic dependency fall-back
+ * automatic.
+ */
+
+#define CONSUME_TOKENS(state, bits, notbits)                   \
+       ((!(state & (notbits))) && (state & (bits)) == (bits))
+
+#define PRODUCE_TOKENS(state, bits)                            \
+       state = state | (bits);
+
+#define CLEAR_TOKENS(state, bits)                              \
+       state = state & ~(bits)
+
+/*
+ * Types of dependency :
+ *
+ * Data dependency
+ *
+ * - True dependency, Read-after-Write (RAW)
+ *
+ * This type of dependency happens when a statement depends on the result of a
+ * previous statement. This applies to any statement which needs to read a
+ * variable written by a preceding statement.
+ *
+ * - False dependency, Write-after-Read (WAR)
+ *
+ * Typically, variable renaming can ensure that this dependency goes away.
+ * However, if the statements must read and then write from/to the same variable
+ * in the OOO memory model, renaming may be impossible, and therefore this
+ * causes a WAR dependency.
+ *
+ * - Output dependency, Write-after-Write (WAW)
+ *
+ * Two writes to the same variable in subsequent statements. Variable renaming
+ * can ensure this is not needed, but can be required when writing multiple
+ * times to the same OOO mem model variable.
+ *
+ * Control dependency
+ *
+ * Execution of a given instruction depends on a previous instruction evaluating
+ * in a way that allows its execution. E.g. : branches.
+ *
+ * Useful considerations for joining dependencies after branch
+ *
+ * - Pre-dominance
+ *
+ * "We say box i dominates box j if every path (leading from input to output
+ * through the diagram) which passes through box j must also pass through box
+ * i. Thus box i dominates box j if box j is subordinate to box i in the
+ * program."
+ *
+ * http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+ * Other classic algorithm to calculate dominance : Lengauer-Tarjan (in gcc)
+ *
+ * - Post-dominance
+ *
+ * Just as pre-dominance, but with arcs of the data flow inverted, and input vs
+ * output exchanged. Therefore, i post-dominating j ensures that every path
+ * passing by j will pass by i before reaching the output.
+ *
+ * Other considerations
+ *
+ * Note about "volatile" keyword dependency : The compiler will order volatile
+ * accesses so they appear in the right order on a given CPU. They can be
+ * reordered by the CPU instruction scheduling. This therefore cannot be
+ * considered as a dependency.
+ *
+ * References :
+ *
+ * Cooper, Keith D.; & Torczon, Linda. (2005). Engineering a Compiler. Morgan
+ * Kaufmann. ISBN 1-55860-698-X. 
+ * Kennedy, Ken; & Allen, Randy. (2001). Optimizing Compilers for Modern
+ * Architectures: A Dependence-based Approach. Morgan Kaufmann. ISBN
+ * 1-55860-286-0. 
+ * Muchnick, Steven S. (1997). Advanced Compiler Design and Implementation.
+ * Morgan Kaufmann. ISBN 1-55860-320-4.
+ */
+
+/*
+ * Note about loops and nested calls
+ *
+ * To keep this model simple, loops expressed in the framework will behave as if
+ * there was a core synchronizing instruction between loops. To see the effect
+ * of loop unrolling, manually unrolling loops is required. Note that if loops
+ * end or start with a core synchronizing instruction, the model is appropriate.
+ * Nested calls are not supported.
+ */
+
+/*
+ * Only Alpha has out-of-order cache bank loads. Other architectures (intel,
+ * powerpc, arm) ensure that dependent reads won't be reordered. c.f.
+ * http://www.linuxjournal.com/article/8212) */
+#ifdef ARCH_ALPHA
+#define HAVE_OOO_CACHE_READ
+#endif
+
+/*
+ * Each process has its own data in cache. Caches are randomly updated.
+ * smp_wmb and smp_rmb forces cache updates (write and read), smp_mb forces
+ * both.
+ */
+
+typedef per_proc_byte {
+       byte val[NR_PROCS];
+};
+
+typedef per_proc_bit {
+       bit val[NR_PROCS];
+};
+
+/* Bitfield has a maximum of 8 procs */
+typedef per_proc_bitfield {
+       byte bitfield;
+};
+
+#define DECLARE_CACHED_VAR(type, x)    \
+       type mem_##x;                   \
+       per_proc_##type cached_##x;     \
+       per_proc_bitfield cache_dirty_##x;
+
+#define INIT_CACHED_VAR(x, v, j)       \
+       mem_##x = v;                    \
+       cache_dirty_##x.bitfield = 0;   \
+       j = 0;                          \
+       do                              \
+       :: j < NR_PROCS ->              \
+               cached_##x.val[j] = v;  \
+               j++                     \
+       :: j >= NR_PROCS -> break       \
+       od;
+
+#define IS_CACHE_DIRTY(x, id)  (cache_dirty_##x.bitfield & (1 << id))
+
+#define READ_CACHED_VAR(x)     (cached_##x.val[get_pid()])
+
+#define WRITE_CACHED_VAR(x, v)                         \
+       atomic {                                        \
+               cached_##x.val[get_pid()] = v;          \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield | (1 << get_pid());    \
+       }
+
+#define CACHE_WRITE_TO_MEM(x, id)                      \
+       if                                              \
+       :: IS_CACHE_DIRTY(x, id) ->                     \
+               mem_##x = cached_##x.val[id];           \
+               cache_dirty_##x.bitfield =              \
+                       cache_dirty_##x.bitfield & (~(1 << id));        \
+       :: else ->                                      \
+               skip                                    \
+       fi;
+
+#define CACHE_READ_FROM_MEM(x, id)     \
+       if                              \
+       :: !IS_CACHE_DIRTY(x, id) ->    \
+               cached_##x.val[id] = mem_##x;\
+       :: else ->                      \
+               skip                    \
+       fi;
+
+/*
+ * May update other caches if cache is dirty, or not.
+ */
+#define RANDOM_CACHE_WRITE_TO_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_WRITE_TO_MEM(x, id);      \
+       :: 1 -> skip                    \
+       fi;
+
+#define RANDOM_CACHE_READ_FROM_MEM(x, id)\
+       if                              \
+       :: 1 -> CACHE_READ_FROM_MEM(x, id);     \
+       :: 1 -> skip                    \
+       fi;
+
+/* Must consume all prior read tokens. All subsequent reads depend on it. */
+inline smp_rmb(i)
+{
+       atomic {
+               CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_READ_FROM_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Must consume all prior write tokens. All subsequent writes depend on it. */
+inline smp_wmb(i)
+{
+       atomic {
+               CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       CACHE_WRITE_TO_MEM(urcu_active_readers[i], get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+       }
+}
+
+/* Synchronization point. Must consume all prior read and write tokens. All
+ * subsequent reads and writes depend on it. */
+inline smp_mb(i)
+{
+       atomic {
+               smp_wmb(i);
+               smp_rmb(i);
+       }
+}
+
+#ifdef REMOTE_BARRIERS
+
+bit reader_barrier[NR_READERS];
+
+/*
+ * We cannot leave the barriers dependencies in place in REMOTE_BARRIERS mode
+ * because they would add nonexistent core synchronization and would therefore
+ * create an incomplete model.
+ * Therefore, we model the read-side memory barriers by completely disabling the
+ * memory barriers and their dependencies from the read-side. One at a time
+ * (different verification runs), we make a different instruction listen for
+ * signals.
+ */
+
+#define smp_mb_reader(i, j)
+
+/*
+ * Service 0, 1 or many barrier requests.
+ */
+inline smp_mb_recv(i, j)
+{
+       do
+       :: (reader_barrier[get_readerid()] == 1) ->
+               /*
+                * We choose to ignore cycles caused by writer busy-looping,
+                * waiting for the reader, sending barrier requests, and the
+                * reader always services them without continuing execution.
+                */
+progress_ignoring_mb1:
+               smp_mb(i);
+               reader_barrier[get_readerid()] = 0;
+       :: 1 ->
+               /*
+                * We choose to ignore writer's non-progress caused by the
+                * reader ignoring the writer's mb() requests.
+                */
+progress_ignoring_mb2:
+               break;
+       od;
+}
+
+#define PROGRESS_LABEL(progressid)     progress_writer_progid_##progressid:
+
+#define smp_mb_send(i, j, progressid)                                          \
+{                                                                              \
+       smp_mb(i);                                                              \
+       i = 0;                                                                  \
+       do                                                                      \
+       :: i < NR_READERS ->                                                    \
+               reader_barrier[i] = 1;                                          \
+               /*                                                              \
+                * Busy-looping waiting for reader barrier handling is of little\
+                * interest, given the reader has the ability to totally ignore \
+                * barrier requests.                                            \
+                */                                                             \
+               do                                                              \
+               :: (reader_barrier[i] == 1) ->                                  \
+PROGRESS_LABEL(progressid)                                                     \
+                       skip;                                                   \
+               :: (reader_barrier[i] == 0) -> break;                           \
+               od;                                                             \
+               i++;                                                            \
+       :: i >= NR_READERS ->                                                   \
+               break                                                           \
+       od;                                                                     \
+       smp_mb(i);                                                              \
+}
+
+#else
+
+#define smp_mb_send(i, j, progressid)  smp_mb(i)
+#define smp_mb_reader(i, j)    smp_mb(i)
+#define smp_mb_recv(i, j)
+
+#endif
+
+/* Keep in sync manually with smp_rmb, smp_wmb, ooo_mem and init() */
+DECLARE_CACHED_VAR(byte, urcu_gp_ctr);
+/* Note ! currently only one reader */
+DECLARE_CACHED_VAR(byte, urcu_active_readers[NR_READERS]);
+/* RCU data */
+DECLARE_CACHED_VAR(bit, rcu_data[SLAB_SIZE]);
+
+/* RCU pointer */
+#if (SLAB_SIZE == 2)
+DECLARE_CACHED_VAR(bit, rcu_ptr);
+bit ptr_read_first[NR_READERS];
+bit ptr_read_second[NR_READERS];
+#else
+DECLARE_CACHED_VAR(byte, rcu_ptr);
+byte ptr_read_first[NR_READERS];
+byte ptr_read_second[NR_READERS];
+#endif
+
+bit data_read_first[NR_READERS];
+bit data_read_second[NR_READERS];
+
+bit init_done = 0;
+
+inline wait_init_done()
+{
+       do
+       :: init_done == 0 -> skip;
+       :: else -> break;
+       od;
+}
+
+inline ooo_mem(i)
+{
+       atomic {
+               RANDOM_CACHE_WRITE_TO_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_WRITE_TO_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_WRITE_TO_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_WRITE_TO_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#ifdef HAVE_OOO_CACHE_READ
+               RANDOM_CACHE_READ_FROM_MEM(urcu_gp_ctr, get_pid());
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       RANDOM_CACHE_READ_FROM_MEM(urcu_active_readers[i],
+                               get_pid());
+                       i++
+               :: i >= NR_READERS -> break
+               od;
+               RANDOM_CACHE_READ_FROM_MEM(rcu_ptr, get_pid());
+               i = 0;
+               do
+               :: i < SLAB_SIZE ->
+                       RANDOM_CACHE_READ_FROM_MEM(rcu_data[i], get_pid());
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+#else
+               smp_rmb(i);
+#endif /* HAVE_OOO_CACHE_READ */
+       }
+}
+
+/*
+ * Bit encoding, urcu_reader :
+ */
+
+int _proc_urcu_reader;
+#define proc_urcu_reader       _proc_urcu_reader
+
+/* Body of PROCEDURE_READ_LOCK */
+#define READ_PROD_A_READ               (1 << 0)
+#define READ_PROD_B_IF_TRUE            (1 << 1)
+#define READ_PROD_B_IF_FALSE           (1 << 2)
+#define READ_PROD_C_IF_TRUE_READ       (1 << 3)
+
+#define PROCEDURE_READ_LOCK(base, consumetoken, producetoken)                          \
+       :: CONSUME_TOKENS(proc_urcu_reader, consumetoken, READ_PROD_A_READ << base) ->  \
+               ooo_mem(i);                                                             \
+               tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);             \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_A_READ << base);             \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         READ_PROD_A_READ << base,             /* RAW, pre-dominant */ \
+                         (READ_PROD_B_IF_TRUE | READ_PROD_B_IF_FALSE) << base) ->      \
+               if                                                                      \
+               :: (!(tmp & RCU_GP_CTR_NEST_MASK)) ->                                   \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base);  \
+               :: else ->                                                              \
+                       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_B_IF_FALSE << base); \
+               fi;                                                                     \
+       /* IF TRUE */                                                                   \
+       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_B_IF_TRUE << base,                \
+                         READ_PROD_C_IF_TRUE_READ << base) ->                          \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_gp_ctr);                                    \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_C_IF_TRUE_READ << base);     \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_C_IF_TRUE_READ     /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2);            \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ELSE */                                                                      \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         (READ_PROD_B_IF_FALSE         /* pre-dominant */              \
+                         | READ_PROD_A_READ) << base,          /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()],                   \
+                                tmp + 1);                                              \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+                                                       /* IF_MERGE implies             \
+                                                        * post-dominance */            \
+       /* ENDIF */                                                                     \
+       skip
+
+/* Body of PROCEDURE_READ_UNLOCK */
+#define READ_PROC_READ_UNLOCK          (1 << 0)
+
+#define PROCEDURE_READ_UNLOCK(base, consumetoken, producetoken)                                \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken,                                                 \
+                         READ_PROC_READ_UNLOCK << base) ->                             \
+               ooo_mem(i);                                                             \
+               tmp2 = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);            \
+               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_UNLOCK << base);        \
+       :: CONSUME_TOKENS(proc_urcu_reader,                                             \
+                         consumetoken                                                  \
+                         | (READ_PROC_READ_UNLOCK << base),    /* WAR */               \
+                         producetoken) ->                                              \
+               ooo_mem(i);                                                             \
+               WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1);        \
+               PRODUCE_TOKENS(proc_urcu_reader, producetoken);                         \
+       skip
+
+
+#define READ_PROD_NONE                 (1 << 0)
+
+/* PROCEDURE_READ_LOCK base = << 1 : 1 to 5 */
+#define READ_LOCK_BASE                 1
+#define READ_LOCK_OUT                  (1 << 5)
+
+#define READ_PROC_FIRST_MB             (1 << 6)
+
+/* PROCEDURE_READ_LOCK (NESTED) base : << 7 : 7 to 11 */
+#define READ_LOCK_NESTED_BASE          7
+#define READ_LOCK_NESTED_OUT           (1 << 11)
+
+#define READ_PROC_READ_GEN             (1 << 12)
+#define READ_PROC_ACCESS_GEN           (1 << 13)
+
+/* PROCEDURE_READ_UNLOCK (NESTED) base = << 14 : 14 to 15 */
+#define READ_UNLOCK_NESTED_BASE                14
+#define READ_UNLOCK_NESTED_OUT         (1 << 15)
+
+#define READ_PROC_SECOND_MB            (1 << 16)
+
+/* PROCEDURE_READ_UNLOCK base = << 17 : 17 to 18 */
+#define READ_UNLOCK_BASE               17
+#define READ_UNLOCK_OUT                        (1 << 18)
+
+/* PROCEDURE_READ_LOCK_UNROLL base = << 19 : 19 to 23 */
+#define READ_LOCK_UNROLL_BASE          19
+#define READ_LOCK_OUT_UNROLL           (1 << 23)
+
+#define READ_PROC_THIRD_MB             (1 << 24)
+
+#define READ_PROC_READ_GEN_UNROLL      (1 << 25)
+#define READ_PROC_ACCESS_GEN_UNROLL    (1 << 26)
+
+#define READ_PROC_FOURTH_MB            (1 << 27)
+
+/* PROCEDURE_READ_UNLOCK_UNROLL base = << 28 : 28 to 29 */
+#define READ_UNLOCK_UNROLL_BASE                28
+#define READ_UNLOCK_OUT_UNROLL         (1 << 29)
+
+
+/* Should not include branches */
+#define READ_PROC_ALL_TOKENS           (READ_PROD_NONE                 \
+                                       | READ_LOCK_OUT                 \
+                                       | READ_PROC_FIRST_MB            \
+                                       | READ_LOCK_NESTED_OUT          \
+                                       | READ_PROC_READ_GEN            \
+                                       | READ_PROC_ACCESS_GEN          \
+                                       | READ_UNLOCK_NESTED_OUT        \
+                                       | READ_PROC_SECOND_MB           \
+                                       | READ_UNLOCK_OUT               \
+                                       | READ_LOCK_OUT_UNROLL          \
+                                       | READ_PROC_THIRD_MB            \
+                                       | READ_PROC_READ_GEN_UNROLL     \
+                                       | READ_PROC_ACCESS_GEN_UNROLL   \
+                                       | READ_PROC_FOURTH_MB           \
+                                       | READ_UNLOCK_OUT_UNROLL)
+
+/* Must clear all tokens, including branches */
+#define READ_PROC_ALL_TOKENS_CLEAR     ((1 << 30) - 1)
+
+inline urcu_one_read(i, j, nest_i, tmp, tmp2)
+{
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROD_NONE);
+
+#ifdef NO_MB
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+#ifdef REMOTE_BARRIERS
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+       PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+#endif
+
+       do
+       :: 1 ->
+
+#ifdef REMOTE_BARRIERS
+               /*
+                * Signal-based memory barrier will only execute when the
+                * execution order appears in program order.
+                */
+               if
+               :: 1 ->
+                       atomic {
+                               if
+                               :: CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE,
+                                               READ_LOCK_OUT | READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT,
+                                               READ_LOCK_NESTED_OUT
+                                               | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT | READ_LOCK_NESTED_OUT,
+                                               READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN,
+                                               READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN,
+                                               READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT,
+                                               READ_UNLOCK_OUT
+                                               | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT,
+                                               READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL,
+                                               READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL,
+                                               READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN
+                                               | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL,
+                                               READ_UNLOCK_OUT_UNROLL)
+                                       || CONSUME_TOKENS(proc_urcu_reader, READ_PROD_NONE | READ_LOCK_OUT
+                                               | READ_LOCK_NESTED_OUT | READ_PROC_READ_GEN | READ_PROC_ACCESS_GEN | READ_UNLOCK_NESTED_OUT
+                                               | READ_UNLOCK_OUT | READ_LOCK_OUT_UNROLL
+                                               | READ_PROC_READ_GEN_UNROLL | READ_PROC_ACCESS_GEN_UNROLL | READ_UNLOCK_OUT_UNROLL,
+                                               0) ->
+                                       goto non_atomic3;
+non_atomic3_end:
+                                       skip;
+                               fi;
+                       }
+               fi;
+
+               goto non_atomic3_skip;
+non_atomic3:
+               smp_mb_recv(i, j);
+               goto non_atomic3_end;
+non_atomic3_skip:
+
+#endif /* REMOTE_BARRIERS */
+
+               atomic {
+                       if
+                       PROCEDURE_READ_LOCK(READ_LOCK_BASE, READ_PROD_NONE, READ_LOCK_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_LOCK_OUT,                /* post-dominant */
+                                         READ_PROC_FIRST_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FIRST_MB);
+
+                       PROCEDURE_READ_LOCK(READ_LOCK_NESTED_BASE, READ_PROC_FIRST_MB | READ_LOCK_OUT,
+                                           READ_LOCK_NESTED_OUT);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB,           /* mb() orders reads */
+                                         READ_PROC_READ_GEN) ->
+                               ooo_mem(i);
+                               ptr_read_first[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_READ_GEN,
+                                         READ_PROC_ACCESS_GEN) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb1;
+rmb1_end:
+                               data_read_first[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_first[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN);
+
+
+                       /* Note : we remove the nested memory barrier from the read unlock
+                        * model, given it is not usually needed. The implementation has the barrier
+                        * because the performance impact added by a branch in the common case does not
+                        * justify it.
+                        */
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_NESTED_BASE,
+                                             READ_PROC_FIRST_MB
+                                             | READ_LOCK_OUT
+                                             | READ_LOCK_NESTED_OUT,
+                                             READ_UNLOCK_NESTED_OUT);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_ACCESS_GEN          /* mb() orders reads */
+                                         | READ_PROC_READ_GEN          /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT               /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT        /* post-dominant */
+                                         | READ_UNLOCK_NESTED_OUT,
+                                         READ_PROC_SECOND_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_SECOND_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_BASE,
+                                             READ_PROC_SECOND_MB       /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT);
+
+                       /* Unrolling loop : second consecutive lock */
+                       /* reading urcu_active_readers, which have been written by
+                        * READ_UNLOCK_OUT : RAW */
+                       PROCEDURE_READ_LOCK(READ_LOCK_UNROLL_BASE,
+                                           READ_UNLOCK_OUT             /* RAW */
+                                           | READ_PROC_SECOND_MB       /* mb() orders reads */
+                                           | READ_PROC_FIRST_MB        /* mb() orders reads */
+                                           | READ_LOCK_NESTED_OUT      /* RAW */
+                                           | READ_LOCK_OUT             /* RAW */
+                                           | READ_UNLOCK_NESTED_OUT,   /* RAW */
+                                           READ_LOCK_OUT_UNROLL);
+
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_THIRD_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_THIRD_MB);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_FIRST_MB            /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_READ_GEN_UNROLL) ->
+                               ooo_mem(i);
+                               ptr_read_second[get_readerid()] = READ_CACHED_VAR(rcu_ptr);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_READ_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL
+                                         | READ_PROC_FIRST_MB          /* mb() orders reads */
+                                         | READ_PROC_SECOND_MB         /* mb() orders reads */
+                                         | READ_PROC_THIRD_MB,         /* mb() orders reads */
+                                         READ_PROC_ACCESS_GEN_UNROLL) ->
+                               /* smp_read_barrier_depends */
+                               goto rmb2;
+rmb2_end:
+                               data_read_second[get_readerid()] =
+                                       READ_CACHED_VAR(rcu_data[ptr_read_second[get_readerid()]]);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_ACCESS_GEN_UNROLL);
+
+                       :: CONSUME_TOKENS(proc_urcu_reader,
+                                         READ_PROC_READ_GEN_UNROLL     /* mb() orders reads */
+                                         | READ_PROC_ACCESS_GEN_UNROLL /* mb() orders reads */
+                                         | READ_PROC_FIRST_MB          /* mb() ordered */
+                                         | READ_PROC_SECOND_MB         /* mb() ordered */
+                                         | READ_PROC_THIRD_MB          /* mb() ordered */
+                                         | READ_LOCK_OUT_UNROLL        /* post-dominant */
+                                         | READ_LOCK_NESTED_OUT
+                                         | READ_LOCK_OUT
+                                         | READ_UNLOCK_NESTED_OUT
+                                         | READ_UNLOCK_OUT,
+                                         READ_PROC_FOURTH_MB) ->
+                               smp_mb_reader(i, j);
+                               PRODUCE_TOKENS(proc_urcu_reader, READ_PROC_FOURTH_MB);
+
+                       PROCEDURE_READ_UNLOCK(READ_UNLOCK_UNROLL_BASE,
+                                             READ_PROC_FOURTH_MB       /* mb() orders reads */
+                                             | READ_PROC_THIRD_MB      /* mb() orders reads */
+                                             | READ_LOCK_OUT_UNROLL    /* RAW */
+                                             | READ_PROC_SECOND_MB     /* mb() orders reads */
+                                             | READ_PROC_FIRST_MB      /* mb() orders reads */
+                                             | READ_LOCK_NESTED_OUT    /* RAW */
+                                             | READ_LOCK_OUT           /* RAW */
+                                             | READ_UNLOCK_NESTED_OUT, /* RAW */
+                                             READ_UNLOCK_OUT_UNROLL);
+                       :: CONSUME_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS, 0) ->
+                               CLEAR_TOKENS(proc_urcu_reader, READ_PROC_ALL_TOKENS_CLEAR);
+                               break;
+                       fi;
+               }
+       od;
+       /*
+        * Dependency between consecutive loops :
+        * RAW dependency on 
+        * WRITE_CACHED_VAR(urcu_active_readers[get_readerid()], tmp2 - 1)
+        * tmp = READ_CACHED_VAR(urcu_active_readers[get_readerid()]);
+        * between loops.
+        * _WHEN THE MB()s are in place_, they add full ordering of the
+        * generation pointer read wrt active reader count read, which ensures
+        * execution will not spill across loop execution.
+        * However, in the event mb()s are removed (execution using signal
+        * handler to promote barrier()() -> smp_mb()), nothing prevents one loop
+        * to spill its execution on other loop's execution.
+        */
+       goto end;
+rmb1:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb1_end;
+rmb2:
+#ifndef NO_RMB
+       smp_rmb(i);
+#else
+       ooo_mem(i);
+#endif
+       goto rmb2_end;
+end:
+       skip;
+}
+
+
+
+active proctype urcu_reader()
+{
+       byte i, j, nest_i;
+       byte tmp, tmp2;
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+end_reader:
+       do
+       :: 1 ->
+               /*
+                * We do not test reader's progress here, because we are mainly
+                * interested in writer's progress. The reader never blocks
+                * anyway. We have to test for reader/writer's progress
+                * separately, otherwise we could think the writer is doing
+                * progress when it's blocked by an always progressing reader.
+                */
+#ifdef READER_PROGRESS
+progress_reader:
+#endif
+               urcu_one_read(i, j, nest_i, tmp, tmp2);
+       od;
+}
+
+/* no name clash please */
+#undef proc_urcu_reader
+
+
+/* Model the RCU update process. */
+
+/*
+ * Bit encoding, urcu_writer :
+ * Currently only supports one reader.
+ */
+
+int _proc_urcu_writer;
+#define proc_urcu_writer       _proc_urcu_writer
+
+#define WRITE_PROD_NONE                        (1 << 0)
+
+#define WRITE_DATA                     (1 << 1)
+#define WRITE_PROC_WMB                 (1 << 2)
+#define WRITE_XCHG_PTR                 (1 << 3)
+
+#define WRITE_PROC_FIRST_MB            (1 << 4)
+
+/* first flip */
+#define WRITE_PROC_FIRST_READ_GP       (1 << 5)
+#define WRITE_PROC_FIRST_WRITE_GP      (1 << 6)
+#define WRITE_PROC_FIRST_WAIT          (1 << 7)
+#define WRITE_PROC_FIRST_WAIT_LOOP     (1 << 8)
+
+/* second flip */
+#define WRITE_PROC_SECOND_READ_GP      (1 << 9)
+#define WRITE_PROC_SECOND_WRITE_GP     (1 << 10)
+#define WRITE_PROC_SECOND_WAIT         (1 << 11)
+#define WRITE_PROC_SECOND_WAIT_LOOP    (1 << 12)
+
+#define WRITE_PROC_SECOND_MB           (1 << 13)
+
+#define WRITE_FREE                     (1 << 14)
+
+#define WRITE_PROC_ALL_TOKENS          (WRITE_PROD_NONE                \
+                                       | WRITE_DATA                    \
+                                       | WRITE_PROC_WMB                \
+                                       | WRITE_XCHG_PTR                \
+                                       | WRITE_PROC_FIRST_MB           \
+                                       | WRITE_PROC_FIRST_READ_GP      \
+                                       | WRITE_PROC_FIRST_WRITE_GP     \
+                                       | WRITE_PROC_FIRST_WAIT         \
+                                       | WRITE_PROC_SECOND_READ_GP     \
+                                       | WRITE_PROC_SECOND_WRITE_GP    \
+                                       | WRITE_PROC_SECOND_WAIT        \
+                                       | WRITE_PROC_SECOND_MB          \
+                                       | WRITE_FREE)
+
+#define WRITE_PROC_ALL_TOKENS_CLEAR    ((1 << 15) - 1)
+
+/*
+ * Mutexes are implied around writer execution. A single writer at a time.
+ */
+active proctype urcu_writer()
+{
+       byte i, j;
+       byte tmp, tmp2, tmpa;
+       byte cur_data = 0, old_data, loop_nr = 0;
+       byte cur_gp_val = 0;    /*
+                                * Keep a local trace of the current parity so
+                                * we don't add non-existing dependencies on the global
+                                * GP update. Needed to test single flip case.
+                                */
+
+       wait_init_done();
+
+       assert(get_pid() < NR_PROCS);
+
+       do
+       :: (loop_nr < 3) ->
+#ifdef WRITER_PROGRESS
+progress_writer1:
+#endif
+               loop_nr = loop_nr + 1;
+
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROD_NONE);
+
+#ifdef NO_WMB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+#endif
+
+#ifdef NO_MB
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+#endif
+
+#ifdef SINGLE_FLIP
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+               /* For single flip, we need to know the current parity */
+               cur_gp_val = cur_gp_val ^ RCU_GP_CTR_BIT;
+#endif
+
+               do :: 1 ->
+               atomic {
+               if
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROD_NONE,
+                                 WRITE_DATA) ->
+                       ooo_mem(i);
+                       cur_data = (cur_data + 1) % SLAB_SIZE;
+                       WRITE_CACHED_VAR(rcu_data[cur_data], WINE);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_DATA);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA,
+                                 WRITE_PROC_WMB) ->
+                       smp_wmb(i);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_WMB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_WMB,
+                                 WRITE_XCHG_PTR) ->
+                       /* rcu_xchg_pointer() */
+                       atomic {
+                               old_data = READ_CACHED_VAR(rcu_ptr);
+                               WRITE_CACHED_VAR(rcu_ptr, cur_data);
+                       }
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_XCHG_PTR);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR,
+                                 WRITE_PROC_FIRST_MB) ->
+                       goto smp_mb_send1;
+smp_mb_send1_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_MB);
+
+               /* first flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_FIRST_READ_GP) ->
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP,
+                                 WRITE_PROC_FIRST_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_MB,  /* can be reordered before/after flips */
+                                 WRITE_PROC_FIRST_WAIT | WRITE_PROC_FIRST_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+#ifndef SINGLE_FLIP
+                       /* In normal execution, we are always starting by
+                        * waiting for the even parity.
+                        */
+                       cur_gp_val = RCU_GP_CTR_BIT;
+#endif
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ cur_gp_val) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send2;
+smp_mb_send2_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_FIRST_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_FIRST_WAIT_LOOP | WRITE_PROC_FIRST_WAIT);
+
+               /* second flip */
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT         /* Control dependency : need to branch out of
+                                                                * the loop to execute the next flip (CHECK) */
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_READ_GP) ->
+                       ooo_mem(i);
+                       tmpa = READ_CACHED_VAR(urcu_gp_ctr);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_READ_GP);
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_MB
+                                 | WRITE_PROC_WMB
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP,
+                                 WRITE_PROC_SECOND_WRITE_GP) ->
+                       ooo_mem(i);
+                       WRITE_CACHED_VAR(urcu_gp_ctr, tmpa ^ RCU_GP_CTR_BIT);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WRITE_GP);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 WRITE_PROC_SECOND_WAIT | WRITE_PROC_SECOND_WAIT_LOOP) ->
+                       ooo_mem(i);
+                       /* ONLY WAITING FOR READER 0 */
+                       tmp2 = READ_CACHED_VAR(urcu_active_readers[0]);
+                       if
+                       :: (tmp2 & RCU_GP_CTR_NEST_MASK)
+                                       && ((tmp2 ^ 0) & RCU_GP_CTR_BIT) ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP);
+                       :: else ->
+                               PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT);
+                       fi;
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 //WRITE_PROC_FIRST_WRITE_GP   /* TEST ADDING SYNC CORE */
+                                 WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_WAIT_LOOP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,        /* can be reordered before/after flips */
+                                 0) ->
+#ifndef GEN_ERROR_WRITER_PROGRESS
+                       goto smp_mb_send3;
+smp_mb_send3_end:
+#else
+                       ooo_mem(i);
+#endif
+                       /* This instruction loops to WRITE_PROC_SECOND_WAIT */
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_WAIT_LOOP | WRITE_PROC_SECOND_WAIT);
+
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_FIRST_READ_GP
+                                 | WRITE_PROC_SECOND_READ_GP
+                                 | WRITE_PROC_FIRST_WRITE_GP
+                                 | WRITE_PROC_SECOND_WRITE_GP
+                                 | WRITE_DATA | WRITE_PROC_WMB | WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_PROC_SECOND_MB) ->
+                       goto smp_mb_send4;
+smp_mb_send4_end:
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_PROC_SECOND_MB);
+
+               :: CONSUME_TOKENS(proc_urcu_writer,
+                                 WRITE_XCHG_PTR
+                                 | WRITE_PROC_FIRST_WAIT
+                                 | WRITE_PROC_SECOND_WAIT
+                                 | WRITE_PROC_WMB      /* No dependency on
+                                                        * WRITE_DATA because we
+                                                        * write to a
+                                                        * different location. */
+                                 | WRITE_PROC_SECOND_MB
+                                 | WRITE_PROC_FIRST_MB,
+                                 WRITE_FREE) ->
+                       WRITE_CACHED_VAR(rcu_data[old_data], POISON);
+                       PRODUCE_TOKENS(proc_urcu_writer, WRITE_FREE);
+
+               :: CONSUME_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS, 0) ->
+                       CLEAR_TOKENS(proc_urcu_writer, WRITE_PROC_ALL_TOKENS_CLEAR);
+                       break;
+               fi;
+               }
+               od;
+               /*
+                * Note : Promela model adds implicit serialization of the
+                * WRITE_FREE instruction. Normally, it would be permitted to
+                * spill on the next loop execution. Given the validation we do
+                * checks for the data entry read to be poisoned, it's ok if
+                * we do not check "late arriving" memory poisoning.
+                */
+       :: else -> break;
+       od;
+       /*
+        * Given the reader loops infinitely, let the writer also busy-loop
+        * with progress here so, with weak fairness, we can test the
+        * writer's progress.
+        */
+end_writer:
+       do
+       :: 1 ->
+#ifdef WRITER_PROGRESS
+progress_writer2:
+#endif
+#ifdef READER_PROGRESS
+               /*
+                * Make sure we don't block the reader's progress.
+                */
+               smp_mb_send(i, j, 5);
+#endif
+               skip;
+       od;
+
+       /* Non-atomic parts of the loop */
+       goto end;
+smp_mb_send1:
+       smp_mb_send(i, j, 1);
+       goto smp_mb_send1_end;
+#ifndef GEN_ERROR_WRITER_PROGRESS
+smp_mb_send2:
+       smp_mb_send(i, j, 2);
+       goto smp_mb_send2_end;
+smp_mb_send3:
+       smp_mb_send(i, j, 3);
+       goto smp_mb_send3_end;
+#endif
+smp_mb_send4:
+       smp_mb_send(i, j, 4);
+       goto smp_mb_send4_end;
+end:
+       skip;
+}
+
+/* no name clash please */
+#undef proc_urcu_writer
+
+
+/* Leave after the readers and writers so the pid count is ok. */
+init {
+       byte i, j;
+
+       atomic {
+               INIT_CACHED_VAR(urcu_gp_ctr, 1, j);
+               INIT_CACHED_VAR(rcu_ptr, 0, j);
+
+               i = 0;
+               do
+               :: i < NR_READERS ->
+                       INIT_CACHED_VAR(urcu_active_readers[i], 0, j);
+                       ptr_read_first[i] = 1;
+                       ptr_read_second[i] = 1;
+                       data_read_first[i] = WINE;
+                       data_read_second[i] = WINE;
+                       i++;
+               :: i >= NR_READERS -> break
+               od;
+               INIT_CACHED_VAR(rcu_data[0], WINE, j);
+               i = 1;
+               do
+               :: i < SLAB_SIZE ->
+                       INIT_CACHED_VAR(rcu_data[i], POISON, j);
+                       i++
+               :: i >= SLAB_SIZE -> break
+               od;
+
+               init_done = 1;
+       }
+}
diff --git a/formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_writer_error.spin.input.trail b/formal-model/results/urcu-controldataflow-ipi-intel/urcu_progress_writer_error.spin.input.trail
new file mode 100644 (file)
index 0000000..ee9e4b8
--- /dev/null
@@ -0,0 +1,1700 @@
+-2:3:-2
+-4:-4:-4
+1:0:3993
+2:3:3913
+3:3:3916
+4:3:3916
+5:3:3919
+6:3:3927
+7:3:3927
+8:3:3930
+9:3:3936
+10:3:3940
+11:3:3940
+12:3:3943
+13:3:3953
+14:3:3961
+15:3:3961
+16:3:3964
+17:3:3970
+18:3:3974
+19:3:3974
+20:3:3977
+21:3:3983
+22:3:3987
+23:3:3988
+24:0:3993
+25:3:3990
+26:0:3993
+27:2:2511
+28:0:3993
+29:2:2517
+30:0:3993
+31:2:2518
+32:0:3993
+33:2:2519
+34:0:3993
+35:2:2520
+36:0:3993
+37:2:2521
+38:0:3993
+39:2:2522
+40:2:2523
+41:2:2527
+42:2:2528
+43:2:2536
+44:2:2537
+45:2:2541
+46:2:2542
+47:2:2550
+48:2:2555
+49:2:2559
+50:2:2560
+51:2:2568
+52:2:2569
+53:2:2573
+54:2:2574
+55:2:2568
+56:2:2569
+57:2:2573
+58:2:2574
+59:2:2582
+60:2:2587
+61:2:2594
+62:2:2595
+63:2:2602
+64:2:2607
+65:2:2614
+66:2:2615
+67:2:2614
+68:2:2615
+69:2:2622
+70:2:2632
+71:0:3993
+72:2:2521
+73:0:3993
+74:2:2636
+75:2:2640
+76:2:2641
+77:2:2645
+78:2:2649
+79:2:2650
+80:2:2654
+81:2:2662
+82:2:2663
+83:2:2667
+84:2:2671
+85:2:2672
+86:2:2667
+87:2:2668
+88:2:2676
+89:0:3993
+90:2:2521
+91:0:3993
+92:2:2684
+93:2:2685
+94:2:2686
+95:0:3993
+96:2:2521
+97:0:3993
+98:2:2691
+99:0:3993
+100:2:3518
+101:2:3519
+102:2:3523
+103:2:3527
+104:2:3528
+105:2:3532
+106:2:3537
+107:2:3545
+108:2:3549
+109:2:3550
+110:2:3545
+111:2:3549
+112:2:3550
+113:2:3554
+114:2:3561
+115:2:3568
+116:2:3569
+117:2:3576
+118:2:3581
+119:2:3588
+120:2:3589
+121:2:3588
+122:2:3589
+123:2:3596
+124:2:3600
+125:0:3993
+126:2:3605
+127:0:3993
+128:2:3606
+129:0:3993
+130:2:3607
+131:0:3993
+132:2:3608
+133:0:3993
+134:1:2
+135:0:3993
+136:1:8
+137:0:3993
+138:1:9
+139:0:3993
+140:2:3609
+141:0:3993
+142:1:10
+143:0:3993
+144:2:3608
+145:0:3993
+146:1:11
+147:0:3993
+148:2:3609
+149:0:3993
+150:1:12
+151:0:3993
+152:2:3608
+153:0:3993
+154:1:13
+155:0:3993
+156:2:3609
+157:0:3993
+158:1:14
+159:0:3993
+160:1:15
+161:0:3993
+162:1:16
+163:0:3993
+164:2:3608
+165:0:3993
+166:1:17
+167:0:3993
+168:2:3609
+169:0:3993
+170:1:26
+171:0:3993
+172:2:3608
+173:0:3993
+174:1:30
+175:1:31
+176:1:35
+177:1:39
+178:1:40
+179:1:44
+180:1:52
+181:1:53
+182:1:57
+183:1:61
+184:1:62
+185:1:57
+186:1:61
+187:1:62
+188:1:66
+189:1:73
+190:1:80
+191:1:81
+192:1:88
+193:1:93
+194:1:100
+195:1:101
+196:1:100
+197:1:101
+198:1:108
+199:1:112
+200:0:3993
+201:2:3609
+202:0:3993
+203:1:117
+204:0:3993
+205:2:3610
+206:0:3993
+207:2:3615
+208:0:3993
+209:2:3616
+210:0:3993
+211:2:3624
+212:2:3625
+213:2:3629
+214:2:3633
+215:2:3634
+216:2:3638
+217:2:3646
+218:2:3647
+219:2:3651
+220:2:3655
+221:2:3656
+222:2:3651
+223:2:3655
+224:2:3656
+225:2:3660
+226:2:3667
+227:2:3674
+228:2:3675
+229:2:3682
+230:2:3687
+231:2:3694
+232:2:3695
+233:2:3694
+234:2:3695
+235:2:3702
+236:2:3706
+237:0:3993
+238:2:2693
+239:2:3499
+240:0:3993
+241:2:2521
+242:0:3993
+243:2:2694
+244:0:3993
+245:2:2521
+246:0:3993
+247:2:2697
+248:2:2698
+249:2:2702
+250:2:2703
+251:2:2711
+252:2:2712
+253:2:2716
+254:2:2717
+255:2:2725
+256:2:2730
+257:2:2734
+258:2:2735
+259:2:2743
+260:2:2744
+261:2:2748
+262:2:2749
+263:2:2743
+264:2:2744
+265:2:2748
+266:2:2749
+267:2:2757
+268:2:2762
+269:2:2769
+270:2:2770
+271:2:2777
+272:2:2782
+273:2:2789
+274:2:2790
+275:2:2789
+276:2:2790
+277:2:2797
+278:2:2806
+279:0:3993
+280:2:2521
+281:0:3993
+282:2:2810
+283:2:2811
+284:2:2812
+285:2:2824
+286:2:2825
+287:2:2829
+288:2:2830
+289:2:2838
+290:2:2843
+291:2:2847
+292:2:2848
+293:2:2856
+294:2:2857
+295:2:2861
+296:2:2862
+297:2:2856
+298:2:2857
+299:2:2861
+300:2:2862
+301:2:2870
+302:2:2875
+303:2:2882
+304:2:2883
+305:2:2890
+306:2:2895
+307:2:2902
+308:2:2903
+309:2:2902
+310:2:2903
+311:2:2910
+312:2:2923
+313:2:2924
+314:0:3993
+315:2:2521
+316:0:3993
+317:2:3037
+318:2:3038
+319:2:3042
+320:2:3043
+321:2:3051
+322:2:3052
+323:2:3056
+324:2:3057
+325:2:3065
+326:2:3070
+327:2:3074
+328:2:3075
+329:2:3083
+330:2:3084
+331:2:3088
+332:2:3089
+333:2:3083
+334:2:3084
+335:2:3088
+336:2:3089
+337:2:3097
+338:2:3102
+339:2:3109
+340:2:3110
+341:2:3117
+342:2:3122
+343:2:3129
+344:2:3130
+345:2:3129
+346:2:3130
+347:2:3137
+348:0:3993
+349:2:2521
+350:0:3993
+351:2:3148
+352:2:3149
+353:2:3153
+354:2:3154
+355:2:3162
+356:2:3163
+357:2:3167
+358:2:3168
+359:2:3176
+360:2:3181
+361:2:3185
+362:2:3186
+363:2:3194
+364:2:3195
+365:2:3199
+366:2:3200
+367:2:3194
+368:2:3195
+369:2:3199
+370:2:3200
+371:2:3208
+372:2:3213
+373:2:3220
+374:2:3221
+375:2:3228
+376:2:3233
+377:2:3240
+378:2:3241
+379:2:3240
+380:2:3241
+381:2:3248
+382:2:3257
+383:0:3993
+384:2:2521
+385:0:3993
+386:2:3261
+387:2:3262
+388:2:3263
+389:2:3275
+390:2:3276
+391:2:3280
+392:2:3281
+393:2:3289
+394:2:3294
+395:2:3298
+396:2:3299
+397:2:3307
+398:2:3308
+399:2:3312
+400:2:3313
+401:2:3307
+402:2:3308
+403:2:3312
+404:2:3313
+405:2:3321
+406:2:3326
+407:2:3333
+408:2:3334
+409:2:3341
+410:2:3346
+411:2:3353
+412:2:3354
+413:2:3353
+414:2:3354
+415:2:3361
+416:2:3373
+417:2:3374
+418:0:3993
+419:2:2521
+420:0:3993
+421:2:3487
+422:0:3993
+423:2:3716
+424:2:3717
+425:2:3721
+426:2:3725
+427:2:3726
+428:2:3730
+429:2:3738
+430:2:3739
+431:2:3743
+432:2:3747
+433:2:3748
+434:2:3743
+435:2:3747
+436:2:3748
+437:2:3752
+438:2:3759
+439:2:3766
+440:2:3767
+441:2:3774
+442:2:3779
+443:2:3786
+444:2:3787
+445:2:3786
+446:2:3787
+447:2:3794
+448:2:3798
+449:0:3993
+450:2:3803
+451:0:3993
+452:2:3804
+453:0:3993
+454:2:3805
+455:0:3993
+456:2:3806
+457:0:3993
+458:1:26
+459:0:3993
+460:2:3807
+461:0:3993
+462:1:30
+463:1:31
+464:1:35
+465:1:39
+466:1:40
+467:1:44
+468:1:52
+469:1:53
+470:1:57
+471:1:61
+472:1:62
+473:1:57
+474:1:61
+475:1:62
+476:1:66
+477:1:73
+478:1:80
+479:1:81
+480:1:88
+481:1:93
+482:1:100
+483:1:101
+484:1:100
+485:1:101
+486:1:108
+487:1:112
+488:0:3993
+489:2:3806
+490:0:3993
+491:1:117
+492:0:3993
+493:2:3807
+494:0:3993
+495:2:3808
+496:0:3993
+497:2:3813
+498:0:3993
+499:2:3814
+500:0:3993
+501:2:3822
+502:2:3823
+503:2:3827
+504:2:3831
+505:2:3832
+506:2:3836
+507:2:3844
+508:2:3845
+509:2:3849
+510:2:3853
+511:2:3854
+512:2:3849
+513:2:3853
+514:2:3854
+515:2:3858
+516:2:3865
+517:2:3872
+518:2:3873
+519:2:3880
+520:2:3885
+521:2:3892
+522:2:3893
+523:2:3892
+524:2:3893
+525:2:3900
+526:2:3904
+527:0:3993
+528:2:3489
+529:2:3499
+530:0:3993
+531:2:2521
+532:0:3993
+533:2:3490
+534:2:3491
+535:0:3993
+536:2:2521
+537:0:3993
+538:2:3495
+539:0:3993
+540:2:3503
+541:0:3993
+542:2:2518
+543:0:3993
+544:2:2519
+545:0:3993
+546:2:2520
+547:0:3993
+548:2:2521
+549:0:3993
+550:2:2522
+551:2:2523
+552:2:2527
+553:2:2528
+554:2:2536
+555:2:2537
+556:2:2541
+557:2:2542
+558:2:2550
+559:2:2555
+560:2:2559
+561:2:2560
+562:2:2568
+563:2:2569
+564:2:2570
+565:2:2568
+566:2:2569
+567:2:2573
+568:2:2574
+569:2:2582
+570:2:2587
+571:2:2594
+572:2:2595
+573:2:2602
+574:2:2607
+575:2:2614
+576:2:2615
+577:2:2614
+578:2:2615
+579:2:2622
+580:2:2632
+581:0:3993
+582:2:2521
+583:0:3993
+584:2:2636
+585:2:2640
+586:2:2641
+587:2:2645
+588:2:2649
+589:2:2650
+590:2:2654
+591:2:2662
+592:2:2663
+593:2:2667
+594:2:2668
+595:2:2667
+596:2:2671
+597:2:2672
+598:2:2676
+599:0:3993
+600:2:2521
+601:0:3993
+602:2:2684
+603:2:2685
+604:2:2686
+605:0:3993
+606:2:2521
+607:0:3993
+608:2:2691
+609:0:3993
+610:2:3518
+611:2:3519
+612:2:3523
+613:2:3527
+614:2:3528
+615:2:3532
+616:2:3537
+617:2:3545
+618:2:3549
+619:2:3550
+620:2:3545
+621:2:3549
+622:2:3550
+623:2:3554
+624:2:3561
+625:2:3568
+626:2:3569
+627:2:3576
+628:2:3581
+629:2:3588
+630:2:3589
+631:2:3588
+632:2:3589
+633:2:3596
+634:2:3600
+635:0:3993
+636:2:3605
+637:0:3993
+638:2:3606
+639:0:3993
+640:2:3607
+641:0:3993
+642:2:3608
+643:0:3993
+644:1:26
+645:0:3993
+646:2:3609
+647:0:3993
+648:1:30
+649:1:31
+650:1:35
+651:1:39
+652:1:40
+653:1:44
+654:1:52
+655:1:53
+656:1:57
+657:1:61
+658:1:62
+659:1:57
+660:1:61
+661:1:62
+662:1:66
+663:1:73
+664:1:80
+665:1:81
+666:1:88
+667:1:93
+668:1:100
+669:1:101
+670:1:100
+671:1:101
+672:1:108
+673:1:112
+674:0:3993
+675:2:3608
+676:0:3993
+677:1:117
+678:0:3993
+679:2:3609
+680:0:3993
+681:2:3610
+682:0:3993
+683:2:3615
+684:0:3993
+685:2:3616
+686:0:3993
+687:2:3624
+688:2:3625
+689:2:3629
+690:2:3633
+691:2:3634
+692:2:3638
+693:2:3646
+694:2:3647
+695:2:3651
+696:2:3655
+697:2:3656
+698:2:3651
+699:2:3655
+700:2:3656
+701:2:3660
+702:2:3667
+703:2:3674
+704:2:3675
+705:2:3682
+706:2:3687
+707:2:3694
+708:2:3695
+709:2:3694
+710:2:3695
+711:2:3702
+712:2:3706
+713:0:3993
+714:2:2693
+715:2:3499
+716:0:3993
+717:2:2521
+718:0:3993
+719:2:2694
+720:0:3993
+721:2:2521
+722:0:3993
+723:2:2697
+724:2:2698
+725:2:2702
+726:2:2703
+727:2:2711
+728:2:2712
+729:2:2716
+730:2:2717
+731:2:2725
+732:2:2730
+733:2:2734
+734:2:2735
+735:2:2743
+736:2:2744
+737:2:2748
+738:2:2749
+739:2:2743
+740:2:2744
+741:2:2748
+742:2:2749
+743:2:2757
+744:2:2762
+745:2:2769
+746:2:2770
+747:2:2777
+748:2:2782
+749:2:2789
+750:2:2790
+751:2:2789
+752:2:2790
+753:2:2797
+754:2:2806
+755:0:3993
+756:2:2521
+757:0:3993
+758:2:2810
+759:2:2811
+760:2:2812
+761:2:2824
+762:2:2825
+763:2:2829
+764:2:2830
+765:2:2838
+766:2:2843
+767:2:2847
+768:2:2848
+769:2:2856
+770:2:2857
+771:2:2861
+772:2:2862
+773:2:2856
+774:2:2857
+775:2:2861
+776:2:2862
+777:2:2870
+778:2:2875
+779:2:2882
+780:2:2883
+781:2:2890
+782:2:2895
+783:2:2902
+784:2:2903
+785:2:2902
+786:2:2903
+787:2:2910
+788:2:2923
+789:2:2924
+790:0:3993
+791:2:2521
+792:0:3993
+793:2:3037
+794:2:3038
+795:2:3042
+796:2:3043
+797:2:3051
+798:2:3052
+799:2:3056
+800:2:3057
+801:2:3065
+802:2:3070
+803:2:3074
+804:2:3075
+805:2:3083
+806:2:3084
+807:2:3088
+808:2:3089
+809:2:3083
+810:2:3084
+811:2:3088
+812:2:3089
+813:2:3097
+814:2:3102
+815:2:3109
+816:2:3110
+817:2:3117
+818:2:3122
+819:2:3129
+820:2:3130
+821:2:3129
+822:2:3130
+823:2:3137
+824:0:3993
+825:2:2521
+826:0:3993
+827:2:3148
+828:2:3149
+829:2:3153
+830:2:3154
+831:2:3162
+832:2:3163
+833:2:3167
+834:2:3168
+835:2:3176
+836:2:3181
+837:2:3185
+838:2:3186
+839:2:3194
+840:2:3195
+841:2:3199
+842:2:3200
+843:2:3194
+844:2:3195
+845:2:3199
+846:2:3200
+847:2:3208
+848:2:3213
+849:2:3220
+850:2:3221
+851:2:3228
+852:2:3233
+853:2:3240
+854:2:3241
+855:2:3240
+856:2:3241
+857:2:3248
+858:2:3257
+859:0:3993
+860:2:2521
+861:0:3993
+862:2:3261
+863:2:3262
+864:2:3263
+865:2:3275
+866:2:3276
+867:2:3280
+868:2:3281
+869:2:3289
+870:2:3294
+871:2:3298
+872:2:3299
+873:2:3307
+874:2:3308
+875:2:3312
+876:2:3313
+877:2:3307
+878:2:3308
+879:2:3312
+880:2:3313
+881:2:3321
+882:2:3326
+883:2:3333
+884:2:3334
+885:2:3341
+886:2:3346
+887:2:3353
+888:2:3354
+889:2:3353
+890:2:3354
+891:2:3361
+892:2:3373
+893:2:3374
+894:0:3993
+895:2:2521
+896:0:3993
+897:2:3487
+898:0:3993
+899:2:3716
+900:2:3717
+901:2:3721
+902:2:3725
+903:2:3726
+904:2:3730
+905:2:3738
+906:2:3739
+907:2:3743
+908:2:3747
+909:2:3748
+910:2:3743
+911:2:3747
+912:2:3748
+913:2:3752
+914:2:3759
+915:2:3766
+916:2:3767
+917:2:3774
+918:2:3779
+919:2:3786
+920:2:3787
+921:2:3786
+922:2:3787
+923:2:3794
+924:2:3798
+925:0:3993
+926:2:3803
+927:0:3993
+928:2:3804
+929:0:3993
+930:2:3805
+931:0:3993
+932:2:3806
+933:0:3993
+934:1:26
+935:0:3993
+936:2:3807
+937:0:3993
+938:1:30
+939:1:31
+940:1:35
+941:1:39
+942:1:40
+943:1:44
+944:1:52
+945:1:53
+946:1:57
+947:1:61
+948:1:62
+949:1:57
+950:1:61
+951:1:62
+952:1:66
+953:1:73
+954:1:80
+955:1:81
+956:1:88
+957:1:93
+958:1:100
+959:1:101
+960:1:100
+961:1:101
+962:1:108
+963:1:112
+964:0:3993
+965:2:3806
+966:0:3993
+967:1:117
+968:0:3993
+969:2:3807
+970:0:3993
+971:2:3808
+972:0:3993
+973:2:3813
+974:0:3993
+975:2:3814
+976:0:3993
+977:2:3822
+978:2:3823
+979:2:3827
+980:2:3831
+981:2:3832
+982:2:3836
+983:2:3844
+984:2:3845
+985:2:3849
+986:2:3853
+987:2:3854
+988:2:3849
+989:2:3853
+990:2:3854
+991:2:3858
+992:2:3865
+993:2:3872
+994:2:3873
+995:2:3880
+996:2:3885
+997:2:3892
+998:2:3893
+999:2:3892
+1000:2:3893
+1001:2:3900
+1002:2:3904
+1003:0:3993
+1004:2:3489
+1005:2:3499
+1006:0:3993
+1007:2:2521
+1008:0:3993
+1009:2:3490
+1010:2:3491
+1011:0:3993
+1012:2:2521
+1013:0:3993
+1014:2:3495
+1015:0:3993
+1016:2:3503
+1017:0:3993
+1018:2:2518
+1019:0:3993
+1020:2:2519
+1021:0:3993
+1022:2:2520
+1023:0:3993
+1024:2:2521
+1025:0:3993
+1026:2:2522
+1027:2:2523
+1028:2:2527
+1029:2:2528
+1030:2:2536
+1031:2:2537
+1032:2:2541
+1033:2:2542
+1034:2:2550
+1035:2:2555
+1036:2:2559
+1037:2:2560
+1038:2:2568
+1039:2:2569
+1040:2:2573
+1041:2:2574
+1042:2:2568
+1043:2:2569
+1044:2:2570
+1045:2:2582
+1046:2:2587
+1047:2:2594
+1048:2:2595
+1049:2:2602
+1050:2:2607
+1051:2:2614
+1052:2:2615
+1053:2:2614
+1054:2:2615
+1055:2:2622
+1056:2:2632
+1057:0:3993
+1058:2:2521
+1059:0:3993
+1060:2:2636
+1061:2:2640
+1062:2:2641
+1063:2:2645
+1064:2:2649
+1065:2:2650
+1066:2:2654
+1067:2:2662
+1068:2:2663
+1069:2:2667
+1070:2:2671
+1071:2:2672
+1072:2:2667
+1073:2:2668
+1074:2:2676
+1075:0:3993
+1076:2:2521
+1077:0:3993
+1078:2:2684
+1079:2:2685
+1080:2:2686
+1081:0:3993
+1082:2:2521
+1083:0:3993
+1084:2:2691
+1085:0:3993
+1086:2:3518
+1087:2:3519
+1088:2:3523
+1089:2:3527
+1090:2:3528
+1091:2:3532
+1092:2:3537
+1093:2:3545
+1094:2:3549
+1095:2:3550
+1096:2:3545
+1097:2:3549
+1098:2:3550
+1099:2:3554
+1100:2:3561
+1101:2:3568
+1102:2:3569
+1103:2:3576
+1104:2:3581
+1105:2:3588
+1106:2:3589
+1107:2:3588
+1108:2:3589
+1109:2:3596
+1110:2:3600
+1111:0:3993
+1112:2:3605
+1113:0:3993
+1114:2:3606
+1115:0:3993
+1116:2:3607
+1117:0:3993
+1118:2:3608
+1119:0:3993
+1120:1:26
+1121:0:3993
+1122:2:3609
+1123:0:3993
+1124:1:30
+1125:1:31
+1126:1:35
+1127:1:39
+1128:1:40
+1129:1:44
+1130:1:52
+1131:1:53
+1132:1:57
+1133:1:61
+1134:1:62
+1135:1:57
+1136:1:61
+1137:1:62
+1138:1:66
+1139:1:73
+1140:1:80
+1141:1:81
+1142:1:88
+1143:1:93
+1144:1:100
+1145:1:101
+1146:1:100
+1147:1:101
+1148:1:108
+1149:1:112
+1150:0:3993
+1151:2:3608
+1152:0:3993
+1153:1:117
+1154:0:3993
+1155:2:3609
+1156:0:3993
+1157:2:3610
+1158:0:3993
+1159:2:3615
+1160:0:3993
+1161:2:3616
+1162:0:3993
+1163:2:3624
+1164:2:3625
+1165:2:3629
+1166:2:3633
+1167:2:3634
+1168:2:3638
+1169:2:3646
+1170:2:3647
+1171:2:3651
+1172:2:3655
+1173:2:3656
+1174:2:3651
+1175:2:3655
+1176:2:3656
+1177:2:3660
+1178:2:3667
+1179:2:3674
+1180:2:3675
+1181:2:3682
+1182:2:3687
+1183:2:3694
+1184:2:3695
+1185:2:3694
+1186:2:3695
+1187:2:3702
+1188:2:3706
+1189:0:3993
+1190:2:2693
+1191:2:3499
+1192:0:3993
+1193:2:2521
+1194:0:3993
+1195:2:2694
+1196:0:3993
+1197:2:2521
+1198:0:3993
+1199:2:2697
+1200:2:2698
+1201:2:2702
+1202:2:2703
+1203:2:2711
+1204:2:2712
+1205:2:2716
+1206:2:2717
+1207:2:2725
+1208:2:2730
+1209:2:2734
+1210:2:2735
+1211:2:2743
+1212:2:2744
+1213:2:2748
+1214:2:2749
+1215:2:2743
+1216:2:2744
+1217:2:2748
+1218:2:2749
+1219:2:2757
+1220:2:2762
+1221:2:2769
+1222:2:2770
+1223:2:2777
+1224:2:2782
+1225:2:2789
+1226:2:2790
+1227:2:2789
+1228:2:2790
+1229:2:2797
+1230:2:2806
+1231:0:3993
+1232:2:2521
+1233:0:3993
+1234:2:2810
+1235:2:2811
+1236:2:2812
+1237:2:2824
+1238:2:2825
+1239:2:2829
+1240:2:2830
+1241:2:2838
+1242:2:2843
+1243:2:2847
+1244:2:2848
+1245:2:2856
+1246:2:2857
+1247:2:2861
+1248:2:2862
+1249:2:2856
+1250:2:2857
+1251:2:2861
+1252:2:2862
+1253:2:2870
+1254:2:2875
+1255:2:2882
+1256:2:2883
+1257:2:2890
+1258:2:2895
+1259:2:2902
+1260:2:2903
+1261:2:2902
+1262:2:2903
+1263:2:2910
+1264:2:2923
+1265:2:2924
+1266:0:3993
+1267:2:2521
+1268:0:3993
+1269:2:3037
+1270:2:3038
+1271:2:3042
+1272:2:3043
+1273:2:3051
+1274:2:3052
+1275:2:3056
+1276:2:3057
+1277:2:3065
+1278:2:3070
+1279:2:3074
+1280:2:3075
+1281:2:3083
+1282:2:3084
+1283:2:3088
+1284:2:3089
+1285:2:3083
+1286:2:3084
+1287:2:3088
+1288:2:3089
+1289:2:3097
+1290:2:3102
+1291:2:3109
+1292:2:3110
+1293:2:3117
+1294:2:3122
+1295:2:3129
+1296:2:3130
+1297:2:3129
+1298:2:3130
+1299:2:3137
+1300:0:3993
+1301:2:2521
+1302:0:3993
+1303:2:3148
+1304:2:3149
+1305:2:3153
+1306:2:3154
+1307:2:3162
+1308:2:3163
+1309:2:3167
+1310:2:3168
+1311:2:3176
+1312:2:3181
+1313:2:3185
+1314:2:3186
+1315:2:3194
+1316:2:3195
+1317:2:3199
+1318:2:3200
+1319:2:3194
+1320:2:3195
+1321:2:3199
+1322:2:3200
+1323:2:3208
+1324:2:3213
+1325:2:3220
+1326:2:3221
+1327:2:3228
+1328:2:3233
+1329:2:3240
+1330:2:3241
+1331:2:3240
+1332:2:3241
+1333:2:3248
+1334:2:3257
+1335:0:3993
+1336:2:2521
+1337:0:3993
+1338:1:118
+1339:0:3993
+1340:1:120
+1341:0:3993
+1342:1:19
+1343:0:3993
+1344:1:126
+1345:1:127
+1346:1:131
+1347:1:132
+1348:1:140
+1349:1:141
+1350:1:145
+1351:1:146
+1352:1:154
+1353:1:159
+1354:1:163
+1355:1:164
+1356:1:172
+1357:1:173
+1358:1:177
+1359:1:178
+1360:1:172
+1361:1:173
+1362:1:177
+1363:1:178
+1364:1:186
+1365:1:191
+1366:1:198
+1367:1:199
+1368:1:206
+1369:1:211
+1370:1:218
+1371:1:219
+1372:1:218
+1373:1:219
+1374:1:226
+1375:0:3993
+1376:1:15
+1377:0:3993
+1378:1:16
+1379:0:3993
+1380:1:17
+1381:0:3993
+1382:1:118
+1383:0:3993
+1384:1:120
+1385:0:3993
+1386:1:19
+1387:0:3993
+1388:1:237
+1389:1:238
+1390:0:3993
+1391:1:15
+1392:0:3993
+1393:1:16
+1394:0:3993
+1395:1:17
+1396:0:3993
+1397:1:118
+1398:0:3993
+1399:1:120
+1400:0:3993
+1401:1:19
+1402:0:3993
+1403:1:244
+1404:1:245
+1405:1:249
+1406:1:250
+1407:1:258
+1408:1:259
+1409:1:263
+1410:1:264
+1411:1:272
+1412:1:277
+1413:1:281
+1414:1:282
+1415:1:290
+1416:1:291
+1417:1:295
+1418:1:296
+1419:1:290
+1420:1:291
+1421:1:295
+1422:1:296
+1423:1:304
+1424:1:309
+1425:1:316
+1426:1:317
+1427:1:324
+1428:1:329
+1429:1:336
+1430:1:337
+1431:1:336
+1432:1:337
+1433:1:344
+1434:0:3993
+1435:1:15
+1436:0:3993
+1437:1:16
+1438:0:3993
+1439:1:17
+1440:0:3993
+1441:1:118
+1442:0:3993
+1443:1:120
+1444:0:3993
+1445:1:19
+1446:0:3993
+1447:1:355
+1448:1:356
+1449:1:360
+1450:1:361
+1451:1:369
+1452:1:370
+1453:1:374
+1454:1:375
+1455:1:383
+1456:1:388
+1457:1:392
+1458:1:393
+1459:1:401
+1460:1:402
+1461:1:406
+1462:1:407
+1463:1:401
+1464:1:402
+1465:1:406
+1466:1:407
+1467:1:415
+1468:1:420
+1469:1:427
+1470:1:428
+1471:1:435
+1472:1:440
+1473:1:447
+1474:1:448
+1475:1:447
+1476:1:448
+1477:1:455
+1478:1:464
+1479:0:3993
+1480:1:15
+1481:0:3993
+1482:1:16
+1483:0:3993
+1484:1:17
+1485:0:3993
+1486:1:118
+1487:0:3993
+1488:1:120
+1489:0:3991
+1490:1:19
+1491:0:3997
+1492:1:1040
+1493:1:1041
+1494:1:1045
+1495:1:1046
+1496:1:1054
+1497:1:1055
+1498:1:1056
+1499:1:1068
+1500:1:1073
+1501:1:1077
+1502:1:1078
+1503:1:1086
+1504:1:1087
+1505:1:1091
+1506:1:1092
+1507:1:1086
+1508:1:1087
+1509:1:1091
+1510:1:1092
+1511:1:1100
+1512:1:1105
+1513:1:1112
+1514:1:1113
+1515:1:1120
+1516:1:1125
+1517:1:1132
+1518:1:1133
+1519:1:1132
+1520:1:1133
+1521:1:1140
+1522:0:3997
+1523:1:15
+1524:0:3997
+1525:1:16
+1526:0:3997
+1527:2:3261
+1528:2:3262
+1529:2:3263
+1530:2:3275
+1531:2:3276
+1532:2:3280
+1533:2:3281
+1534:2:3289
+1535:2:3294
+1536:2:3298
+1537:2:3299
+1538:2:3307
+1539:2:3308
+1540:2:3312
+1541:2:3313
+1542:2:3307
+1543:2:3308
+1544:2:3312
+1545:2:3313
+1546:2:3321
+1547:2:3326
+1548:2:3333
+1549:2:3334
+1550:2:3341
+1551:2:3346
+1552:2:3353
+1553:2:3354
+1554:2:3353
+1555:2:3354
+1556:2:3361
+1557:2:3371
+1558:0:3997
+1559:2:2521
+-1:-1:-1
+1560:0:3997
+1561:2:3377
+1562:2:3378
+1563:2:3382
+1564:2:3383
+1565:2:3391
+1566:2:3392
+1567:2:3396
+1568:2:3397
+1569:2:3405
+1570:2:3410
+1571:2:3414
+1572:2:3415
+1573:2:3423
+1574:2:3424
+1575:2:3428
+1576:2:3429
+1577:2:3423
+1578:2:3424
+1579:2:3428
+1580:2:3429
+1581:2:3437
+1582:2:3442
+1583:2:3449
+1584:2:3450
+1585:2:3457
+1586:2:3462
+1587:2:3469
+1588:2:3470
+1589:2:3469
+1590:2:3470
+1591:2:3477
+1592:0:3997
+1593:2:2521
+1594:0:3997
+1595:2:3261
+1596:2:3262
+1597:2:3266
+1598:2:3267
+1599:2:3275
+1600:2:3276
+1601:2:3280
+1602:2:3281
+1603:2:3289
+1604:2:3294
+1605:2:3298
+1606:2:3299
+1607:2:3307
+1608:2:3308
+1609:2:3312
+1610:2:3313
+1611:2:3307
+1612:2:3308
+1613:2:3312
+1614:2:3313
+1615:2:3321
+1616:2:3326
+1617:2:3333
+1618:2:3334
+1619:2:3341
+1620:2:3346
+1621:2:3353
+1622:2:3354
+1623:2:3353
+1624:2:3354
+1625:2:3361
+1626:2:3371
+1627:0:3997
+1628:2:2521
+1629:0:3997
+1630:2:3377
+1631:2:3378
+1632:2:3382
+1633:2:3383
+1634:2:3391
+1635:2:3392
+1636:2:3396
+1637:2:3397
+1638:2:3405
+1639:2:3410
+1640:2:3414
+1641:2:3415
+1642:2:3423
+1643:2:3424
+1644:2:3428
+1645:2:3429
+1646:2:3423
+1647:2:3424
+1648:2:3428
+1649:2:3429
+1650:2:3437
+1651:2:3442
+1652:2:3449
+1653:2:3450
+1654:2:3457
+1655:2:3462
+1656:2:3469
+1657:2:3470
+1658:2:3469
+1659:2:3470
+1660:2:3477
+1661:0:3997
+1662:2:2521
+1663:0:3997
+1664:2:3261
+1665:2:3262
+1666:2:3266
+1667:2:3267
+1668:2:3275
+1669:2:3276
+1670:2:3280
+1671:2:3281
+1672:2:3289
+1673:2:3294
+1674:2:3298
+1675:2:3299
+1676:2:3307
+1677:2:3308
+1678:2:3312
+1679:2:3313
+1680:2:3307
+1681:2:3308
+1682:2:3312
+1683:2:3313
+1684:2:3321
+1685:2:3326
+1686:2:3333
+1687:2:3334
+1688:2:3341
+1689:2:3346
+1690:2:3353
+1691:2:3354
+1692:2:3353
+1693:2:3354
+1694:2:3361
+1695:2:3371
+1696:0:3997
+1697:2:2521
This page took 0.29127 seconds and 4 git commands to generate.