1 /*** Generated by Spin Version 5.1.6 -- 9 May 2008 ***/
2 /*** From source: pan.spin ***/
5 #define _FILE_OFFSET_BITS 64
14 #if defined(WIN32) || defined(WIN64)
18 #include <sys/times.h>
20 #include <sys/types.h>
23 #define Offsetof(X, Y) ((unsigned long)(&(((X *)0)->Y)))
25 #define max(a,b) (((a)<(b)) ? (b) : (a))
28 int Printf(const char *fmt
, ...); /* prototype only */
34 State A_Root
; /* seed-state for cycles */
35 State now
; /* the full state-vector */
37 #if defined(C_States) && defined(HAS_TRACK)
39 c_update(uchar
*p_t_r
)
42 printf("c_update %u\n", p_t_r
);
46 c_revert(uchar
*p_t_r
)
49 printf("c_revert %u\n", p_t_r
);
82 #define onstack_now() (LL[trpt->j6] && LL[trpt->j7])
83 #define onstack_put() LL[trpt->j6]++; LL[trpt->j7]++
84 #define onstack_zap() LL[trpt->j6]--; LL[trpt->j7]--
86 #if !defined(SAFETY) && !defined(NOCOMP)
87 #define V_A (((now._a_t&1)?2:1) << (now._a_t&2))
88 #define A_V (((now._a_t&1)?1:2) << (now._a_t&2))
99 #define onstack_put() ;
100 #define onstack_zap() gstore((char *) &now, vsize, 4)
102 #if defined(FULLSTACK) && !defined(BITSTATE)
103 #define onstack_put() trpt->ostate = Lstate
104 #define onstack_zap() { \
106 trpt->ostate->tagged = \
107 (S_A)? (trpt->ostate->tagged&~V_A) : 0; \
114 #if !defined(NO_RESIZE) && !defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(SPACE) && NCORE==1
122 #if defined(BITSTATE) && !defined(NOREDUCE) && !defined(SAFETY)
123 unsigned int proviso
;
126 #if defined(CHECK) || (defined(COLLAPSE) && !defined(FULLSTACK))
129 #if !defined(SAFETY) || defined(REACH)
133 /* could cost 1 extra word: 4 bytes if 32-bit and 8 bytes if 64-bit */
135 uchar cpu_id
; /* id of cpu that created the state */
145 #if defined(AUTO_RESIZE) && !defined(BITSTATE)
151 typedef struct Trail
{
152 int st
; /* current state */
153 uchar pr
; /* process id */
154 uchar tau
; /* 8 bit-flags */
155 uchar o_pm
; /* 8 more bit-flags */
157 Meaning of bit
-flags
:
158 tau
&1 -> timeout enabled
159 tau
&2 -> request to enable timeout
1 level
up (in claim
)
160 tau
&4 -> current transition is a claim move
161 tau
&8 -> current transition is an atomic move
162 tau
&16 -> last move was truncated on stack
163 tau
&32 -> current transition is a preselected move
164 tau
&64 -> at least one next state is
not on the stack
165 tau
&128 -> current transition is a stutter move
166 o_pm
&1 -> the current pid moved
-- implements
else
167 o_pm
&2 -> this is an acceptance state
168 o_pm
&4 -> this is a progress state
169 o_pm
&8 -> fairness alg rule
1 undo mark
170 o_pm
&16 -> fairness alg rule
3 undo mark
171 o_pm
&32 -> fairness alg rule
2 undo mark
172 o_pm
&64 -> the current proc applied rule2
173 o_pm
&128 -> a fairness
, dummy move
- all procs blocked
176 uchar n_succ
; /* nr of successor states */
178 #if defined(FULLSTACK) && defined(MA) && !defined(BFS)
182 uchar o_n
, o_ot
; /* to save locals */
186 #if nstates_event<256
189 unsigned short o_event
;
199 #if defined(HAS_UNLESS) && !defined(BFS)
200 int e_state
; /* if escape trans - state of origin */
202 #if (defined(FULLSTACK) && !defined(MA)) || defined(BFS) || (NCORE>1)
203 struct H_el
*ostate
; /* pointer to stored state */
205 #if defined(CNTRSTACK) && !defined(BFS)
210 /* based on Qadeer&Rehof, Tacas 2005, LNCS 3440, pp. 93-107 */
212 #error "-DSCHED cannot be combined with -DNCORE (yet)"
228 long omaxdepth
=10000;
237 double quota
; /* time limit */
247 double memcnt
= (double) 0;
248 double memlim
= (double) (1<<30); /* 1 GB */
250 double mem_reserved
= (double) 0;
255 static long left
= 0L;
256 static double fragment
= (double) 0;
257 static unsigned long grow
;
259 unsigned int HASH_CONST
[] = {
260 /* asuming 4 bytes per int */
261 0x88888EEF, 0x00400007,
262 0x04c11db7, 0x100d4e63,
263 0x0fc22f87, 0x3ff0c3ff,
264 0x38e84cd7, 0x02b148e9,
265 0x98b2e49d, 0xb616d379,
266 0xa5247fd9, 0xbae92a15,
267 0xb91c8bc5, 0x8e5880f3,
268 0xacd7c069, 0xb4c44bb3,
269 0x2ead1fb7, 0x8e428171,
270 0xdbebd459, 0x828ae611,
271 0x6cb25933, 0x86cdd651,
272 0x9e8f5f21, 0xd5f8d8e7,
273 0x9c4e956f, 0xb5cf2c71,
274 0x2e805a6d, 0x33fc3a55,
275 0xaf203ed1, 0xe31f5909,
276 0x5276db35, 0x0c565ef7,
277 0x273d1aa5, 0x8923b1dd,
284 int done
=0, errors
=0, Nrun
=1;
286 char *c_stack_start
= (char *) 0;
287 double nstates
=0, nlinks
=0, truncs
=0, truncs2
=0;
288 double nlost
=0, nShadow
=0, hcmp
=0, ngrabs
=0;
289 #if defined(ZAPH) && defined(BITSTATE)
294 double midrv
=0, failedrv
=0, revrv
=0;
296 unsigned long nr_states
=0; /* nodes in DFA */
297 long Fa
=0, Fh
=0, Zh
=0, Zn
=0;
298 long PUT
=0, PROBE
=0, ZAPS
=0;
299 long Ccheck
=0, Cholds
=0;
300 int a_cycles
=0, upto
=1, strict
=0, verbose
= 0, signoff
= 0;
302 int gui
= 0, coltrace
= 0, readtrail
= 0;
303 int whichtrail
= 0, onlyproc
= -1, silent
= 0;
305 int state_tables
=0, fairness
=0, no_rck
=0, Nr_Trails
=0;
310 unsigned long mask
, nmask
;
312 int ssize
=23; /* 1 Mb */
314 int ssize
=19; /* 512K slots */
316 int hmax
=0, svmax
=0, smax
=0;
318 uchar
*noptr
; /* used by macro Pptr(x) */
320 void logval(char *, int);
321 void dumpranges(void);
325 extern void dfa_init(unsigned short);
326 extern int dfa_member(unsigned long);
327 extern int dfa_store(uchar
*);
328 unsigned int maxgs
= 0;
332 State comp_now
__attribute__ ((aligned (8)));
333 /* gcc 64-bit aligned for Itanium2 systems */
334 /* MAJOR runtime penalty if not used on those systems */
336 State comp_now
; /* compressed state vector */
340 uchar
*Mask
= (uchar
*) &comp_msk
;
343 static char *scratch
= (char *) &comp_tmp
;
345 Stack
*stack
; /* for queues, processes */
346 Svtack
*svtack
; /* for old state vectors */
348 static unsigned int hfns
= 3; /* new default */
350 static unsigned long j1
;
351 static unsigned long K1
, K2
;
352 static unsigned long j2
, j3
, j4
;
356 static long A_depth
= 0;
359 long nr_handoffs
= 0;
361 static uchar warned
= 0, iterative
= 0, exclusive
= 0, like_java
= 0, every_error
= 0;
362 static uchar noasserts
= 0, noends
= 0, bounded
= 0;
363 #if SYNC>0 && ASYNC==0
364 void set_recvs(void);
368 #define IfNotBlocked if (boq != -1) continue;
369 #define UnBlock boq = -1
371 #define IfNotBlocked /* cannot block */
372 #define UnBlock /* don't bother */
376 int (*bstore
)(char *, int);
377 int bstore_reg(char *, int);
378 int bstore_mod(char *, int);
380 void active_procs(void);
382 void do_the_search(void);
383 void find_shorter(int);
384 void iniglobals(void);
388 void ungrab_ints(int *, int);
390 #define Index(x, y) Boundcheck(x, y, II, tt, t)
392 #define Index(x, y) x
/* Air[t]: per-proctype pad count — presumably the number of trailing
 * state-vector bytes to mask out after adding a proc of type t (it is
 * consumed by the `Mask[vsize - k] = 1` padding loop in Addproc).
 * Values Air0..Air6 are generated constants — TODO confirm. */
394 short Air
[] = { (short) Air0
, (short) Air1
, (short) Air2
, (short) Air3
, (short) Air4
, (short) Air5
, (short) Air6
};
397 { int j
, h
= now
._nr_pr
;
401 uchar
*o_this
= this;
404 if (TstOnly
) return (h
< MAXPROC
);
407 /* redefine Index only within this procedure */
409 #define Index(x, y) Boundcheck(x, y, 0, 0, 0)
412 Uerror("too many processes");
414 case 0: j
= sizeof(P0
); break;
415 case 1: j
= sizeof(P1
); break;
416 case 2: j
= sizeof(P2
); break;
417 case 3: j
= sizeof(P3
); break;
418 case 4: j
= sizeof(P4
); break;
419 case 5: j
= sizeof(P5
); break;
420 case 6: j
= sizeof(P6
); break;
421 default: Uerror("bad proc - addproc");
424 proc_skip
[h
] = WS
-(vsize
%WS
);
428 for (k
= vsize
+ (int) proc_skip
[h
]; k
> vsize
; k
--)
429 Mask
[k
-1] = 1; /* align */
431 vsize
+= (int) proc_skip
[h
];
432 proc_offset
[h
] = vsize
;
436 write(svfd
, (uchar
*) &dummy
, sizeof(int)); /* mark */
437 write(svfd
, (uchar
*) &h
, sizeof(int));
438 write(svfd
, (uchar
*) &n
, sizeof(int));
440 write(svfd
, (uchar
*) &proc_offset
[h
], sizeof(int));
442 write(svfd
, (uchar
*) &proc_offset
[h
], sizeof(short));
444 write(svfd
, (uchar
*) &now
, vprefix
-4*sizeof(int)); /* padd */
448 if (fairness
&& ((int) now
._nr_pr
+ 1 >= (8*NFAIR
)/2))
449 { printf("pan: error: too many processes -- current");
450 printf(" max is %d procs (-DNFAIR=%d)\n",
451 (8*NFAIR
)/2 - 2, NFAIR
);
452 printf("\trecompile with -DNFAIR=%d\n",
461 for (k
= 1; k
<= Air
[n
]; k
++)
462 Mask
[vsize
- k
] = 1; /* pad */
463 Mask
[vsize
-j
] = 1; /* _pid */
465 hmax
= max(hmax
, vsize
);
466 if (vsize
>= VECTORSZ
)
467 { printf("pan: error, VECTORSZ too small, recompile pan.c");
468 printf(" with -DVECTORSZ=N with N>%d\n", (int) vsize
);
471 memset((char *)pptr(h
), 0, j
);
473 if (BASE
> 0 && h
> 0)
474 ((P0
*)this)->_pid
= h
-BASE
;
476 ((P0
*)this)->_pid
= h
;
479 ((P6
*)pptr(h
))->_t
= 6;
480 ((P6
*)pptr(h
))->_p
= 0;
484 case 5: /* :never: */
485 ((P5
*)pptr(h
))->_t
= 5;
486 ((P5
*)pptr(h
))->_p
= 5; reached5
[5]=1;
496 ((P4
*)pptr(h
))->_t
= 4;
497 ((P4
*)pptr(h
))->_p
= 42; reached4
[42]=1;
500 ((P4
*)pptr(h
))->i
= 0;
501 ((P4
*)pptr(h
))->j
= 0;
502 ((P4
*)pptr(h
))->sum
= 0;
503 ((P4
*)pptr(h
))->commit_sum
= 0;
505 logval(":init::i", ((P4
*)pptr(h
))->i
);
506 logval(":init::j", ((P4
*)pptr(h
))->j
);
507 logval(":init::sum", ((P4
*)pptr(h
))->sum
);
508 logval(":init::commit_sum", ((P4
*)pptr(h
))->commit_sum
);
514 case 3: /* cleaner */
515 ((P3
*)pptr(h
))->_t
= 3;
516 ((P3
*)pptr(h
))->_p
= 8; reached3
[8]=1;
526 ((P2
*)pptr(h
))->_t
= 2;
527 ((P2
*)pptr(h
))->_p
= 26; reached2
[26]=1;
530 ((P2
*)pptr(h
))->i
= 0;
531 ((P2
*)pptr(h
))->j
= 0;
533 logval("reader:i", ((P2
*)pptr(h
))->i
);
534 logval("reader:j", ((P2
*)pptr(h
))->j
);
541 ((P1
*)pptr(h
))->_t
= 1;
542 ((P1
*)pptr(h
))->_p
= 3; reached1
[3]=1;
545 ((P1
*)pptr(h
))->size
= 1;
546 ((P1
*)pptr(h
))->prev_off
= 0;
547 ((P1
*)pptr(h
))->new_off
= 0;
548 ((P1
*)pptr(h
))->tmp_commit
= 0;
549 ((P1
*)pptr(h
))->i
= 0;
550 ((P1
*)pptr(h
))->j
= 0;
552 logval("tracer:size", ((P1
*)pptr(h
))->size
);
553 logval("tracer:prev_off", ((P1
*)pptr(h
))->prev_off
);
554 logval("tracer:new_off", ((P1
*)pptr(h
))->new_off
);
555 logval("tracer:tmp_commit", ((P1
*)pptr(h
))->tmp_commit
);
556 logval("tracer:i", ((P1
*)pptr(h
))->i
);
557 logval("tracer:j", ((P1
*)pptr(h
))->j
);
563 case 0: /* switcher */
564 ((P0
*)pptr(h
))->_t
= 0;
565 ((P0
*)pptr(h
))->_p
= 11; reached0
[11]=1;
568 ((P0
*)pptr(h
))->prev_off
= 0;
569 ((P0
*)pptr(h
))->new_off
= 0;
570 ((P0
*)pptr(h
))->tmp_commit
= 0;
571 ((P0
*)pptr(h
))->size
= 0;
573 logval("switcher:prev_off", ((P0
*)pptr(h
))->prev_off
);
574 logval("switcher:new_off", ((P0
*)pptr(h
))->new_off
);
575 logval("switcher:tmp_commit", ((P0
*)pptr(h
))->tmp_commit
);
576 logval("switcher:size", ((P0
*)pptr(h
))->size
);
587 #define Index(x, y) Boundcheck(x, y, II, tt, t)
591 #if defined(BITSTATE) && defined(COLLAPSE)
592 /* just to allow compilation, to generate the error */
/*
 * Stub for collapse-compression of process-local state.
 * Present only so the generated code compiles when BITSTATE and
 * COLLAPSE are combined (an unsupported configuration); always
 * reports an ordinal of zero.
 */
long
col_p(int i, char *z)
{
	(void) i;	/* unused in the stub */
	(void) z;	/* unused in the stub */
	return 0L;
}
/*
 * Stub for collapse-compression of channel (queue) state.
 * Companion to the col_p stub above: exists only to satisfy the
 * linker when BITSTATE and COLLAPSE are combined; always yields 0.
 */
long
col_q(int i, char *z)
{
	(void) i;	/* unused in the stub */
	(void) z;	/* unused in the stub */
	return 0L;
}
599 col_p(int i
, char *z
)
600 { int j
, k
; unsigned long ordinal(char *, long, short);
602 P0
*ptr
= (P0
*) pptr(i
);
604 case 0: j
= sizeof(P0
); break;
605 case 1: j
= sizeof(P1
); break;
606 case 2: j
= sizeof(P2
); break;
607 case 3: j
= sizeof(P3
); break;
608 case 4: j
= sizeof(P4
); break;
609 case 5: j
= sizeof(P5
); break;
610 case 6: j
= sizeof(P6
); break;
611 default: Uerror("bad proctype - collapse");
613 if (z
) x
= z
; else x
= scratch
;
614 y
= (char *) ptr
; k
= proc_offset
[i
];
615 for ( ; j
> 0; j
--, y
++)
616 if (!Mask
[k
++]) *x
++ = *y
;
617 for (j
= 0; j
< WS
-1; j
++)
620 if (z
) return (long) (x
- z
);
621 return ordinal(scratch
, x
-scratch
, (short) (2+ptr
->_t
));
628 memset((char *)&now
, 0, sizeof(State
));
629 vsize
= (unsigned long) (sizeof(State
) - VECTORSZ
);
633 /* optional provisioning statements, e.g. to */
634 /* set hidden variables, used as constants */
639 Maxbody
= max(Maxbody
, ((int) sizeof(P0
)));
640 Maxbody
= max(Maxbody
, ((int) sizeof(P1
)));
641 Maxbody
= max(Maxbody
, ((int) sizeof(P2
)));
642 Maxbody
= max(Maxbody
, ((int) sizeof(P3
)));
643 Maxbody
= max(Maxbody
, ((int) sizeof(P4
)));
644 Maxbody
= max(Maxbody
, ((int) sizeof(P5
)));
645 Maxbody
= max(Maxbody
, ((int) sizeof(P6
)));
646 reached
[0] = reached0
;
647 reached
[1] = reached1
;
648 reached
[2] = reached2
;
649 reached
[3] = reached3
;
650 reached
[4] = reached4
;
651 reached
[5] = reached5
;
652 reached
[6] = reached6
;
653 accpstate
[0] = (uchar
*) emalloc(nstates0
);
654 accpstate
[1] = (uchar
*) emalloc(nstates1
);
655 accpstate
[2] = (uchar
*) emalloc(nstates2
);
656 accpstate
[3] = (uchar
*) emalloc(nstates3
);
657 accpstate
[4] = (uchar
*) emalloc(nstates4
);
658 accpstate
[5] = (uchar
*) emalloc(nstates5
);
659 accpstate
[6] = (uchar
*) emalloc(nstates6
);
660 progstate
[0] = (uchar
*) emalloc(nstates0
);
661 progstate
[1] = (uchar
*) emalloc(nstates1
);
662 progstate
[2] = (uchar
*) emalloc(nstates2
);
663 progstate
[3] = (uchar
*) emalloc(nstates3
);
664 progstate
[4] = (uchar
*) emalloc(nstates4
);
665 progstate
[5] = (uchar
*) emalloc(nstates5
);
666 progstate
[6] = (uchar
*) emalloc(nstates6
);
667 loopstate0
= loopstate
[0] = (uchar
*) emalloc(nstates0
);
668 loopstate1
= loopstate
[1] = (uchar
*) emalloc(nstates1
);
669 loopstate2
= loopstate
[2] = (uchar
*) emalloc(nstates2
);
670 loopstate3
= loopstate
[3] = (uchar
*) emalloc(nstates3
);
671 loopstate4
= loopstate
[4] = (uchar
*) emalloc(nstates4
);
672 loopstate5
= loopstate
[5] = (uchar
*) emalloc(nstates5
);
673 loopstate6
= loopstate
[6] = (uchar
*) emalloc(nstates6
);
674 stopstate
[0] = (uchar
*) emalloc(nstates0
);
675 stopstate
[1] = (uchar
*) emalloc(nstates1
);
676 stopstate
[2] = (uchar
*) emalloc(nstates2
);
677 stopstate
[3] = (uchar
*) emalloc(nstates3
);
678 stopstate
[4] = (uchar
*) emalloc(nstates4
);
679 stopstate
[5] = (uchar
*) emalloc(nstates5
);
680 stopstate
[6] = (uchar
*) emalloc(nstates6
);
681 visstate
[0] = (uchar
*) emalloc(nstates0
);
682 visstate
[1] = (uchar
*) emalloc(nstates1
);
683 visstate
[2] = (uchar
*) emalloc(nstates2
);
684 visstate
[3] = (uchar
*) emalloc(nstates3
);
685 visstate
[4] = (uchar
*) emalloc(nstates4
);
686 visstate
[5] = (uchar
*) emalloc(nstates5
);
687 visstate
[6] = (uchar
*) emalloc(nstates6
);
688 mapstate
[0] = (short *) emalloc(nstates0
* sizeof(short));
689 mapstate
[1] = (short *) emalloc(nstates1
* sizeof(short));
690 mapstate
[2] = (short *) emalloc(nstates2
* sizeof(short));
691 mapstate
[3] = (short *) emalloc(nstates3
* sizeof(short));
692 mapstate
[4] = (short *) emalloc(nstates4
* sizeof(short));
693 mapstate
[5] = (short *) emalloc(nstates5
* sizeof(short));
694 mapstate
[6] = (short *) emalloc(nstates6
* sizeof(short));
702 NrStates
[0] = nstates0
;
703 NrStates
[1] = nstates1
;
704 NrStates
[2] = nstates2
;
705 NrStates
[3] = nstates3
;
706 NrStates
[4] = nstates4
;
707 NrStates
[5] = nstates5
;
708 NrStates
[6] = nstates6
;
716 stopstate
[0][endstate0
] = 1;
717 stopstate
[1][endstate1
] = 1;
718 stopstate
[2][endstate2
] = 1;
719 stopstate
[3][endstate3
] = 1;
720 stopstate
[4][endstate4
] = 1;
721 stopstate
[5][endstate5
] = 1;
722 stopstate
[6][endstate6
] = 1;
724 stopstate
[1][49] = 1;
725 retrans(0, nstates0
, start0
, src_ln0
, reached0
, loopstate0
);
726 retrans(1, nstates1
, start1
, src_ln1
, reached1
, loopstate1
);
727 retrans(2, nstates2
, start2
, src_ln2
, reached2
, loopstate2
);
728 retrans(3, nstates3
, start3
, src_ln3
, reached3
, loopstate3
);
729 retrans(4, nstates4
, start4
, src_ln4
, reached4
, loopstate4
);
730 retrans(5, nstates5
, start5
, src_ln5
, reached5
, loopstate5
);
732 { printf("\nTransition Type: ");
733 printf("A=atomic; D=d_step; L=local; G=global\n");
734 printf("Source-State Labels: ");
735 printf("p=progress; e=end; a=accept;\n");
737 printf("Note: statement merging was used. Only the first\n");
738 printf(" stmnt executed in each merge sequence is shown\n");
739 printf(" (use spin -a -o3 to disable statement merging)\n");
744 #if defined(VERI) && !defined(NOREDUCE) && !defined(NP)
753 { printf("warning: for p.o. reduction to be valid ");
754 printf("the never claim must be stutter-invariant\n");
755 printf("(never claims generated from LTL ");
756 printf("formulae are stutter-invariant)\n");
759 UnBlock
; /* disable rendez-vous */
762 { udmem
*= 1024L*1024L;
765 { void init_SS(unsigned long);
766 init_SS((unsigned long) udmem
);
769 SS
= (uchar
*) emalloc(udmem
);
773 { void init_SS(unsigned long);
774 init_SS(ONE_L
<<(ssize
-3));
777 SS
= (uchar
*) emalloc(ONE_L
<<(ssize
-3));
782 #if defined(FULLSTACK) && defined(BITSTATE)
785 #if defined(CNTRSTACK) && !defined(BFS)
786 LL
= (uchar
*) emalloc(ONE_L
<<(ssize
-3));
788 stack
= ( Stack
*) emalloc(sizeof(Stack
));
789 svtack
= (Svtack
*) emalloc(sizeof(Svtack
));
790 /* a place to point for Pptr of non-running procs: */
791 noptr
= (uchar
*) emalloc(Maxbody
* sizeof(char));
794 write(svfd
, (uchar
*) &vprefix
, sizeof(int));
797 Addproc(VERI
); /* never - pid = 0 */
799 active_procs(); /* started after never */
801 now
._event
= start_event
;
802 reached
[EVENT_TRACE
][start_event
] = 1;
812 if (--Nrun
> 0 && HASH_CONST
[++HASH_NR
])
813 { printf("Run %d:\n", HASH_NR
);
816 memset(SS
, 0, ONE_L
<<(ssize
-3));
818 memset(LL
, 0, ONE_L
<<(ssize
-3));
821 memset((uchar
*) S_Tab
, 0,
822 maxdepth
*sizeof(struct H_el
*));
824 nstates
=nlinks
=truncs
=truncs2
=ngrabs
= 0;
825 nlost
=nShadow
=hcmp
= 0;
827 PUT
=PROBE
=ZAPS
=Ccheck
=Cholds
= 0;
833 int provided(int, uchar
, int, Trans
*);
836 #define GLOBAL_LOCK (0)
838 #define CS_N (256*NCORE)
841 #define NR_QS (NCORE)
842 #define CS_NR (CS_N+1) /* 2^N + 1, nr critical sections */
843 #define GQ_RD GLOBAL_LOCK
844 #define GQ_WR GLOBAL_LOCK
845 #define CS_ID (1 + (int) (j1 & (CS_N-1))) /* mask: 2^N - 1, zero reserved */
846 #define QLOCK(n) (1+n)
848 #define NR_QS (NCORE+1)
849 #define CS_NR (CS_N+3)
852 #define CS_ID (3 + (int) (j1 & (CS_N-1)))
853 #define QLOCK(n) (3+n)
856 void e_critical(int);
857 void x_critical(int);
860 #define enter_critical(w) e_critical(w)
861 #define leave_critical(w) x_critical(w)
864 #define enter_critical(w) { if (w < 1+NCORE) e_critical(w); }
865 #define leave_critical(w) { if (w < 1+NCORE) x_critical(w); }
867 #define enter_critical(w) { if (w < 3+NCORE) e_critical(w); }
868 #define leave_critical(w) { if (w < 3+NCORE) x_critical(w); }
873 cpu_printf(const char *fmt
, ...)
875 enter_critical(GLOBAL_LOCK
); /* printing */
876 printf("cpu%d: ", core_id
);
882 leave_critical(GLOBAL_LOCK
);
887 cpu_printf(const char *fmt
, ...)
896 Printf(const char *fmt
, ...)
897 { /* Make sure the args to Printf
898 * are always evaluated (e.g., they
899 * could contain a run stmnt)
900 * but do not generate the output
901 * during verification runs
902 * unless explicitly wanted
903 * If this fails on your system
904 * compile SPIN itself -DPRINTF
905 * and this code is not generated
924 extern void printm(int);
926 #define getframe(i) &trail[i];
928 static long HHH
, DDD
, hiwater
;
929 static long CNT1
, CNT2
;
930 static int stackwrite
;
931 static int stackread
;
932 static Trail frameptr
;
939 if (d
>= (CNT1
-CNT2
)*DDD
)
940 return &trail
[d
- (CNT1
-CNT2
)*DDD
];
943 && (stackread
= open(stackfile
, 0)) < 0)
944 { printf("getframe: cannot open %s\n", stackfile
);
947 if (lseek(stackread
, d
* (off_t
) sizeof(Trail
), SEEK_SET
) == -1
948 || read(stackread
, &frameptr
, sizeof(Trail
)) != sizeof(Trail
))
949 { printf("getframe: frame read error\n");
955 #if !defined(SAFETY) && !defined(BITSTATE)
956 #if !defined(FULLSTACK) || defined(MA)
957 #define depth_of(x) A_depth /* an estimate */
960 depth_of(struct H_el
*s
)
962 for (d
= 0; d
<= A_depth
; d
++)
967 printf("pan: cannot happen, depth_of\n");
973 extern void cleanup_shm(int);
974 volatile unsigned int *search_terminated
; /* to signal early termination */
978 { void stop_timer(void);
980 { printf("--end of output--\n");
983 if (search_terminated
!= NULL
)
984 { *search_terminated
|= 1; /* pan_exit */
987 { void dsk_stats(void);
991 if (!state_tables
&& !readtrail
)
1004 transmognify(char *s
)
1006 static char buf
[2][2048];
1008 if (!s
|| strlen(s
) > 2047) return s
;
1009 memset(buf
[0], 0, 2048);
1010 memset(buf
[1], 0, 2048);
1011 strcpy(buf
[toggle
], s
);
1012 while ((v
= strstr(buf
[toggle
], "{c_code")))
1014 strcpy(buf
[1-toggle
], buf
[toggle
]);
1015 for (w
= v
; *w
!= '}' && *w
!= '\0'; w
++) /* skip */;
1016 if (*w
!= '}') return s
;
1018 for (i
= 0; code_lookup
[i
].c
; i
++)
1019 if (strcmp(v
, code_lookup
[i
].c
) == 0
1020 && strlen(v
) == strlen(code_lookup
[i
].c
))
1021 { if (strlen(buf
[1-toggle
])
1022 + strlen(code_lookup
[i
].t
)
1025 strcat(buf
[1-toggle
], code_lookup
[i
].t
);
1028 strcat(buf
[1-toggle
], w
);
1029 toggle
= 1 - toggle
;
1031 buf
[toggle
][2047] = '\0';
/*
 * Fallback transition-text transformer used when no embedded c_code
 * fragments need substitution: the input string is handed back
 * unchanged (identity mapping, no copy, no allocation).
 */
char *
transmognify(char *s)
{
	return s;
}
1039 add_src_txt(int ot
, int tt
)
1043 for (t
= trans
[ot
][tt
]; t
; t
= t
->nxt
)
1045 q
= transmognify(t
->tp
);
1046 for ( ; q
&& *q
; q
++)
1056 { static int wrap_in_progress
= 0;
1060 if (wrap_in_progress
++) return;
1062 printf("spin: trail ends after %ld steps\n", depth
);
1064 { if (onlyproc
>= now
._nr_pr
) { pan_exit(0); }
1067 printf("%3ld: proc %d (%s) ",
1068 depth
, II
, procname
[z
->_t
]);
1069 for (i
= 0; src_all
[i
].src
; i
++)
1070 if (src_all
[i
].tp
== (int) z
->_t
)
1071 { printf(" line %3d",
1072 src_all
[i
].src
[z
->_p
]);
1075 printf(" (state %2d)", z
->_p
);
1076 if (!stopstate
[z
->_t
][z
->_p
])
1077 printf(" (invalid end state)");
1079 add_src_txt(z
->_t
, z
->_p
);
1082 printf("#processes %d:\n", now
._nr_pr
);
1083 if (depth
< 0) depth
= 0;
1084 for (II
= 0; II
< now
._nr_pr
; II
++)
1085 { z
= (P0
*)pptr(II
);
1086 printf("%3ld: proc %d (%s) ",
1087 depth
, II
, procname
[z
->_t
]);
1088 for (i
= 0; src_all
[i
].src
; i
++)
1089 if (src_all
[i
].tp
== (int) z
->_t
)
1090 { printf(" line %3d",
1091 src_all
[i
].src
[z
->_p
]);
1094 printf(" (state %2d)", z
->_p
);
1095 if (!stopstate
[z
->_t
][z
->_p
])
1096 printf(" (invalid end state)");
1098 add_src_txt(z
->_t
, z
->_p
);
1101 for (II
= 0; II
< now
._nr_pr
; II
++)
1102 { z
= (P0
*)pptr(II
);
1103 c_locals(II
, z
->_t
);
1117 int candidate_files
;
1119 if (trailfilename
!= NULL
)
1120 { fd
= fopen(trailfilename
, "r");
1122 { printf("pan: cannot find %s\n", trailfilename
);
1129 candidate_files
= 0;
1131 strcpy(MyFile
, TrailFile
);
1132 do { /* see if there's more than one possible trailfile */
1134 { sprintf(fnm
, "%s%d.%s",
1135 MyFile
, whichtrail
, tprefix
);
1136 fd
= fopen(fnm
, "r");
1138 { candidate_files
++;
1140 printf("trail%d: %s\n",
1141 candidate_files
, fnm
);
1144 if ((q
= strchr(MyFile
, '.')) != NULL
)
1146 sprintf(fnm
, "%s%d.%s",
1147 MyFile
, whichtrail
, tprefix
);
1149 fd
= fopen(fnm
, "r");
1151 { candidate_files
++;
1153 printf("trail%d: %s\n",
1154 candidate_files
, fnm
);
1158 { sprintf(fnm
, "%s.%s", MyFile
, tprefix
);
1159 fd
= fopen(fnm
, "r");
1161 { candidate_files
++;
1163 printf("trail%d: %s\n",
1164 candidate_files
, fnm
);
1167 if ((q
= strchr(MyFile
, '.')) != NULL
)
1169 sprintf(fnm
, "%s.%s", MyFile
, tprefix
);
1171 fd
= fopen(fnm
, "r");
1173 { candidate_files
++;
1175 printf("trail%d: %s\n",
1176 candidate_files
, fnm
);
1180 sprintf(tprefix
, "cpu%d_trail", try_core
++);
1181 } while (try_core
<= NCORE
);
1183 if (candidate_files
!= 1)
1184 { if (verbose
!= 100)
1185 { printf("error: there are %d trail files:\n",
1190 { printf("pan: rm or mv all except one\n");
1194 strcpy(MyFile
, TrailFile
); /* restore */
1198 { sprintf(fnm
, "%s%d.%s", MyFile
, whichtrail
, tprefix
);
1199 fd
= fopen(fnm
, "r");
1200 if (fd
== NULL
&& (q
= strchr(MyFile
, '.')))
1202 sprintf(fnm
, "%s%d.%s",
1203 MyFile
, whichtrail
, tprefix
);
1205 fd
= fopen(fnm
, "r");
1208 { sprintf(fnm
, "%s.%s", MyFile
, tprefix
);
1209 fd
= fopen(fnm
, "r");
1210 if (fd
== NULL
&& (q
= strchr(MyFile
, '.')))
1212 sprintf(fnm
, "%s.%s", MyFile
, tprefix
);
1214 fd
= fopen(fnm
, "r");
1217 { if (try_core
< NCORE
)
1218 { tprefix
= MySuffix
;
1219 sprintf(tprefix
, "cpu%d_trail", try_core
++);
1222 printf("pan: cannot find trailfile %s\n", fnm
);
1226 #if NCORE>1 && defined(SEP_STATE)
1227 { void set_root(void); /* for partial traces from local root */
1234 uchar
do_transit(Trans
*, short);
1240 int i
, t_id
, lastnever
=-1; short II
;
1244 fd
= findtrail(); /* exits if unsuccessful */
1245 while (fscanf(fd
, "%ld:%d:%d\n", &depth
, &i
, &t_id
) == 3)
1247 printf("<<<<<START OF CYCLE>>>>>\n");
1251 { printf("pan: Error, proc %d invalid pid ", i
);
1252 printf("transition %d\n", t_id
);
1257 for (t
= trans
[z
->_t
][z
->_p
]; t
; t
= t
->nxt
)
1258 if (t
->t_id
== (T_ID
) t_id
)
1261 { for (i
= 0; i
< NrStates
[z
->_t
]; i
++)
1262 { t
= trans
[z
->_t
][i
];
1263 if (t
&& t
->t_id
== (T_ID
) t_id
)
1264 { printf("\tRecovered at state %d\n", i
);
1268 printf("pan: Error, proc %d type %d state %d: ",
1270 printf("transition %d not found\n", t_id
);
1271 printf("pan: list of possible transitions in this process:\n");
1272 if (z
->_t
>= 0 && z
->_t
<= _NP_
)
1273 for (t
= trans
[z
->_t
][z
->_p
]; t
; t
= t
->nxt
)
1274 printf(" t_id %d -- case %d, [%s]\n",
1275 t
->t_id
, t
->forw
, t
->tp
);
1276 break; /* pan_exit(1); */
1279 q
= transmognify(t
->tp
);
1280 if (gui
) simvals
[0] = '\0';
1283 if (!do_transit(t
, II
))
1284 { if (onlyproc
>= 0 && II
!= onlyproc
)
1286 printf("pan: error, next transition UNEXECUTABLE on replay\n");
1287 printf(" most likely causes: missing c_track statements\n");
1288 printf(" or illegal side-effects in c_expr statements\n");
1290 if (onlyproc
>= 0 && II
!= onlyproc
)
1293 { printf("%3ld: proc %2d (%s) ", depth
, II
, procname
[z
->_t
]);
1294 for (i
= 0; src_all
[i
].src
; i
++)
1295 if (src_all
[i
].tp
== (int) z
->_t
)
1296 { printf(" line %3d \"%s\" ",
1297 src_all
[i
].src
[z
->_p
], PanSource
);
1300 printf("(state %d) trans {%d,%d} [%s]\n",
1301 z
->_p
, t_id
, t
->forw
, q
?q
:"");
1303 for (i
= 0; i
< now
._nr_pr
; i
++)
1304 { c_locals(i
, ((P0
*)pptr(i
))->_t
);
1307 if (strcmp(procname
[z
->_t
], ":never:") == 0)
1308 { if (lastnever
!= (int) z
->_p
)
1309 { for (i
= 0; src_all
[i
].src
; i
++)
1310 if (src_all
[i
].tp
== (int) z
->_t
)
1311 { printf("MSC: ~G %d\n",
1312 src_all
[i
].src
[z
->_p
]);
1315 if (!src_all
[i
].src
)
1316 printf("MSC: ~R %d\n", z
->_p
);
1321 if (strcmp(procname
[z
->_t
], ":np_:") != 0)
1323 sameas
: if (no_rck
) goto moveon
;
1325 { printf("%ld: ", depth
);
1326 for (i
= 0; i
< II
; i
++)
1328 printf("%s(%d):", procname
[z
->_t
], II
);
1329 printf("[%s]\n", q
?q
:"");
1331 { if (strlen(simvals
) > 0) {
1332 printf("%3ld: proc %2d (%s)",
1333 depth
, II
, procname
[z
->_t
]);
1334 for (i
= 0; src_all
[i
].src
; i
++)
1335 if (src_all
[i
].tp
== (int) z
->_t
)
1336 { printf(" line %3d \"%s\" ",
1337 src_all
[i
].src
[z
->_p
], PanSource
);
1340 printf("(state %d) [values: %s]\n", z
->_p
, simvals
);
1342 printf("%3ld: proc %2d (%s)",
1343 depth
, II
, procname
[z
->_t
]);
1344 for (i
= 0; src_all
[i
].src
; i
++)
1345 if (src_all
[i
].tp
== (int) z
->_t
)
1346 { printf(" line %3d \"%s\" ",
1347 src_all
[i
].src
[z
->_p
], PanSource
);
1350 printf("(state %d) [%s]\n", z
->_p
, q
?q
:"");
1353 moveon
: z
->_p
= t
->st
;
1362 for (i
= 0; i
< now
._nr_pr
; i
++)
1363 { z
= (P0
*)pptr(i
);
1364 if (z
->_t
== (unsigned) pt
)
1365 return BASE
+z
->_pid
;
1370 void check_claim(int);
1373 #if !defined(HASH64) && !defined(HASH32)
1376 #if defined(HASH32) && defined(SAFETY) && !defined(SFH) && !defined(SPACE)
1379 #if defined(SFH) && (defined(BITSTATE) || defined(COLLAPSE) || defined(HC) || defined(HASH64))
1382 #if defined(SFH) && !defined(NOCOMP)
1383 #define NOCOMP /* go for speed */
1385 #if NCORE>1 && !defined(GLOB_HEAP)
1386 #define SEP_HEAP /* version 5.1.2 */
1391 bstore_mod(char *v
, int n
) /* hasharray size not a power of two */
1392 { unsigned long x
, y
;
1395 d_hash((uchar
*) v
, n
); /* sets j3, j4, K1, K2 */
1398 { if (!(SS
[x
%udmem
]&(1<<y
))) break;
1401 printf("Old bitstate\n");
1410 if (rand()%100 > RANDSTOR
) return 0;
1413 { SS
[x
%udmem
] |= (1<<y
);
1414 if (i
== hfns
) break;
1420 printf("New bitstate\n");
1428 bstore_reg(char *v
, int n
) /* extended hashing, Peter Dillinger, 2004 */
1429 { unsigned long x
, y
;
1432 d_hash((uchar
*) v
, n
); /* sets j1-j4 */
1435 { if (!(SS
[x
]&(1<<y
))) break;
1438 printf("Old bitstate\n");
1442 x
= (x
+ j1
+ i
) & nmask
;
1447 if (rand()%100 > RANDSTOR
) return 0;
1451 if (i
== hfns
) break;
1452 x
= (x
+ j1
+ i
) & nmask
;
1457 printf("New bitstate\n");
1465 unsigned long TMODE
= 0666; /* file permission bits for trail files */
1468 char snap
[64], fnm
[512];
1475 int w_flags
= O_CREAT
|O_WRONLY
|O_TRUNC
;
1477 if (exclusive
== 1 && iterative
== 0)
1478 { w_flags
|= O_EXCL
;
1481 q
= strrchr(TrailFile
, '/');
1482 if (q
== NULL
) q
= TrailFile
; else q
++;
1483 strcpy(MyFile
, q
); /* TrailFile is not a writable string */
1485 if (iterative
== 0 && Nr_Trails
++ > 0)
1486 { sprintf(fnm
, "%s%d.%s",
1487 MyFile
, Nr_Trails
-1, tprefix
);
1491 sprintf(fnm
, "%s%d.%s", MyFile
, getpid(), tprefix
);
1493 sprintf(fnm
, "%s.%s", MyFile
, tprefix
);
1496 if ((fd
= open(fnm
, w_flags
, TMODE
)) < 0)
1497 { if ((q
= strchr(MyFile
, '.')))
1499 if (iterative
== 0 && Nr_Trails
-1 > 0)
1500 sprintf(fnm
, "%s%d.%s",
1501 MyFile
, Nr_Trails
-1, tprefix
);
1503 sprintf(fnm
, "%s.%s", MyFile
, tprefix
);
1505 fd
= open(fnm
, w_flags
, TMODE
);
1508 { printf("pan: cannot create %s\n", fnm
);
1512 #if NCORE>1 && (defined(SEP_STATE) || !defined(FULL_TRAIL))
1513 void write_root(void);
1516 printf("pan: wrote %s\n", fnm
);
1523 #define FREQ (1000000)
1531 typedef struct SV_Hold
{
1534 struct SV_Hold
*nxt
;
1537 typedef struct EV_Hold
{
1545 struct EV_Hold
*nxt
;
1548 typedef struct BFS_Trail
{
1553 struct H_el
*lstate
;
1556 struct BFS_Trail
*nxt
;
1559 BFS_Trail
*bfs_trail
, *bfs_bot
, *bfs_free
;
1561 SV_Hold
*svhold
, *svfree
;
1565 #define BFS_LIMIT 100000
1567 #ifndef BFS_DSK_LIMIT
1568 #define BFS_DSK_LIMIT 1000000
1570 #if defined(WIN32) || defined(WIN64)
1571 #define RFLAGS (O_RDONLY|O_BINARY)
1572 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC|O_BINARY)
1574 #define RFLAGS (O_RDONLY)
1575 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC)
1577 long bfs_size_limit
;
1578 int bfs_dsk_write
= -1;
1579 int bfs_dsk_read
= -1;
1580 long bfs_dsk_writes
, bfs_dsk_reads
;
1581 int bfs_dsk_seqno_w
, bfs_dsk_seqno_r
;
1584 uchar
do_reverse(Trans
*, short, uchar
);
1585 void snapshot(void);
1589 { SV_Hold
*h
= (SV_Hold
*) 0, *oh
;
1592 for (h
= svfree
; h
; oh
= h
, h
= h
->nxt
)
1598 h
->nxt
= (SV_Hold
*) 0;
1602 { h
= (SV_Hold
*) 0;
1609 { h
= (SV_Hold
*) emalloc(sizeof(SV_Hold
));
1612 if (bfs_size_limit
>= BFS_LIMIT
)
1613 { h
->sv
= (State
*) 0; /* means: read disk */
1614 bfs_dsk_writes
++; /* count */
1615 if (bfs_dsk_write
< 0 /* file descriptor */
1616 || bfs_dsk_writes
%BFS_DSK_LIMIT
== 0)
1618 if (bfs_dsk_write
>= 0)
1619 { (void) close(bfs_dsk_write
);
1621 sprintf(dsk_nm
, "pan_bfs_%d.tmp", bfs_dsk_seqno_w
++);
1622 bfs_dsk_write
= open(dsk_nm
, WFLAGS
, 0644);
1623 if (bfs_dsk_write
< 0)
1624 { Uerror("could not create tmp disk file");
1626 printf("pan: created disk file %s\n", dsk_nm
);
1628 if (write(bfs_dsk_write
, (char *) &now
, n
) != n
)
1629 { Uerror("aborting -- disk write failed (disk full?)");
1631 return h
; /* no memcpy */
1635 h
->sv
= (State
*) emalloc(sizeof(State
) - VECTORSZ
+ n
);
1638 memcpy((char *)h
->sv
, (char *)&now
, n
);
1645 static EV_Hold
*kept
= (EV_Hold
*) 0;
1647 for (h
= kept
; h
; h
= h
->nxt
)
1649 && (memcmp((char *) Mask
, (char *) h
->sv
, n
) == 0)
1650 && (now
._nr_pr
== h
->nrpr
)
1651 && (now
._nr_qs
== h
->nrqs
)
1653 && (memcmp((char *) proc_offset
, (char *) h
->po
, now
._nr_pr
* sizeof(int)) == 0)
1654 && (memcmp((char *) q_offset
, (char *) h
->qo
, now
._nr_qs
* sizeof(int)) == 0)
1656 && (memcmp((char *) proc_offset
, (char *) h
->po
, now
._nr_pr
* sizeof(short)) == 0)
1657 && (memcmp((char *) q_offset
, (char *) h
->qo
, now
._nr_qs
* sizeof(short)) == 0)
1659 && (memcmp((char *) proc_skip
, (char *) h
->ps
, now
._nr_pr
* sizeof(uchar
)) == 0)
1660 && (memcmp((char *) q_skip
, (char *) h
->qs
, now
._nr_qs
* sizeof(uchar
)) == 0))
1663 { h
= (EV_Hold
*) emalloc(sizeof(EV_Hold
));
1665 h
->nrpr
= now
._nr_pr
;
1666 h
->nrqs
= now
._nr_qs
;
1668 h
->sv
= (char *) emalloc(n
* sizeof(char));
1669 memcpy((char *) h
->sv
, (char *) Mask
, n
);
1672 { h
->ps
= (char *) emalloc(now
._nr_pr
* sizeof(int));
1673 memcpy((char *) h
->ps
, (char *) proc_skip
, now
._nr_pr
* sizeof(uchar
));
1675 h
->po
= (char *) emalloc(now
._nr_pr
* sizeof(int));
1676 memcpy((char *) h
->po
, (char *) proc_offset
, now
._nr_pr
* sizeof(int));
1678 h
->po
= (char *) emalloc(now
._nr_pr
* sizeof(short));
1679 memcpy((char *) h
->po
, (char *) proc_offset
, now
._nr_pr
* sizeof(short));
1683 { h
->qs
= (char *) emalloc(now
._nr_qs
* sizeof(int));
1684 memcpy((char *) h
->qs
, (char *) q_skip
, now
._nr_qs
* sizeof(uchar
));
1686 h
->qo
= (char *) emalloc(now
._nr_qs
* sizeof(int));
1687 memcpy((char *) h
->qo
, (char *) q_offset
, now
._nr_qs
* sizeof(int));
1689 h
->qo
= (char *) emalloc(now
._nr_qs
* sizeof(short));
1690 memcpy((char *) h
->qo
, (char *) q_offset
, now
._nr_qs
* sizeof(short));
1705 for (h
= svfree
; h
; oh
= h
, h
= h
->nxt
)
1724 bfs_free
= bfs_free
->nxt
;
1725 t
->nxt
= (BFS_Trail
*) 0;
1727 { t
= (BFS_Trail
*) emalloc(sizeof(BFS_Trail
));
1729 t
->frame
= (Trail
*) emalloc(sizeof(Trail
));
1734 push_bfs(Trail
*f
, int d
)
1737 t
= get_bfs_frame();
1738 memcpy((char *)t
->frame
, (char *)f
, sizeof(Trail
));
1739 t
->frame
->o_tt
= d
; /* depth */
1742 t
->onow
= getsv(vsize
);
1743 t
->omask
= getsv_mask(vsize
);
1744 #if defined(FULLSTACK) && defined(Q_PROVISO)
1748 { bfs_bot
= bfs_trail
= t
;
1754 printf("PUSH %u (%d)\n", t
->frame
, d
);
1768 bfs_bot
= (BFS_Trail
*) 0;
1769 #if defined(Q_PROVISO) && !defined(BITSTATE) && !defined(NOREDUCE)
1770 if (t
->lstate
) t
->lstate
->tagged
= 0;
1776 vsize
= t
->onow
->sz
;
1779 if (t
->onow
->sv
== (State
*) 0)
1781 bfs_dsk_reads
++; /* count */
1782 if (bfs_dsk_read
>= 0 /* file descriptor */
1783 && bfs_dsk_reads
%BFS_DSK_LIMIT
== 0)
1784 { (void) close(bfs_dsk_read
);
1785 sprintf(dsk_nm
, "pan_bfs_%d.tmp", bfs_dsk_seqno_r
-1);
1786 (void) unlink(dsk_nm
);
1789 if (bfs_dsk_read
< 0)
1790 { sprintf(dsk_nm
, "pan_bfs_%d.tmp", bfs_dsk_seqno_r
++);
1791 bfs_dsk_read
= open(dsk_nm
, RFLAGS
);
1792 if (bfs_dsk_read
< 0)
1793 { Uerror("could not open temp disk file");
1795 if (read(bfs_dsk_read
, (char *) &now
, vsize
) != vsize
)
1796 { Uerror("bad bfs disk file read");
1799 if (now
._vsz
!= vsize
)
1800 { Uerror("disk read vsz mismatch");
1805 memcpy((uchar
*) &now
, (uchar
*) t
->onow
->sv
, vsize
);
1806 memcpy((uchar
*) Mask
, (uchar
*) t
->omask
->sv
, vsize
);
1809 { memcpy((char *)proc_offset
, (char *)t
->omask
->po
, now
._nr_pr
* sizeof(int));
1811 { memcpy((char *)proc_offset
, (char *)t
->omask
->po
, now
._nr_pr
* sizeof(short));
1813 memcpy((char *)proc_skip
, (char *)t
->omask
->ps
, now
._nr_pr
* sizeof(uchar
));
1817 { memcpy((uchar
*)q_offset
, (uchar
*)t
->omask
->qo
, now
._nr_qs
* sizeof(int));
1819 { memcpy((uchar
*)q_offset
, (uchar
*)t
->omask
->qo
, now
._nr_qs
* sizeof(short));
1821 memcpy((uchar
*)q_skip
, (uchar
*)t
->omask
->qs
, now
._nr_qs
* sizeof(uchar
));
1824 if (t
->onow
->sv
!= (State
*) 0)
1826 freesv(t
->onow
); /* omask not freed */
1828 printf("POP %u (%d)\n", t
->frame
, t
->frame
->o_tt
);
1834 store_state(Trail
*ntrpt
, int shortcut
, short oboq
)
1837 Trans
*t2
= (Trans
*) 0;
1838 uchar ot
; int tt
, E_state
;
1839 uchar o_opm
= trpt
->o_pm
, *othis
= this;
1844 printf("claim: shortcut\n");
1846 goto store_it
; /* no claim move */
1849 this = (((uchar
*)&now
)+proc_offset
[0]); /* 0 = never claim */
1852 tt
= (int) ((P0
*)this)->_p
;
1853 ot
= (uchar
) ((P0
*)this)->_t
;
1858 for (t2
= trans
[ot
][tt
]; t2
; t2
= t2
?t2
->nxt
:(Trans
*)0)
1862 && E_state
!= t2
->e_trans
)
1865 if (do_transit(t2
, 0))
1868 if (!reached
[ot
][t2
->st
])
1869 printf("depth: %d -- claim move from %d -> %d\n",
1870 trpt
->o_tt
, ((P0
*)this)->_p
, t2
->st
);
1873 E_state
= t2
->e_trans
;
1876 { ((P0
*)this)->_p
= t2
->st
;
1877 reached
[ot
][t2
->st
] = 1;
1879 check_claim(t2
->st
);
1882 if (now
._nr_pr
== 0) /* claim terminated */
1883 uerror("end state in claim reached");
1890 Uerror("atomic in claim not supported in BFS mode");
1896 if (!bstore((char *)&now
, vsize
))
1899 if (!gstore((char *)&now
, vsize
, 0))
1901 if (!hstore((char *)&now
, vsize
))
1904 { static long sdone
= (long) 0; long ndone
;
1909 ndone
= (unsigned long) (nstates
/((double) FREQ
));
1910 if (ndone
!= sdone
&& mreached
%10 != 0)
1913 #if defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(MA)
1914 if (nstates
> ((double)(1<<(ssize
+1))))
1915 { void resize_hashtable(void);
1923 else if (oboq
!= -1)
1925 x
= (Trail
*) trpt
->ostate
; /* pre-rv state */
1926 if (x
) x
->o_pm
|= 4; /* mark success */
1929 push_bfs(ntrpt
, trpt
->o_tt
+1);
1932 #if !defined(NOREDUCE) && defined(FULLSTACK) && defined(Q_PROVISO)
1933 #if !defined(BITSTATE)
1934 if (Lstate
&& Lstate
->tagged
) trpt
->tau
|= 64;
1938 for (tprov
= bfs_trail
; tprov
; tprov
= tprov
->nxt
)
1939 if (tprov
->onow
->sv
!= (State
*) 0
1940 && memcmp((uchar
*)&now
, (uchar
*)tprov
->onow
->sv
, vsize
) == 0)
1942 break; /* state is in queue */
1948 ((P0
*)this)->_p
= tt
; /* reset claim */
1950 do_reverse(t2
, 0, 0);
1963 { Trans
*t
; Trail
*otrpt
, *x
;
1964 uchar _n
, _m
, ot
, nps
= 0;
1966 short II
, From
= (short) (now
._nr_pr
-1), To
= BASE
;
1969 ntrpt
= (Trail
*) emalloc(sizeof(Trail
));
1970 trpt
->ostate
= (struct H_el
*) 0;
1974 store_state(ntrpt
, 0, oboq
); /* initial state */
1976 while ((otrpt
= pop_bfs())) /* also restores now */
1977 { memcpy((char *) trpt
, (char *) otrpt
, sizeof(Trail
));
1978 #if defined(C_States) && (HAS_TRACK==1)
1979 c_revert((uchar
*) &(now
.c_state
[0]));
1984 printf("Revisit of atomic not needed (%d)\n",
1992 if (trpt
->o_pm
== 8)
1997 printf("Break atomic (pm:%d,tau:%d)\n",
1998 trpt
->o_pm
, trpt
->tau
);
2003 else if (trpt
->tau
&32)
2006 printf("Void preselection (pm:%d,tau:%d)\n",
2007 trpt
->o_pm
, trpt
->tau
);
2010 nps
= 1; /* no preselection in repeat */
2014 trpt
->o_pm
&= ~(4|8);
2015 if (trpt
->o_tt
> mreached
)
2016 { mreached
= trpt
->o_tt
;
2017 if (mreached
%10 == 0)
2021 if (depth
>= maxdepth
)
2026 { x
= (Trail
*) trpt
->ostate
;
2027 if (x
) x
->o_pm
|= 4; /* not failing */
2033 printf("error: max search depth too small\n");
2036 uerror("depth limit reached");
2040 if (boq
== -1 && !(trpt
->tau
&8) && nps
== 0)
2041 for (II
= now
._nr_pr
-1; II
>= BASE
; II
-= 1)
2043 Pickup
: this = pptr(II
);
2044 tt
= (int) ((P0
*)this)->_p
;
2045 ot
= (uchar
) ((P0
*)this)->_t
;
2046 if (trans
[ot
][tt
]->atom
& 8)
2047 { t
= trans
[ot
][tt
];
2055 trpt
->tau
|= 32; /* preselect marker */
2057 printf("%3d: proc %d PreSelected (tau=%d)\n",
2058 depth
, II
, trpt
->tau
);
2065 if (trpt
->tau
&8) /* atomic */
2066 { From
= To
= (short ) trpt
->pr
;
2069 { From
= now
._nr_pr
-1;
2074 for (II
= From
; II
>= To
; II
-= 1)
2076 this = (((uchar
*)&now
)+proc_offset
[II
]);
2077 tt
= (int) ((P0
*)this)->_p
;
2078 ot
= (uchar
) ((P0
*)this)->_t
;
2080 /* no rendezvous with same proc */
2081 if (boq
!= -1 && trpt
->pr
== II
) continue;
2083 ntrpt
->pr
= (uchar
) II
;
2085 trpt
->o_pm
&= ~1; /* no move yet */
2087 trpt
->o_event
= now
._event
;
2090 if (!provided(II
, ot
, tt
, t
)) continue;
2095 for (t
= trans
[ot
][tt
]; t
; t
= t
->nxt
)
2099 && E_state
!= t
->e_trans
)
2106 if (!(_m
= do_transit(t
, II
)))
2109 trpt
->o_pm
|= 1; /* we moved */
2110 (trpt
+1)->o_m
= _m
; /* for unsend */
2115 printf("%3d: proc %d exec %d, ",
2116 depth
, II
, t
->forw
);
2117 printf("%d to %d, %s %s %s",
2119 (t
->atom
&2)?"atomic":"",
2120 (boq
!= -1)?"rendez-vous":"");
2123 printf(" (escapes to state %d)", t
->st
);
2125 printf(" %saccepting [tau=%d]\n",
2126 (trpt
->o_pm
&2)?"":"non-", trpt
->tau
);
2129 E_state
= t
->e_trans
;
2131 if (t
->e_trans
> 0 && (boq
!= -1 /* || oboq != -1 */))
2132 { fprintf(efd
, "error: the use of rendezvous stmnt in the escape clause\n");
2133 fprintf(efd
, " of an unless stmnt is not compatible with -DBFS\n");
2138 if (t
->st
> 0) ((P0
*)this)->_p
= t
->st
;
2140 /* ptr to pred: */ ntrpt
->ostate
= (struct H_el
*) otrpt
;
2142 if (boq
== -1 && (t
->atom
&2)) /* atomic */
2143 ntrpt
->tau
= 8; /* record for next move */
2147 store_state(ntrpt
, (boq
!= -1 || (t
->atom
&2)), oboq
);
2149 now
._event
= trpt
->o_event
;
2152 /* undo move and continue */
2153 trpt
++; /* this is where ovals and ipt are set */
2154 do_reverse(t
, II
, _m
); /* restore now. */
2158 enter_critical(GLOBAL_LOCK
); /* in verbose mode only */
2159 printf("cpu%d: ", core_id
);
2161 printf("%3d: proc %d ", depth
, II
);
2162 printf("reverses %d, %d to %d,",
2163 t
->forw
, tt
, t
->st
);
2164 printf(" %s [abit=%d,adepth=%d,",
2165 t
->tp
, now
._a_t
, A_depth
);
2166 printf("tau=%d,%d]\n",
2167 trpt
->tau
, (trpt
-1)->tau
);
2169 leave_critical(GLOBAL_LOCK
);
2172 reached
[ot
][t
->st
] = 1;
2173 reached
[ot
][tt
] = 1;
2175 ((P0
*)this)->_p
= tt
;
2179 /* preselected - no succ definitely outside stack */
2180 if ((trpt
->tau
&32) && !(trpt
->tau
&64))
2181 { From
= now
._nr_pr
-1; To
= BASE
;
2183 cpu_printf("%3d: proc %d UnSelected (_n=%d, tau=%d)\n",
2184 depth
, II
+1, (int) _n
, trpt
->tau
);
2186 _n
= 0; trpt
->tau
&= ~32;
2191 trpt
->tau
&= ~(32|64);
2196 printf("%3d: no move [II=%d, tau=%d, boq=%d, _nr_pr=%d]\n",
2197 depth
, II
, trpt
->tau
, boq
, now
._nr_pr
);
2201 x
= (Trail
*) trpt
->ostate
; /* pre-rv state */
2202 if (!x
) continue; /* root state */
2203 if ((x
->tau
&8) || (x
->tau
&32)) /* break atomic or preselect at parent */
2204 { x
->o_pm
|= 8; /* mark failure */
2205 this = (((uchar
*)&now
)+proc_offset
[otrpt
->pr
]);
2207 printf("\treset state of %d from %d to %d\n",
2208 otrpt
->pr
, ((P0
*)this)->_p
, otrpt
->st
);
2210 ((P0
*)this)->_p
= otrpt
->st
;
2211 unsend(boq
); /* retract rv offer */
2213 push_bfs(x
, x
->o_tt
);
2215 printf("failed rv, repush with %d\n", x
->o_pm
);
2219 else printf("failed rv, tau at parent: %d\n", x
->tau
);
2221 } else if (now
._nr_pr
> 0)
2223 if ((trpt
->tau
&8)) /* atomic */
2224 { trpt
->tau
&= ~(1|8); /* 1=timeout, 8=atomic */
2226 printf("%3d: atomic step proc %d blocks\n",
2232 if (!(trpt
->tau
&1)) /* didn't try timeout yet */
2235 printf("%d: timeout\n", depth
);
2240 if (!noends
&& !a_cycles
&& !endstate())
2241 uerror("invalid end state");
2247 putter(Trail
*trpt
, int fd
)
2252 if (trpt
!= (Trail
*) trpt
->ostate
)
2253 putter((Trail
*) trpt
->ostate
, fd
);
2256 { sprintf(snap
, "%d:%d:%d\n",
2257 trcnt
++, trpt
->pr
, trpt
->o_t
->t_id
);
2259 if (write(fd
, snap
, j
) != j
)
2260 { printf("pan: error writing %s\n", fnm
);
2267 { int fd
= make_trail();
2272 sprintf(snap
, "-2:%d:-2\n", VERI
);
2273 write(fd
, snap
, strlen(snap
));
2276 sprintf(snap
, "-4:-4:-4\n");
2277 write(fd
, snap
, strlen(snap
));
2282 { sprintf(snap
, "%d:%d:%d\n",
2283 trcnt
++, ntrpt
->pr
, ntrpt
->o_t
->t_id
);
2285 if (write(fd
, snap
, j
) != j
)
2286 { printf("pan: error writing %s\n", fnm
);
2290 if (errors
>= upto
&& upto
!= 0)
2296 #if defined(WIN32) || defined(WIN64)
2303 #include <windows.h>
2306 #define long long long
2309 #include <sys/ipc.h>
2310 #include <sys/sem.h>
2311 #include <sys/shm.h>
2314 /* code common to cygwin/linux and win32/win64: */
2317 #define VVERBOSE (1)
2319 #define VVERBOSE (0)
2322 /* the following values must be larger than 256 and must fit in an int */
2323 #define QUIT 1024 /* terminate now command */
2324 #define QUERY 512 /* termination status query message */
2325 #define QUERY_F 513 /* query failed, cannot quit */
2327 #define GN_FRAMES (int) (GWQ_SIZE / (double) sizeof(SM_frame))
2328 #define LN_FRAMES (int) (LWQ_SIZE / (double) sizeof(SM_frame))
2331 #define VMAX VECTORSZ
2347 /* no longer useful -- being recomputed for local heap size anyway */
2348 double SEG_SIZE
= (((double) SET_SEG_SIZE
) * 1048576.);
2350 double SEG_SIZE
= (1048576.*1024.); /* 1GB default shared memory pool segments */
2353 double LWQ_SIZE
= 0.; /* initialized in main */
2357 #warning SET_WQ_SIZE applies to global queue -- ignored
2358 double GWQ_SIZE
= 0.;
2360 double GWQ_SIZE
= (((double) SET_WQ_SIZE
) * 1048576.);
2361 /* must match the value in pan_proxy.c, if used */
2365 double GWQ_SIZE
= 0.;
2367 double GWQ_SIZE
= (128.*1048576.); /* 128 MB default queue sizes */
2371 /* Crash Detection Parameters */
2373 #define ONESECOND (1<<25)
2376 #define SHORT_T (0.1)
2379 #define LONG_T (600)
2382 double OneSecond
= (double) (ONESECOND
); /* waiting for a free slot -- checks crash */
2383 double TenSeconds
= 10. * (ONESECOND
); /* waiting for a lock -- check for a crash */
2385 /* Termination Detection Params -- waiting for new state input in Get_Full_Frame */
2386 double Delay
= ((double) SHORT_T
) * (ONESECOND
); /* termination detection trigger */
2387 double OneHour
= ((double) LONG_T
) * (ONESECOND
); /* timeout termination detection */
2389 typedef struct SM_frame SM_frame
;
2390 typedef struct SM_results SM_results
;
2391 typedef struct sh_Allocater sh_Allocater
;
2393 struct SM_frame
{ /* about 6K per slot */
2394 volatile int m_vsize
; /* 0 means free slot */
2395 volatile int m_boq
; /* >500 is a control message */
2397 volatile struct Stack_Tree
*m_stack
; /* ptr to previous state */
2399 volatile uchar m_tau
;
2400 volatile uchar m_o_pm
;
2401 volatile int nr_handoffs
; /* to compute real_depth */
2402 volatile char m_now
[VMAX
];
2403 volatile char m_Mask
[(VMAX
+ 7)/8];
2404 volatile OFFT m_p_offset
[PMAX
];
2405 volatile OFFT m_q_offset
[QMAX
];
2406 volatile uchar m_p_skip
[PMAX
];
2407 volatile uchar m_q_skip
[QMAX
];
2408 #if defined(C_States) && (HAS_TRACK==1) && (HAS_STACK==1)
2409 volatile uchar m_c_stack
[StackSize
];
2413 int proxy_pid
; /* id of proxy if nonzero -- receive half */
2414 int store_proxy_pid
;
2416 int proxy_pid_snd
; /* id of proxy if nonzero -- send half */
2417 char o_cmdline
[512]; /* to pass options to children */
2419 int iamin
[CS_NR
+NCORE
]; /* non-shared */
2421 #if defined(WIN32) || defined(WIN64)
2422 int tas(volatile LONG
*);
2424 HANDLE proxy_handle_snd
; /* for Windows Create and Terminate */
2426 struct sh_Allocater
{ /* shared memory for states */
2427 volatile char *dc_arena
; /* to allocate states from */
2428 volatile long pattern
; /* to detect overruns */
2429 volatile long dc_size
; /* nr of bytes left */
2430 volatile void *dc_start
; /* where memory segment starts */
2431 volatile void *dc_id
; /* to attach, detach, remove shared memory segments */
2432 volatile sh_Allocater
*nxt
; /* linked list of pools */
2434 DWORD worker_pids
[NCORE
]; /* root mem of pids of all workers created */
2435 HANDLE worker_handles
[NCORE
]; /* for windows Create and Terminate */
2436 void * shmid
[NR_QS
]; /* return value from CreateFileMapping */
2437 void * shmid_M
; /* shared mem for state allocation in hashtable */
2442 void *shmid_S
; /* shared bitstate arena or hashtable */
2445 int tas(volatile int *);
2447 struct sh_Allocater
{ /* shared memory for states */
2448 volatile char *dc_arena
; /* to allocate states from */
2449 volatile long pattern
; /* to detect overruns */
2450 volatile long dc_size
; /* nr of bytes left */
2451 volatile char *dc_start
; /* where memory segment starts */
2452 volatile int dc_id
; /* to attach, detach, remove shared memory segments */
2453 volatile sh_Allocater
*nxt
; /* linked list of pools */
2456 int worker_pids
[NCORE
]; /* root mem of pids of all workers created */
2457 int shmid
[NR_QS
]; /* return value from shmget */
2458 int nibis
= 0; /* set after shared mem has been released */
2459 int shmid_M
; /* shared mem for state allocation in hashtable */
2463 int shmid_S
; /* shared bitstate arena or hashtable */
2464 volatile sh_Allocater
*first_pool
; /* of shared state memory */
2465 volatile sh_Allocater
*last_pool
;
2469 struct SM_results
{ /* for shuttling back final stats */
2470 volatile int m_vsize
; /* avoid conflicts with frames */
2471 volatile int m_boq
; /* these 2 fields are not written in record_info */
2472 /* probably not all fields really need to be volatile */
2473 volatile double m_memcnt
;
2474 volatile double m_nstates
;
2475 volatile double m_truncs
;
2476 volatile double m_truncs2
;
2477 volatile double m_nShadow
;
2478 volatile double m_nlinks
;
2479 volatile double m_ngrabs
;
2480 volatile double m_nlost
;
2481 volatile double m_hcmp
;
2482 volatile double m_frame_wait
;
2483 volatile int m_hmax
;
2484 volatile int m_svmax
;
2485 volatile int m_smax
;
2486 volatile int m_mreached
;
2487 volatile int m_errors
;
2488 volatile int m_VMAX
;
2489 volatile short m_PMAX
;
2490 volatile short m_QMAX
;
2491 volatile uchar m_R
; /* reached info for all proctypes */
2494 int core_id
= 0; /* internal process nr, to know which q to use */
2495 unsigned long nstates_put
= 0; /* statistics */
2496 unsigned long nstates_get
= 0;
2497 int query_in_progress
= 0; /* termination detection */
2499 double free_wait
= 0.; /* waiting for a free frame */
2500 double frame_wait
= 0.; /* waiting for a full frame */
2501 double lock_wait
= 0.; /* waiting for access to cs */
2502 double glock_wait
[3]; /* waiting for access to global lock */
2504 char *sprefix
= "rst";
2505 uchar was_interrupted
, issued_kill
, writing_trail
;
2507 static SM_frame cur_Root
; /* current root, to be safe with error trails */
2509 SM_frame
*m_workq
[NR_QS
]; /* per cpu work queues + global q */
2510 char *shared_mem
[NR_QS
]; /* return value from shmat */
2515 volatile sh_Allocater
*dc_shared
; /* assigned at initialization */
2517 static int vmax_seen
, pmax_seen
, qmax_seen
;
2518 static double gq_tries
, gq_hasroom
, gq_hasnoroom
;
2520 volatile int *prfree
;
2521 volatile int *prfull
;
2522 volatile int *prcnt
;
2523 volatile int *prmax
;
2525 volatile int *sh_lock
; /* mutual exclusion locks - in shared memory */
2526 volatile double *is_alive
; /* to detect when processes crash */
2527 volatile int *grfree
, *grfull
, *grcnt
, *grmax
; /* access to shared global q */
2528 volatile double *gr_readmiss
, *gr_writemiss
;
2529 static int lrfree
; /* used for temporary recording of slot */
2530 static int dfs_phase2
;
2532 void mem_put(int); /* handoff state to other cpu */
2533 void mem_put_acc(void); /* liveness mode */
2534 void mem_get(void); /* get state from work queue */
2535 void sudden_stop(char *);
2537 void enter_critical(int);
2538 void leave_critical(int);
2542 record_info(SM_results
*r
)
2548 { cpu_printf("nstates %g nshadow %g -- memory %-6.3f Mb\n",
2549 nstates
, nShadow
, memcnt
/(1048576.));
2554 r
->m_memcnt
= 0; /* it's shared */
2556 r
->m_memcnt
= memcnt
;
2558 if (a_cycles
&& core_id
== 1)
2559 { r
->m_nstates
= nstates
;
2560 r
->m_nShadow
= nstates
;
2562 { r
->m_nstates
= nstates
;
2563 r
->m_nShadow
= nShadow
;
2565 r
->m_truncs
= truncs
;
2566 r
->m_truncs2
= truncs2
;
2567 r
->m_nlinks
= nlinks
;
2568 r
->m_ngrabs
= ngrabs
;
2571 r
->m_frame_wait
= frame_wait
;
2575 r
->m_mreached
= mreached
;
2576 r
->m_errors
= errors
;
2577 r
->m_VMAX
= vmax_seen
;
2578 r
->m_PMAX
= (short) pmax_seen
;
2579 r
->m_QMAX
= (short) qmax_seen
;
2580 ptr
= (uchar
*) &(r
->m_R
);
2581 for (i
= 0; i
<= _NP_
; i
++) /* all proctypes */
2582 { memcpy(ptr
, reached
[i
], NrStates
[i
]*sizeof(uchar
));
2583 ptr
+= NrStates
[i
]*sizeof(uchar
);
2586 { cpu_printf("Put Results nstates %g (sz %d)\n", nstates
, ptr
- &(r
->m_R
));
2590 void snapshot(void);
2593 retrieve_info(SM_results
*r
)
2595 volatile uchar
*ptr
;
2597 snapshot(); /* for a final report */
2599 enter_critical(GLOBAL_LOCK
);
2602 { printf("cpu%d: local heap-left %ld KB (%d MB)\n",
2603 core_id
, (int) (my_size
/1024), (int) (my_size
/1048576));
2606 if (verbose
&& core_id
== 0)
2608 for (i
= 0; i
< NCORE
; i
++)
2609 { printf("%d ", prmax
[i
]);
2612 printf("G: %d", *grmax
);
2616 leave_critical(GLOBAL_LOCK
);
2618 memcnt
+= r
->m_memcnt
;
2619 nstates
+= r
->m_nstates
;
2620 nShadow
+= r
->m_nShadow
;
2621 truncs
+= r
->m_truncs
;
2622 truncs2
+= r
->m_truncs2
;
2623 nlinks
+= r
->m_nlinks
;
2624 ngrabs
+= r
->m_ngrabs
;
2625 nlost
+= r
->m_nlost
;
2627 /* frame_wait += r->m_frame_wait; */
2628 errors
+= r
->m_errors
;
2630 if (hmax
< r
->m_hmax
) hmax
= r
->m_hmax
;
2631 if (svmax
< r
->m_svmax
) svmax
= r
->m_svmax
;
2632 if (smax
< r
->m_smax
) smax
= r
->m_smax
;
2633 if (mreached
< r
->m_mreached
) mreached
= r
->m_mreached
;
2635 if (vmax_seen
< r
->m_VMAX
) vmax_seen
= r
->m_VMAX
;
2636 if (pmax_seen
< (int) r
->m_PMAX
) pmax_seen
= (int) r
->m_PMAX
;
2637 if (qmax_seen
< (int) r
->m_QMAX
) qmax_seen
= (int) r
->m_QMAX
;
2640 for (i
= 0; i
<= _NP_
; i
++) /* all proctypes */
2641 { for (j
= 0; j
< NrStates
[i
]; j
++)
2642 { if (*(ptr
+ j
) != 0)
2643 { reached
[i
][j
] = 1;
2645 ptr
+= NrStates
[i
]*sizeof(uchar
);
2648 { cpu_printf("Got Results (%d)\n", ptr
- &(r
->m_R
));
2653 #if !defined(WIN32) && !defined(WIN64)
2655 rm_shared_segments(void)
2657 volatile sh_Allocater
*nxt_pool
;
2659 * mark all shared memory segments for removal
2660 * the actual removes won't happen until last process dies or detaches
2661 * the shmctl calls can return -1 if not all procs have detached yet
2663 for (m
= 0; m
< NR_QS
; m
++) /* +1 for global q */
2664 { if (shmid
[m
] != -1)
2665 { (void) shmctl(shmid
[m
], IPC_RMID
, NULL
);
2669 { (void) shmctl(shmid_M
, IPC_RMID
, NULL
);
2673 { (void) shmctl(shmid_S
, IPC_RMID
, NULL
);
2675 for (last_pool
= first_pool
; last_pool
!= NULL
; last_pool
= nxt_pool
)
2676 { shmid_M
= (int) (last_pool
->dc_id
);
2677 nxt_pool
= last_pool
->nxt
; /* as a precaution only */
2679 { (void) shmctl(shmid_M
, IPC_RMID
, NULL
);
2686 sudden_stop(char *s
)
2690 printf("cpu%d: stop - %s\n", core_id
, s
);
2691 #if !defined(WIN32) && !defined(WIN64)
2693 { rm_shared_segments();
2696 if (search_terminated
!= NULL
)
2697 { if (*search_terminated
!= 0)
2699 { printf("cpu%d: termination initiated (%d)\n",
2700 core_id
, *search_terminated
);
2704 { printf("cpu%d: initiated termination\n", core_id
);
2706 *search_terminated
|= 8; /* sudden_stop */
2709 { if (((*search_terminated
) & 4) /* uerror in one of the cpus */
2710 && !((*search_terminated
) & (8|32|128|256))) /* abnormal stop */
2711 { if (errors
== 0) errors
++; /* we know there is at least 1 */
2713 wrapup(); /* incomplete stats, but at least something */
2716 } /* else: should rarely happen, take more drastic measures */
2718 if (core_id
== 0) /* local root process */
2719 { for (i
= 1; i
< NCORE
; i
++) /* not for 0 of course */
2721 #if defined(WIN32) || defined(WIN64)
2722 DWORD dwExitCode
= 0;
2723 GetExitCodeProcess(worker_handles
[i
], &dwExitCode
);
2724 if (dwExitCode
== STILL_ACTIVE
)
2725 { TerminateProcess(worker_handles
[i
], 0);
2727 printf("cpu0: terminate %d %d\n",
2728 worker_pids
[i
], (dwExitCode
== STILL_ACTIVE
));
2730 sprintf(b
, "kill -%d %d", SIGKILL
, worker_pids
[i
]);
2731 system(b
); /* if this is a proxy: receive half */
2732 printf("cpu0: %s\n", b
);
2737 { /* on WIN32/WIN64 -- these merely kills the root process... */
2738 if (was_interrupted
== 0)
2739 { sprintf(b
, "kill -%d %d", SIGINT
, worker_pids
[0]);
2740 system(b
); /* warn the root process */
2741 printf("cpu%d: %s\n", core_id
, b
);
2746 #define iam_alive() is_alive[core_id]++
2748 extern int crash_test(double);
2749 extern void crash_reset(void);
2752 someone_crashed(int wait_type
)
2753 { static double last_value
= 0.0;
2754 static int count
= 0;
2756 if (search_terminated
== NULL
2757 || *search_terminated
!= 0)
2759 if (!(*search_terminated
& (8|32|128|256)))
2760 { if (count
++ < 100*NCORE
)
2765 /* check left neighbor only */
2766 if (last_value
== is_alive
[(core_id
+ NCORE
- 1) % NCORE
])
2767 { if (count
++ >= 100) /* to avoid unnecessary checks */
2772 last_value
= is_alive
[(core_id
+ NCORE
- 1) % NCORE
];
2781 enter_critical(GLOBAL_LOCK
);
2785 printf("cpu%d: locks: global %g\tother %g\t",
2786 core_id
, glock_wait
[0], lock_wait
- glock_wait
[0]);
2788 printf("cpu%d: locks: GL %g, RQ %g, WQ %g, HT %g\t",
2789 core_id
, glock_wait
[0], glock_wait
[1], glock_wait
[2],
2790 lock_wait
- glock_wait
[0] - glock_wait
[1] - glock_wait
[2]);
2792 printf("waits: states %g slots %g\n", frame_wait
, free_wait
);
2794 printf("cpu%d: gq [tries %g, room %g, noroom %g]\n", core_id
, gq_tries
, gq_hasroom
, gq_hasnoroom
);
2795 if (core_id
== 0 && (*gr_readmiss
>= 1.0 || *gr_readmiss
>= 1.0 || *grcnt
!= 0))
2796 printf("cpu0: gq [readmiss: %g, writemiss: %g cnt %d]\n", *gr_readmiss
, *gr_writemiss
, *grcnt
);
2799 if (free_wait
> 1000000.)
2802 { printf("hint: this search may be faster with a larger work-queue\n");
2803 printf(" (-DSET_WQ_SIZE=N with N>%g), and/or with -DUSE_DISK\n",
2804 GWQ_SIZE
/sizeof(SM_frame
));
2805 printf(" or with a larger value for -zN (N>%d)\n", z_handoff
);
2807 { printf("hint: this search may be faster if compiled without -DNGQ, with -DUSE_DISK, ");
2808 printf("or with a larger -zN (N>%d)\n", z_handoff
);
2811 leave_critical(GLOBAL_LOCK
);
2814 #ifndef MAX_DSK_FILE
2815 #define MAX_DSK_FILE 1000000 /* default is max 1M states per file */
2819 multi_usage(FILE *fd
)
2820 { static int warned
= 0;
2821 if (warned
> 0) { return; } else { warned
++; }
2823 fprintf(fd
, "Defining multi-core mode:\n\n");
2824 fprintf(fd
, " -DDUAL_CORE --> same as -DNCORE=2\n");
2825 fprintf(fd
, " -DQUAD_CORE --> same as -DNCORE=4\n");
2826 fprintf(fd
, " -DNCORE=N --> enables multi_core verification if N>1\n");
2828 fprintf(fd
, "Additional directives supported in multi-core mode:\n\n");
2829 fprintf(fd
, " -DSEP_STATE --> forces separate statespaces instead of a single shared state space\n");
2830 fprintf(fd
, " -DNUSE_DISK --> use disk for storing states when a work queue overflows\n");
2831 fprintf(fd
, " -DMAX_DSK_FILE --> max nr of states per diskfile (%d)\n", MAX_DSK_FILE
);
2832 fprintf(fd
, " -DFULL_TRAIL --> support full error trails (increases memory use)\n");
2834 fprintf(fd
, "More advanced use (should rarely need changing):\n\n");
2835 fprintf(fd
, " To change the nr of states that can be stored in the global queue\n");
2836 fprintf(fd
, " (lower numbers allow for more states to be stored, prefer multiples of 8):\n");
2837 fprintf(fd
, " -DVMAX=N --> upperbound on statevector for handoffs (N=%d)\n", VMAX
);
2838 fprintf(fd
, " -DPMAX=N --> upperbound on nr of procs (default: N=%d)\n", PMAX
);
2839 fprintf(fd
, " -DQMAX=N --> upperbound on nr of channels (default: N=%d)\n", QMAX
);
2841 fprintf(fd
, " To set the total amount of memory reserved for the global workqueue:\n");
2842 fprintf(fd
, " -DSET_WQ_SIZE=N --> default: N=128 (defined in MBytes)\n\n");
2843 fprintf(fd
, " To force the use of a single global heap, instead of separate heaps:\n");
2844 fprintf(fd
, " -DGLOB_HEAP\n");
2846 fprintf(fd
, " To define a fct to initialize data before spawning processes (use quotes):\n");
2847 fprintf(fd
, " \"-DC_INIT=fct()\"\n");
2849 fprintf(fd
, " Timer settings for termination and crash detection:\n");
2850 fprintf(fd
, " -DSHORT_T=N --> timeout for termination detection trigger (N=%g)\n", (double) SHORT_T
);
2851 fprintf(fd
, " -DLONG_T=N --> timeout for giving up on termination detection (N=%g)\n", (double) LONG_T
);
2852 fprintf(fd
, " -DONESECOND --> (1<<29) --> timeout waiting for a free slot -- to check for crash\n");
2853 fprintf(fd
, " -DT_ALERT --> collect stats on crash alert timeouts\n\n");
2854 fprintf(fd
, "Help with Linux/Windows/Cygwin configuration for multi-core:\n");
2855 fprintf(fd
, " http://spinroot.com/spin/multicore/V5_Readme.html\n");
2858 #if NCORE>1 && defined(FULL_TRAIL)
2859 typedef struct Stack_Tree
{
2860 uchar pr
; /* process that made transition */
2861 T_ID t_id
; /* id of transition */
2862 volatile struct Stack_Tree
*prv
; /* backward link towards root */
2865 struct H_el
*grab_shared(int);
2866 volatile Stack_Tree
**stack_last
; /* in shared memory */
2867 char *stack_cache
= NULL
; /* local */
2868 int nr_cached
= 0; /* local */
2871 #define CACHE_NR 1024
2874 volatile Stack_Tree
*
2875 stack_prefetch(void)
2876 { volatile Stack_Tree
*st
;
2879 { stack_cache
= (char *) grab_shared(CACHE_NR
* sizeof(Stack_Tree
));
2880 nr_cached
= CACHE_NR
;
2882 st
= (volatile Stack_Tree
*) stack_cache
;
2883 stack_cache
+= sizeof(Stack_Tree
);
2889 Push_Stack_Tree(short II
, T_ID t_id
)
2890 { volatile Stack_Tree
*st
;
2892 st
= (volatile Stack_Tree
*) stack_prefetch();
2895 st
->prv
= (Stack_Tree
*) stack_last
[core_id
];
2896 stack_last
[core_id
] = st
;
2900 Pop_Stack_Tree(void)
2901 { volatile Stack_Tree
*cf
= stack_last
[core_id
];
2904 { stack_last
[core_id
] = cf
->prv
;
2905 } else if (nr_handoffs
* z_handoff
+ depth
> 0)
2906 { printf("cpu%d: error pop_stack_tree (depth %d)\n",
2913 e_critical(int which
)
2916 if (readtrail
|| iamin
[which
] > 0)
2917 { if (!readtrail
&& verbose
)
2918 { printf("cpu%d: Double Lock on %d (now %d)\n",
2919 core_id
, which
, iamin
[which
]+1);
2922 iamin
[which
]++; /* local variable */
2926 cnt_start
= lock_wait
;
2928 while (sh_lock
!= NULL
) /* as long as we have shared memory */
2929 { int r
= tas(&sh_lock
[which
]);
2932 return; /* locked */
2937 if (which
< 3) { glock_wait
[which
]++; }
2939 if (which
== 0) { glock_wait
[which
]++; }
2943 if (lock_wait
- cnt_start
> TenSeconds
)
2944 { printf("cpu%d: lock timeout on %d\n", core_id
, which
);
2945 cnt_start
= lock_wait
;
2946 if (someone_crashed(1))
2947 { sudden_stop("lock timeout");
2953 x_critical(int which
)
2955 if (iamin
[which
] != 1)
2956 { if (iamin
[which
] > 1)
2957 { iamin
[which
]--; /* this is thread-local - no races on this one */
2958 if (!readtrail
&& verbose
)
2959 { printf("cpu%d: Partial Unlock on %d (%d more needed)\n",
2960 core_id
, which
, iamin
[which
]);
2964 } else /* iamin[which] <= 0 */
2966 { printf("cpu%d: Invalid Unlock iamin[%d] = %d\n",
2967 core_id
, which
, iamin
[which
]);
2973 if (sh_lock
!= NULL
)
2975 sh_lock
[which
] = 0; /* unlock */
2980 #if defined(WIN32) || defined(WIN64)
2981 start_proxy(char *s
, DWORD r_pid
)
2983 start_proxy(char *s
, int r_pid
)
2985 { char Q_arg
[16], Z_arg
[16], Y_arg
[16];
2986 char *args
[32], *ptr
;
2989 sprintf(Q_arg
, "-Q%d", getpid());
2990 sprintf(Y_arg
, "-Y%d", r_pid
);
2991 sprintf(Z_arg
, "-Z%d", proxy_pid
/* core_id */);
2993 args
[argcnt
++] = "proxy";
2994 args
[argcnt
++] = s
; /* -r or -s */
2995 args
[argcnt
++] = Q_arg
;
2996 args
[argcnt
++] = Z_arg
;
2997 args
[argcnt
++] = Y_arg
;
2999 if (strlen(o_cmdline
) > 0)
3000 { ptr
= o_cmdline
; /* assume args separated by spaces */
3001 do { args
[argcnt
++] = ptr
++;
3002 if ((ptr
= strchr(ptr
, ' ')) != NULL
)
3003 { while (*ptr
== ' ')
3009 } while (argcnt
< 31);
3011 args
[argcnt
] = NULL
;
3012 #if defined(WIN32) || defined(WIN64)
3013 execvp("pan_proxy", args
); /* no return */
3015 execvp("./pan_proxy", args
); /* no return */
3017 Uerror("pan_proxy exec failed");
3019 /*** end of common code fragment ***/
3021 #if !defined(WIN32) && !defined(WIN64)
3023 init_shm(void) /* initialize shared work-queues - linux/cygwin */
3028 if (core_id
== 0 && verbose
)
3029 { printf("cpu0: step 3: allocate shared workqueues %g MB\n",
3030 ((double) NCORE
* LWQ_SIZE
+ GWQ_SIZE
) / (1048576.) );
3032 for (m
= 0; m
< NR_QS
; m
++) /* last q is the global q */
3033 { double qsize
= (m
== NCORE
) ? GWQ_SIZE
: LWQ_SIZE
;
3034 key
[m
] = ftok(PanSource
, m
+1);
3036 { perror("ftok shared queues"); must_exit
= 1; break;
3039 if (core_id
== 0) /* root creates */
3040 { /* check for stale copy */
3041 shmid
[m
] = shmget(key
[m
], (size_t) qsize
, 0600);
3042 if (shmid
[m
] != -1) /* yes there is one; remove it */
3043 { printf("cpu0: removing stale q%d, status: %d\n",
3044 m
, shmctl(shmid
[m
], IPC_RMID
, NULL
));
3046 shmid
[m
] = shmget(key
[m
], (size_t) qsize
, 0600|IPC_CREAT
|IPC_EXCL
);
3048 } else /* workers attach */
3049 { shmid
[m
] = shmget(key
[m
], (size_t) qsize
, 0600);
3050 /* never called, since we create shm *before* we fork */
3053 { perror("shmget shared queues"); must_exit
= 1; break;
3056 shared_mem
[m
] = (char *) shmat(shmid
[m
], (void *) 0, 0); /* attach */
3057 if (shared_mem
[m
] == (char *) -1)
3058 { fprintf(stderr
, "error: cannot attach shared wq %d (%d Mb)\n",
3059 m
+1, (int) (qsize
/(1048576.)));
3060 perror("shmat shared queues"); must_exit
= 1; break;
3063 m_workq
[m
] = (SM_frame
*) shared_mem
[m
];
3065 { int nframes
= (m
== NCORE
) ? GN_FRAMES
: LN_FRAMES
;
3066 for (n
= 0; n
< nframes
; n
++)
3067 { m_workq
[m
][n
].m_vsize
= 0;
3068 m_workq
[m
][n
].m_boq
= 0;
3072 { rm_shared_segments();
3073 fprintf(stderr
, "pan: check './pan --' for usage details\n");
3074 pan_exit(1); /* calls cleanup_shm */
3079 prep_shmid_S(size_t n
) /* either sets SS or H_tab, linux/cygwin */
3084 if (verbose
&& core_id
== 0)
3087 printf("cpu0: step 1: allocate shared bitstate %g Mb\n",
3088 (double) n
/ (1048576.));
3090 printf("cpu0: step 1: allocate shared hastable %g Mb\n",
3091 (double) n
/ (1048576.));
3095 if (memcnt
+ (double) n
> memlim
)
3096 { printf("cpu0: S %8g + %d Kb exceeds memory limit of %8g Mb\n",
3097 memcnt
/1024., n
/1024, memlim
/(1048576.));
3098 printf("cpu0: insufficient memory -- aborting\n");
3103 key
= ftok(PanSource
, NCORE
+2); /* different from queues */
3105 { perror("ftok shared bitstate or hashtable");
3106 fprintf(stderr
, "pan: check './pan --' for usage details\n");
3110 if (core_id
== 0) /* root */
3111 { shmid_S
= shmget(key
, n
, 0600);
3113 { printf("cpu0: removing stale segment, status: %d\n",
3114 shmctl(shmid_S
, IPC_RMID
, NULL
));
3116 shmid_S
= shmget(key
, n
, 0600 | IPC_CREAT
| IPC_EXCL
);
3117 memcnt
+= (double) n
;
3119 { shmid_S
= shmget(key
, n
, 0600);
3122 { perror("shmget shared bitstate or hashtable too large?");
3123 fprintf(stderr
, "pan: check './pan --' for usage details\n");
3127 rval
= (char *) shmat(shmid_S
, (void *) 0, 0); /* attach */
3128 if ((char *) rval
== (char *) -1)
3129 { perror("shmat shared bitstate or hashtable");
3130 fprintf(stderr
, "pan: check './pan --' for usage details\n");
3134 rval
= (char *) emalloc(n
);
3136 return (uchar
*) rval
;
3142 static char shm_prep_result
;
3145 prep_state_mem(size_t n
) /* sets memory arena for states linux/cygwin */
3148 static int cnt
= 3; /* start larger than earlier ftok calls */
3150 shm_prep_result
= NOT_AGAIN
; /* default */
3151 if (verbose
&& core_id
== 0)
3152 { printf("cpu0: step 2+: pre-allocate memory arena %d of %6.2g Mb\n",
3153 cnt
-3, (double) n
/ (1048576.));
3156 if (memcnt
+ (double) n
> memlim
)
3157 { printf("cpu0: error: M %.0f + %.0f Kb exceeds memory limit of %.0f Mb\n",
3158 memcnt
/1024.0, (double) n
/1024.0, memlim
/(1048576.));
3163 key
= ftok(PanSource
, NCORE
+cnt
); cnt
++;
3166 printf("pan: check './pan --' for usage details\n");
3171 { shmid_M
= shmget(key
, n
, 0600);
3173 { printf("cpu0: removing stale memory segment %d, status: %d\n",
3174 cnt
-3, shmctl(shmid_M
, IPC_RMID
, NULL
));
3176 shmid_M
= shmget(key
, n
, 0600 | IPC_CREAT
| IPC_EXCL
);
3177 /* memcnt += (double) n; -- only amount actually used is counted */
3179 { shmid_M
= shmget(key
, n
, 0600);
3184 { printf("error: failed to get pool of shared memory %d of %.0f Mb\n",
3185 cnt
-3, ((double)n
)/(1048576.));
3186 perror("state mem");
3187 printf("pan: check './pan --' for usage details\n");
3189 shm_prep_result
= TRY_AGAIN
;
3192 rval
= (char *) shmat(shmid_M
, (void *) 0, 0); /* attach */
3194 if ((char *) rval
== (char *) -1)
3195 { printf("cpu%d error: failed to attach pool of shared memory %d of %.0f Mb\n",
3196 core_id
, cnt
-3, ((double)n
)/(1048576.));
3197 perror("state mem");
3200 return (uchar
*) rval
;
3204 init_HT(unsigned long n
) /* cygwin/linux version */
3208 volatile char *dc_mem_start
;
3209 double need_mem
, got_mem
= 0.;
3215 { printf("cpu0: steps 0,1: no -DMEMLIM set\n");
3219 { printf("cpu0: steps 0,1: -DMEMLIM=%d Mb - (hashtable %g Mb + workqueues %g Mb)\n",
3220 MEMLIM
, ((double)n
/(1048576.)), (((double) NCORE
* LWQ_SIZE
) + GWQ_SIZE
) /(1048576.) );
3223 get_mem
= NCORE
* sizeof(double) + (1 + CS_NR
) * sizeof(void *) + 4*sizeof(void *) + 2*sizeof(double);
3224 /* NCORE * is_alive + search_terminated + CS_NR * sh_lock + 6 gr vars */
3225 get_mem
+= 4 * NCORE
* sizeof(void *); /* prfree, prfull, prcnt, prmax */
3227 get_mem
+= (NCORE
) * sizeof(Stack_Tree
*); /* NCORE * stack_last */
3229 x
= (volatile char *) prep_state_mem((size_t) get_mem
); /* work queues and basic structs */
3232 { printf("cpu0: could not allocate shared memory, see ./pan --\n");
3235 search_terminated
= (volatile unsigned int *) x
; /* comes first */
3236 x
+= sizeof(void *); /* maintain alignment */
3238 is_alive
= (volatile double *) x
;
3239 x
+= NCORE
* sizeof(double);
3241 sh_lock
= (volatile int *) x
;
3242 x
+= CS_NR
* sizeof(void *);
3244 grfree
= (volatile int *) x
;
3245 x
+= sizeof(void *);
3246 grfull
= (volatile int *) x
;
3247 x
+= sizeof(void *);
3248 grcnt
= (volatile int *) x
;
3249 x
+= sizeof(void *);
3250 grmax
= (volatile int *) x
;
3251 x
+= sizeof(void *);
3252 prfree
= (volatile int *) x
;
3253 x
+= NCORE
* sizeof(void *);
3254 prfull
= (volatile int *) x
;
3255 x
+= NCORE
* sizeof(void *);
3256 prcnt
= (volatile int *) x
;
3257 x
+= NCORE
* sizeof(void *);
3258 prmax
= (volatile int *) x
;
3259 x
+= NCORE
* sizeof(void *);
3260 gr_readmiss
= (volatile double *) x
;
3261 x
+= sizeof(double);
3262 gr_writemiss
= (volatile double *) x
;
3263 x
+= sizeof(double);
3266 stack_last
= (volatile Stack_Tree
**) x
;
3267 x
+= NCORE
* sizeof(Stack_Tree
*);
3271 H_tab
= (struct H_el
**) emalloc(n
);
3275 #warning MEMLIM not set
3276 #define MEMLIM (2048)
3279 if (core_id
== 0 && verbose
)
3280 { printf("cpu0: step 0: -DMEMLIM=%d Mb minus hashtable+workqs (%g + %g Mb) leaves %g Mb\n",
3281 MEMLIM
, ((double)n
/(1048576.)), (NCORE
* LWQ_SIZE
+ GWQ_SIZE
)/(1048576.),
3282 (memlim
- memcnt
- (double) n
- (NCORE
* LWQ_SIZE
+ GWQ_SIZE
))/(1048576.));
3285 H_tab
= (struct H_el
**) prep_shmid_S((size_t) n
); /* hash_table */
3287 need_mem
= memlim
- memcnt
- ((double) NCORE
* LWQ_SIZE
) - GWQ_SIZE
;
3289 { Uerror("internal error -- shared state memory");
3292 if (core_id
== 0 && verbose
)
3293 { printf("cpu0: step 2: pre-allocate shared state memory %g Mb\n",
3294 need_mem
/(1048576.));
3297 SEG_SIZE
= need_mem
/ NCORE
;
3298 if (verbose
&& core_id
== 0)
3299 { printf("cpu0: setting segsize to %6g MB\n",
3300 SEG_SIZE
/(1048576.));
3302 #if defined(CYGWIN) || defined(__CYGWIN__)
3303 if (SEG_SIZE
> 512.*1024.*1024.)
3304 { printf("warning: reducing SEG_SIZE of %g MB to 512MB (exceeds max for Cygwin)\n",
3305 SEG_SIZE
/(1024.*1024.));
3306 SEG_SIZE
= 512.*1024.*1024.;
3310 mem_reserved
= need_mem
;
3311 while (need_mem
> 1024.)
3312 { get_mem
= need_mem
;
3314 if (get_mem
> (double) SEG_SIZE
)
3315 { get_mem
= (double) SEG_SIZE
;
3317 if (get_mem
<= 0.0) break;
3319 /* for allocating states: */
3320 x
= dc_mem_start
= (volatile char *) prep_state_mem((size_t) get_mem
);
3322 { if (shm_prep_result
== NOT_AGAIN
3323 || first_pool
!= NULL
3324 || SEG_SIZE
< (16. * 1048576.))
3329 { printf("pan: lowered segsize to 0.000000\n", SEG_SIZE
);
3331 if (SEG_SIZE
>= 1024.)
3337 need_mem
-= get_mem
;
3339 if (first_pool
== NULL
)
3340 { search_terminated
= (volatile unsigned int *) x
; /* comes first */
3341 x
+= sizeof(void *); /* maintain alignment */
3343 is_alive
= (volatile double *) x
;
3344 x
+= NCORE
* sizeof(double);
3346 sh_lock
= (volatile int *) x
;
3347 x
+= CS_NR
* sizeof(void *);
3349 grfree
= (volatile int *) x
;
3350 x
+= sizeof(void *);
3351 grfull
= (volatile int *) x
;
3352 x
+= sizeof(void *);
3353 grcnt
= (volatile int *) x
;
3354 x
+= sizeof(void *);
3355 grmax
= (volatile int *) x
;
3356 x
+= sizeof(void *);
3357 prfree
= (volatile int *) x
;
3358 x
+= NCORE
* sizeof(void *);
3359 prfull
= (volatile int *) x
;
3360 x
+= NCORE
* sizeof(void *);
3361 prcnt
= (volatile int *) x
;
3362 x
+= NCORE
* sizeof(void *);
3363 prmax
= (volatile int *) x
;
3364 x
+= NCORE
* sizeof(void *);
3365 gr_readmiss
= (volatile double *) x
;
3366 x
+= sizeof(double);
3367 gr_writemiss
= (volatile double *) x
;
3368 x
+= sizeof(double);
3370 stack_last
= (volatile Stack_Tree
**) x
;
3371 x
+= NCORE
* sizeof(Stack_Tree
*);
3373 if (((long)x
)&(sizeof(void *)-1)) /* 64-bit word alignment */
3374 { x
+= sizeof(void *)-(((long)x
)&(sizeof(void *)-1));
3378 ncomps
= (unsigned long *) x
;
3379 x
+= (256+2) * sizeof(unsigned long);
3383 dc_shared
= (sh_Allocater
*) x
; /* must be in shared memory */
3384 x
+= sizeof(sh_Allocater
);
3386 if (core_id
== 0) /* root only */
3387 { dc_shared
->dc_id
= shmid_M
;
3388 dc_shared
->dc_start
= dc_mem_start
;
3389 dc_shared
->dc_arena
= x
;
3390 dc_shared
->pattern
= 1234567; /* protection */
3391 dc_shared
->dc_size
= (long) get_mem
- (long) (x
- dc_mem_start
);
3392 dc_shared
->nxt
= (long) 0;
3394 if (last_pool
== NULL
)
3395 { first_pool
= last_pool
= dc_shared
;
3397 { last_pool
->nxt
= dc_shared
;
3398 last_pool
= dc_shared
;
3400 } else if (first_pool
== NULL
)
3401 { first_pool
= dc_shared
;
3404 if (need_mem
> 1024.)
3405 { printf("cpu0: could allocate only %g Mb of shared memory (wanted %g more)\n",
3406 got_mem
/(1048576.), need_mem
/(1048576.));
3410 { printf("cpu0: insufficient memory -- aborting.\n");
3413 /* we are still single-threaded at this point, with core_id 0 */
3414 dc_shared
= first_pool
;
3419 /* Test and Set assembly code */
3421 #if defined(i386) || defined(__i386__) || defined(__x86_64__)
3423 tas(volatile int *s
) /* tested */
3425 __asm__
__volatile__(
3433 #elif defined(__arm__)
3435 tas(volatile int *s
) /* not tested */
3437 __asm__
__volatile__(
3438 "swpb %0, %0, [%3] \n"
3444 #elif defined(sparc) || defined(__sparc__)
3446 tas(volatile int *s
) /* not tested */
3448 __asm__
__volatile__(
3449 " ldstub [%2], %0 \n"
3455 #elif defined(ia64) || defined(__ia64__)
3458 tas(volatile int *s
) /* tested */
3460 __asm__
__volatile__(
3461 " xchg4 %0=%1,%2 \n"
3468 #error missing definition of test and set operation for this platform
3472 cleanup_shm(int val
)
3473 { volatile sh_Allocater
*nxt_pool
;
3474 unsigned long cnt
= 0;
3478 { printf("cpu%d: Redundant call to cleanup_shm(%d)\n", core_id
, val
);
3483 if (search_terminated
!= NULL
)
3484 { *search_terminated
|= 16; /* cleanup_shm */
3487 for (m
= 0; m
< NR_QS
; m
++)
3488 { if (shmdt((void *) shared_mem
[m
]) > 0)
3489 { perror("shmdt detaching from shared queues");
3493 if (shmdt((void *) shmid_X
) != 0)
3494 { perror("shmdt detaching from shared state memory");
3498 if (SS
> 0 && shmdt((void *) SS
) != 0)
3500 { perror("shmdt detaching from shared bitstate arena");
3504 { /* before detaching: */
3505 for (nxt_pool
= dc_shared
; nxt_pool
!= NULL
; nxt_pool
= nxt_pool
->nxt
)
3506 { cnt
+= nxt_pool
->dc_size
;
3509 { printf("cpu0: done, %ld Mb of shared state memory left\n",
3510 cnt
/ (long)(1048576));
3513 if (shmdt((void *) H_tab
) != 0)
3514 { perror("shmdt detaching from shared hashtable");
3517 for (last_pool
= first_pool
; last_pool
!= NULL
; last_pool
= nxt_pool
)
3518 { nxt_pool
= last_pool
->nxt
;
3519 if (shmdt((void *) last_pool
->dc_start
) != 0)
3520 { perror("shmdt detaching from shared state memory");
3522 first_pool
= last_pool
= NULL
; /* precaution */
3525 /* detached from shared memory - so cannot use cpu_printf */
3527 { printf("cpu%d: done -- got %d states from queue\n",
3528 core_id
, nstates_get
);
3532 extern void give_up(int);
3533 extern void Read_Queue(int);
3540 #if defined(MA) && !defined(SEP_STATE)
3541 #error MA without SEP_STATE is not supported with multi-core
3544 #error BFS is not supported with multi-core
3547 #error SC is not supported with multi-core
3549 init_shm(); /* we are single threaded when this starts */
3551 if (core_id
== 0 && verbose
)
3552 { printf("cpu0: step 4: calling fork()\n");
3556 /* if NCORE > 1 the child or the parent should fork N-1 more times
3557 * the parent is the only process with core_id == 0 and is_parent > 0
3558 * the workers have is_parent = 0 and core_id = 1..NCORE-1
3561 { worker_pids
[0] = getpid(); /* for completeness */
3562 while (++core_id
< NCORE
) /* first worker sees core_id = 1 */
3563 { is_parent
= fork();
3564 if (is_parent
== -1)
3565 { Uerror("fork failed");
3567 if (is_parent
== 0) /* this is a worker process */
3568 { if (proxy_pid
== core_id
) /* always non-zero */
3569 { start_proxy("-r", 0); /* no return */
3571 goto adapt
; /* root process continues spawning */
3573 worker_pids
[core_id
] = is_parent
;
3575 /* note that core_id is now NCORE */
3576 if (proxy_pid
> 0 && proxy_pid
< NCORE
)
3577 { proxy_pid_snd
= fork();
3578 if (proxy_pid_snd
== -1)
3579 { Uerror("proxy fork failed");
3581 if (proxy_pid_snd
== 0)
3582 { start_proxy("-s", worker_pids
[proxy_pid
]); /* no return */
3583 } } /* else continue */
3585 { core_id
= 0; /* reset core_id for root process */
3588 { static char db0
[16]; /* good for up to 10^6 cores */
3589 static char db1
[16];
3590 adapt
: tprefix
= db0
; sprefix
= db1
;
3591 sprintf(tprefix
, "cpu%d_trail", core_id
);
3592 sprintf(sprefix
, "cpu%d_rst", core_id
);
3593 memcnt
= 0; /* count only additionally allocated memory */
3595 signal(SIGINT
, give_up
);
3597 if (proxy_pid
== 0) /* not in a cluster setup, pan_proxy must attach */
3598 { rm_shared_segments(); /* mark all shared segments for removal on exit */
3601 { cpu_printf("starting core_id %d -- pid %d\n", core_id
, getpid());
3603 #if defined(SEP_HEAP) && !defined(SEP_STATE)
3605 volatile sh_Allocater
*ptr
;
3607 for (i
= 0; i
< NCORE
&& ptr
!= NULL
; i
++)
3609 { my_heap
= (char *) ptr
->dc_arena
;
3610 my_size
= (long) ptr
->dc_size
;
3612 cpu_printf("local heap %ld MB\n", my_size
/(1048576));
3615 ptr
= ptr
->nxt
; /* local */
3617 if (my_heap
== NULL
)
3618 { printf("cpu%d: no local heap\n", core_id
);
3621 #if defined(CYGWIN) || defined(__CYGWIN__)
3623 for (i
= 0; i
< NCORE
&& ptr
!= NULL
; i
++)
3624 { ptr
= ptr
->nxt
; /* local */
3626 dc_shared
= ptr
; /* any remainder */
3628 dc_shared
= NULL
; /* used all mem for local heaps */
3632 if (core_id
== 0 && !remote_party
)
3633 { new_state(); /* cpu0 explores root */
3635 cpu_printf("done with 1st dfs, nstates %g (put %d states), read q\n",
3636 nstates
, nstates_put
);
3639 Read_Queue(core_id
); /* all cores */
3642 { cpu_printf("put %6d states into queue -- got %6d\n",
3643 nstates_put
, nstates_get
);
3646 { rm_shared_segments();
3654 int unpack_state(SM_frame
*, int);
3661 char *rval
= (char *) 0;
3664 { printf("cpu%d: grab shared zero\n", core_id
); fflush(stdout
);
3665 return (struct H_el
*) rval
;
3666 } else if (n
&(sizeof(void *)-1))
3667 { n
+= sizeof(void *)-(n
&(sizeof(void *)-1)); /* alignment */
3672 if (my_heap
!= NULL
&& my_size
> n
)
3681 { sudden_stop("pan: out of memory");
3684 /* another lock is always already in effect when this is called */
3685 /* but not always the same lock -- i.e., on different parts of the hashtable */
3686 enter_critical(GLOBAL_LOCK
); /* this must be independently mutex */
3687 #if defined(SEP_HEAP) && !defined(WIN32) && !defined(WIN64)
3688 { static int noted
= 0;
3691 printf("cpu%d: global heap has %ld bytes left, needed %d\n",
3692 core_id
, dc_shared
?dc_shared
->dc_size
:0, n
);
3696 if (dc_shared
->pattern
!= 1234567)
3697 { leave_critical(GLOBAL_LOCK
);
3698 Uerror("overrun -- memory corruption");
3701 if (dc_shared
->dc_size
< n
)
3703 { printf("Next Pool %g Mb + %d\n", memcnt
/(1048576.), n
);
3705 if (dc_shared
->nxt
== NULL
3706 || dc_shared
->nxt
->dc_arena
== NULL
3707 || dc_shared
->nxt
->dc_size
< n
)
3708 { printf("cpu%d: memcnt %g Mb + wanted %d bytes more\n",
3709 core_id
, memcnt
/ (1048576.), n
);
3710 leave_critical(GLOBAL_LOCK
);
3711 sudden_stop("out of memory -- aborting");
3712 wrapup(); /* exits */
3714 { dc_shared
= (sh_Allocater
*) dc_shared
->nxt
;
3717 rval
= (char *) dc_shared
->dc_arena
;
3718 dc_shared
->dc_arena
+= n
;
3719 dc_shared
->dc_size
-= (long) n
;
3722 printf("cpu%d grab shared (%d bytes) -- %ld left\n",
3723 core_id
, n
, dc_shared
->dc_size
);
3725 leave_critical(GLOBAL_LOCK
);
3728 memcnt
+= (double) n
;
3730 return (struct H_el
*) rval
;
3732 return (struct H_el
*) emalloc(n
);
3737 Get_Full_Frame(int n
)
3739 double cnt_start
= frame_wait
;
3741 f
= &m_workq
[n
][prfull
[n
]];
3742 while (f
->m_vsize
== 0) /* await full slot LOCK : full frame */
3746 if (!a_cycles
|| core_id
!= 0)
3748 if (*grcnt
> 0) /* accessed outside lock, but safe even if wrong */
3749 { enter_critical(GQ_RD
); /* gq - read access */
3750 if (*grcnt
> 0) /* could have changed */
3751 { f
= &m_workq
[NCORE
][*grfull
]; /* global q */
3752 if (f
->m_vsize
== 0)
3753 { /* writer is still filling the slot */
3755 f
= &m_workq
[n
][prfull
[n
]]; /* reset */
3757 { *grfull
= (*grfull
+1) % (GN_FRAMES
);
3758 enter_critical(GQ_WR
);
3759 *grcnt
= *grcnt
- 1;
3760 leave_critical(GQ_WR
);
3761 leave_critical(GQ_RD
);
3764 leave_critical(GQ_RD
);
3767 if (frame_wait
++ - cnt_start
> Delay
)
3769 { cpu_printf("timeout on q%d -- %u -- query %d\n",
3770 n
, f
, query_in_progress
);
3772 return (SM_frame
*) 0; /* timeout */
3775 if (VVERBOSE
) cpu_printf("got frame from q%d\n", n
);
3776 prfull
[n
] = (prfull
[n
] + 1) % (LN_FRAMES
);
3777 enter_critical(QLOCK(n
));
3778 prcnt
[n
]--; /* lock out increments */
3779 leave_critical(QLOCK(n
));
3784 Get_Free_Frame(int n
)
3786 double cnt_start
= free_wait
;
3788 if (VVERBOSE
) { cpu_printf("get free frame from q%d\n", n
); }
3790 if (n
== NCORE
) /* global q */
3791 { f
= &(m_workq
[n
][lrfree
]);
3793 { f
= &(m_workq
[n
][prfree
[n
]]);
3795 while (f
->m_vsize
!= 0) /* await free slot LOCK : free slot */
3797 if (free_wait
++ - cnt_start
> OneSecond
)
3799 { cpu_printf("timeout waiting for free slot q%d\n", n
);
3801 cnt_start
= free_wait
;
3802 if (someone_crashed(1))
3803 { printf("cpu%d: search terminated\n", core_id
);
3804 sudden_stop("get free frame");
3808 { prfree
[n
] = (prfree
[n
] + 1) % (LN_FRAMES
);
3809 enter_critical(QLOCK(n
));
3810 prcnt
[n
]++; /* lock out decrements */
3811 if (prmax
[n
] < prcnt
[n
])
3812 { prmax
[n
] = prcnt
[n
];
3814 leave_critical(QLOCK(n
));
3820 GlobalQ_HasRoom(void)
3824 if (*grcnt
< GN_FRAMES
) /* there seems to be room */
3825 { enter_critical(GQ_WR
); /* gq write access */
3826 if (*grcnt
< GN_FRAMES
)
3827 { if (m_workq
[NCORE
][*grfree
].m_vsize
!= 0)
3828 { /* can happen if reader is slow emptying slot */
3830 goto out
; /* dont wait: release lock and return */
3832 lrfree
= *grfree
; /* Get_Free_Frame use lrfree in this mode */
3833 *grfree
= (*grfree
+ 1) % GN_FRAMES
;
3834 *grcnt
= *grcnt
+ 1; /* count nr of slots filled -- no additional lock needed */
3835 if (*grmax
< *grcnt
) *grmax
= *grcnt
;
3836 leave_critical(GQ_WR
); /* for short lock duration */
3838 mem_put(NCORE
); /* copy state into reserved slot */
3839 rval
= 1; /* successfull handoff */
3842 out
: leave_critical(GQ_WR
);
3849 unpack_state(SM_frame
*f
, int from_q
)
3851 static struct H_el D_State
;
3856 { cpu_printf("saw control %d, expected state\n", boq
);
3861 memcpy((uchar
*) &now
, (uchar
*) f
->m_now
, vsize
);
3862 for (i
= j
= 0; i
< VMAX
; i
++, j
= (j
+1)%8)
3863 { Mask
[i
] = (f
->m_Mask
[i
/8] & (1<<j
)) ? 1 : 0;
3866 { memcpy((uchar
*) proc_offset
, (uchar
*) f
->m_p_offset
, now
._nr_pr
* sizeof(OFFT
));
3867 memcpy((uchar
*) proc_skip
, (uchar
*) f
->m_p_skip
, now
._nr_pr
* sizeof(uchar
));
3870 { memcpy((uchar
*) q_offset
, (uchar
*) f
->m_q_offset
, now
._nr_qs
* sizeof(OFFT
));
3871 memcpy((uchar
*) q_skip
, (uchar
*) f
->m_q_skip
, now
._nr_qs
* sizeof(uchar
));
3874 if (vsize
!= now
._vsz
)
3875 { cpu_printf("vsize %d != now._vsz %d (type %d) %d\n",
3876 vsize
, now
._vsz
, f
->m_boq
, f
->m_vsize
);
3878 goto correct
; /* rare event: a race */
3881 hmax
= max(hmax
, vsize
);
3884 { memcpy((uchar
*) &cur_Root
, (uchar
*) f
, sizeof(SM_frame
));
3887 if (((now
._a_t
) & 1) == 1) /* i.e., when starting nested DFS */
3888 { A_depth
= depthfound
= 0;
3889 memcpy((uchar
*)&A_Root
, (uchar
*)&now
, vsize
);
3891 nr_handoffs
= f
->nr_handoffs
;
3893 { cpu_printf("pan: state empty\n");
3898 trpt
->tau
= f
->m_tau
;
3899 trpt
->o_pm
= f
->m_o_pm
;
3901 (trpt
-1)->ostate
= &D_State
; /* stub */
3902 trpt
->ostate
= &D_State
;
3906 { stack_last
[core_id
] = (Stack_Tree
*) f
->m_stack
;
3908 #if defined(VERBOSE)
3909 if (stack_last
[core_id
])
3910 { cpu_printf("%d: UNPACK -- SET m_stack %u (%d,%d)\n",
3911 depth
, stack_last
[core_id
], stack_last
[core_id
]->pr
,
3912 stack_last
[core_id
]->t_id
);
3918 { static Trans D_Trans
;
3919 trpt
->o_t
= &D_Trans
;
3923 if ((trpt
->tau
& 4) != 4)
3924 { trpt
->tau
|= 4; /* the claim moves first */
3925 cpu_printf("warning: trpt was not up to date\n");
3929 for (i
= 0; i
< (int) now
._nr_pr
; i
++)
3930 { P0
*ptr
= (P0
*) pptr(i
);
3932 if (accpstate
[ptr
->_t
][ptr
->_p
])
3936 if (progstate
[ptr
->_t
][ptr
->_p
])
3944 if (accpstate
[EVENT_TRACE
][now
._event
])
3948 if (progstate
[EVENT_TRACE
][now
._event
])
3954 #if defined(C_States) && (HAS_TRACK==1)
3955 /* restore state of tracked C objects */
3956 c_revert((uchar
*) &(now
.c_state
[0]));
3958 c_unstack((uchar
*) f
->m_c_stack
); /* unmatched tracked data */
3965 write_root(void) /* for trail file */
3968 if (iterative
== 0 && Nr_Trails
> 1)
3969 sprintf(fnm
, "%s%d.%s", TrailFile
, Nr_Trails
-1, sprefix
);
3971 sprintf(fnm
, "%s.%s", TrailFile
, sprefix
);
3973 if (cur_Root
.m_vsize
== 0)
3974 { (void) unlink(fnm
); /* remove possible old copy */
3975 return; /* its the default initial state */
3978 if ((fd
= creat(fnm
, TMODE
)) < 0)
3980 if ((q
= strchr(TrailFile
, '.')))
3981 { *q
= '\0'; /* strip .pml */
3982 if (iterative
== 0 && Nr_Trails
-1 > 0)
3983 sprintf(fnm
, "%s%d.%s", TrailFile
, Nr_Trails
-1, sprefix
);
3985 sprintf(fnm
, "%s.%s", TrailFile
, sprefix
);
3987 fd
= creat(fnm
, TMODE
);
3990 { cpu_printf("pan: cannot create %s\n", fnm
);
3995 if (write(fd
, &cur_Root
, sizeof(SM_frame
)) != sizeof(SM_frame
))
3996 { cpu_printf("pan: error writing %s\n", fnm
);
3998 { cpu_printf("pan: wrote %s\n", fnm
);
4009 char *ssuffix
= "rst";
4012 strcpy(MyFile
, TrailFile
);
4015 { sprintf(fnm
, "%s%d.%s", MyFile
, whichtrail
, ssuffix
);
4016 fd
= open(fnm
, O_RDONLY
, 0);
4017 if (fd
< 0 && (q
= strchr(MyFile
, '.')))
4018 { *q
= '\0'; /* strip .pml */
4019 sprintf(fnm
, "%s%d.%s", MyFile
, whichtrail
, ssuffix
);
4021 fd
= open(fnm
, O_RDONLY
, 0);
4024 { sprintf(fnm
, "%s.%s", MyFile
, ssuffix
);
4025 fd
= open(fnm
, O_RDONLY
, 0);
4026 if (fd
< 0 && (q
= strchr(MyFile
, '.')))
4027 { *q
= '\0'; /* strip .pml */
4028 sprintf(fnm
, "%s.%s", MyFile
, ssuffix
);
4030 fd
= open(fnm
, O_RDONLY
, 0);
4034 { if (try_core
< NCORE
)
4035 { ssuffix
= MySuffix
;
4036 sprintf(ssuffix
, "cpu%d_rst", try_core
++);
4039 cpu_printf("no file '%s.rst' or '%s' (not an error)\n", MyFile
, fnm
);
4041 { if (read(fd
, &cur_Root
, sizeof(SM_frame
)) != sizeof(SM_frame
))
4042 { cpu_printf("read error %s\n", fnm
);
4047 (void) unpack_state(&cur_Root
, -2);
4049 cpu_printf("partial trail -- last few steps only\n");
4051 cpu_printf("restored root from '%s'\n", fnm
);
4052 printf("=====State:=====\n");
4054 for (i
= 0; i
< now
._nr_pr
; i
++)
4055 { z
= (P0
*)pptr(i
);
4056 printf("proc %2d (%s) ", i
, procname
[z
->_t
]);
4057 for (j
= 0; src_all
[j
].src
; j
++)
4058 if (src_all
[j
].tp
== (int) z
->_t
)
4059 { printf(" line %3d \"%s\" ",
4060 src_all
[j
].src
[z
->_p
], PanSource
);
4063 printf("(state %d)\n", z
->_p
);
4068 printf("================\n");
4073 unsigned long dsk_written
, dsk_drained
;
4074 void mem_drain(void);
4078 m_clear_frame(SM_frame
*f
)
4079 { int i
, clr_sz
= sizeof(SM_results
);
4081 for (i
= 0; i
<= _NP_
; i
++) /* all proctypes */
4082 { clr_sz
+= NrStates
[i
]*sizeof(uchar
);
4084 memset(f
, 0, clr_sz
);
4085 /* caution if sizeof(SM_results) > sizeof(SM_frame) */
4088 #define TargetQ_Full(n) (m_workq[n][prfree[n]].m_vsize != 0)
4089 #define TargetQ_NotFull(n) (m_workq[n][prfree[n]].m_vsize == 0)
4092 AllQueuesEmpty(void)
4099 for (q
= 0; q
< NCORE
; q
++)
4100 { if (prcnt
[q
] != 0)
4109 int remember
, target_q
;
4111 double patience
= 0.0;
4113 target_q
= (q
+ 1) % NCORE
;
4116 { f
= Get_Full_Frame(q
);
4117 if (!f
) /* 1 second timeout -- and trigger for Query */
4118 { if (someone_crashed(2))
4119 { printf("cpu%d: search terminated [code %d]\n",
4120 core_id
, search_terminated
?*search_terminated
:-1);
4125 /* to profile with cc -pg and gprof pan.exe -- set handoff depth beyond maxdepth */
4129 if (core_id
== 0 /* root can initiate termination */
4130 && remote_party
== 0 /* and only the original root */
4131 && query_in_progress
== 0 /* unless its already in progress */
4132 && AllQueuesEmpty())
4133 { f
= Get_Free_Frame(target_q
);
4134 query_in_progress
= 1; /* only root process can do this */
4135 if (!f
) { Uerror("Fatal1: no free slot"); }
4136 f
->m_boq
= QUERY
; /* initiate Query */
4138 { cpu_printf("snd QUERY to q%d (%d) into slot %d\n",
4139 target_q
, nstates_get
+ 1, prfree
[target_q
]-1);
4141 f
->m_vsize
= remember
+ 1;
4142 /* number will not change unless we receive more states */
4143 } else if (patience
++ > OneHour
) /* one hour watchdog timer */
4144 { cpu_printf("timeout -- giving up\n");
4145 sudden_stop("queue timeout");
4148 if (0) cpu_printf("timed out -- try again\n");
4151 patience
= 0.0; /* reset watchdog */
4153 if (f
->m_boq
== QUERY
)
4155 { cpu_printf("got QUERY on q%d (%d <> %d) from slot %d\n",
4156 q
, f
->m_vsize
, nstates_put
+ 1, prfull
[q
]-1);
4159 remember
= f
->m_vsize
;
4160 f
->m_vsize
= 0; /* release slot */
4162 if (core_id
== 0 && remote_party
== 0) /* original root cpu0 */
4163 { if (query_in_progress
== 1 /* didn't send more states in the interim */
4164 && *grfree
+ 1 == remember
) /* no action on global queue meanwhile */
4165 { if (verbose
) cpu_printf("Termination detected\n");
4166 if (TargetQ_Full(target_q
))
4168 cpu_printf("warning: target q is full\n");
4170 f
= Get_Free_Frame(target_q
);
4171 if (!f
) { Uerror("Fatal2: no free slot"); }
4173 f
->m_boq
= QUIT
; /* send final Quit, collect stats */
4174 f
->m_vsize
= 111; /* anything non-zero will do */
4176 cpu_printf("put QUIT on q%d\n", target_q
);
4178 { if (verbose
) cpu_printf("Stale Query\n");
4183 query_in_progress
= 0;
4185 { if (TargetQ_Full(target_q
))
4187 cpu_printf("warning: forward query - target q full\n");
4189 f
= Get_Free_Frame(target_q
);
4191 cpu_printf("snd QUERY response to q%d (%d <> %d) in slot %d\n",
4192 target_q
, remember
, *grfree
+ 1, prfree
[target_q
]-1);
4193 if (!f
) { Uerror("Fatal4: no free slot"); }
4195 if (*grfree
+ 1 == remember
) /* no action on global queue */
4196 { f
->m_boq
= QUERY
; /* forward query, to root */
4197 f
->m_vsize
= remember
;
4199 { f
->m_boq
= QUERY_F
; /* no match -- busy */
4200 f
->m_vsize
= 112; /* anything non-zero */
4202 if (dsk_written
!= dsk_drained
)
4210 if (f
->m_boq
== QUERY_F
)
4212 { cpu_printf("got QUERY_F on q%d from slot %d\n", q
, prfull
[q
]-1);
4214 f
->m_vsize
= 0; /* release slot */
4216 if (core_id
== 0 && remote_party
== 0) /* original root cpu0 */
4217 { if (verbose
) cpu_printf("No Match on Query\n");
4218 query_in_progress
= 0;
4220 { if (TargetQ_Full(target_q
))
4221 { if (verbose
) cpu_printf("warning: forwarding query_f, target queue full\n");
4223 f
= Get_Free_Frame(target_q
);
4224 if (verbose
) cpu_printf("forward QUERY_F to q%d into slot %d\n",
4225 target_q
, prfree
[target_q
]-1);
4226 if (!f
) { Uerror("Fatal5: no free slot"); }
4227 f
->m_boq
= QUERY_F
; /* cannot terminate yet */
4228 f
->m_vsize
= 113; /* anything non-zero */
4231 if (dsk_written
!= dsk_drained
)
4238 if (f
->m_boq
== QUIT
)
4239 { if (0) cpu_printf("done -- local memcnt %g Mb\n", memcnt
/(1048576.));
4240 retrieve_info((SM_results
*) f
); /* collect and combine stats */
4242 { cpu_printf("received Quit\n");
4245 f
->m_vsize
= 0; /* release incoming slot */
4247 { f
= Get_Free_Frame(target_q
); /* new outgoing slot */
4248 if (!f
) { Uerror("Fatal6: no free slot"); }
4249 m_clear_frame(f
); /* start with zeroed stats */
4250 record_info((SM_results
*) f
);
4251 f
->m_boq
= QUIT
; /* forward combined results */
4252 f
->m_vsize
= 114; /* anything non-zero */
4254 cpu_printf("fwd Results to q%d\n", target_q
);
4256 break; /* successful termination */
4259 /* else: 0<= boq <= 255, means STATE transfer */
4260 if (unpack_state(f
, q
) != 0)
4262 f
->m_vsize
= 0; /* release slot */
4263 if (VVERBOSE
) cpu_printf("Got state\n");
4265 if (search_terminated
!= NULL
4266 && *search_terminated
== 0)
4267 { new_state(); /* explore successors */
4268 memset((uchar
*) &cur_Root
, 0, sizeof(SM_frame
)); /* avoid confusion */
4275 if (verbose
) cpu_printf("done got %d put %d\n", nstates_get
, nstates_put
);
4280 give_up(int unused_x
)
4282 if (search_terminated
!= NULL
)
4283 { *search_terminated
|= 32; /* give_up */
4286 { was_interrupted
= 1;
4288 cpu_printf("Give Up\n");
4291 } else /* we are already terminating */
4292 { cpu_printf("SIGINT\n");
4297 check_overkill(void)
4299 vmax_seen
= (vmax_seen
+ 7)/ 8;
4300 vmax_seen
*= 8; /* round up to a multiple of 8 */
4305 && VMAX
- vmax_seen
> 8)
4308 printf("cpu0: max VMAX value seen in this run: ");
4310 printf("cpu0: recommend recompiling with ");
4312 printf("-DVMAX=%d\n", vmax_seen
);
4317 mem_put(int q
) /* handoff state to other cpu, workq q */
4322 { vsize
= (vsize
+ 7)/8; vsize
*= 8; /* round up */
4323 printf("pan: recompile with -DVMAX=N with N >= %d\n", vsize
);
4326 if (now
._nr_pr
> PMAX
)
4327 { printf("pan: recompile with -DPMAX=N with N >= %d\n", now
._nr_pr
);
4330 if (now
._nr_qs
> QMAX
)
4331 { printf("pan: recompile with -DQMAX=N with N >= %d\n", now
._nr_qs
);
4334 if (vsize
> vmax_seen
) vmax_seen
= vsize
;
4335 if (now
._nr_pr
> pmax_seen
) pmax_seen
= now
._nr_pr
;
4336 if (now
._nr_qs
> qmax_seen
) qmax_seen
= now
._nr_qs
;
4338 f
= Get_Free_Frame(q
); /* not called in likely deadlock states */
4339 if (!f
) { Uerror("Fatal3: no free slot"); }
4341 if (VVERBOSE
) cpu_printf("putting state into q%d\n", q
);
4343 memcpy((uchar
*) f
->m_now
, (uchar
*) &now
, vsize
);
4344 memset((uchar
*) f
->m_Mask
, 0, (VMAX
+7)/8 * sizeof(char));
4345 for (i
= j
= 0; i
< VMAX
; i
++, j
= (j
+1)%8)
4347 { f
->m_Mask
[i
/8] |= (1<<j
);
4351 { memcpy((uchar
*) f
->m_p_offset
, (uchar
*) proc_offset
, now
._nr_pr
* sizeof(OFFT
));
4352 memcpy((uchar
*) f
->m_p_skip
, (uchar
*) proc_skip
, now
._nr_pr
* sizeof(uchar
));
4355 { memcpy((uchar
*) f
->m_q_offset
, (uchar
*) q_offset
, now
._nr_qs
* sizeof(OFFT
));
4356 memcpy((uchar
*) f
->m_q_skip
, (uchar
*) q_skip
, now
._nr_qs
* sizeof(uchar
));
4358 #if defined(C_States) && (HAS_TRACK==1) && (HAS_STACK==1)
4359 c_stack((uchar
*) f
->m_c_stack
); /* save unmatched tracked data */
4362 f
->m_stack
= stack_last
[core_id
];
4364 f
->nr_handoffs
= nr_handoffs
+1;
4365 f
->m_tau
= trpt
->tau
;
4366 f
->m_o_pm
= trpt
->o_pm
;
4368 f
->m_vsize
= vsize
; /* must come last - now the other cpu can see it */
4370 if (query_in_progress
== 1)
4371 query_in_progress
= 2; /* make sure we know, if a query makes the rounds */
4376 int Dsk_W_Nr
, Dsk_R_Nr
;
4377 int dsk_file
= -1, dsk_read
= -1;
4378 unsigned long dsk_written
, dsk_drained
;
4382 #if defined(WIN32) || defined(WIN64)
4383 #define RFLAGS (O_RDONLY|O_BINARY)
4384 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC|O_BINARY)
4386 #define RFLAGS (O_RDONLY)
4387 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC)
4395 if (dsk_written
> 0)
4396 { cpu_printf("dsk_written %d states in %d files\ncpu%d: dsk_drained %6d states\n",
4397 dsk_written
, Dsk_W_Nr
, core_id
, dsk_drained
);
4400 for (i
= 0; i
< Dsk_W_Nr
; i
++)
4401 { sprintf(dsk_name
, "Q%.3d_%.3d.tmp", i
, core_id
);
4409 int q
= (core_id
+ 1) % NCORE
; /* target q */
4413 || dsk_written
<= dsk_drained
)
4417 while (dsk_written
> dsk_drained
4418 && TargetQ_NotFull(q
))
4419 { f
= Get_Free_Frame(q
);
4420 if (!f
) { Uerror("Fatal: unhandled condition"); }
4422 if ((dsk_drained
+1)%MAX_DSK_FILE
== 0) /* 100K states max per file */
4423 { (void) close(dsk_read
); /* close current read handle */
4424 sprintf(dsk_name
, "Q%.3d_%.3d.tmp", Dsk_R_Nr
++, core_id
);
4425 (void) unlink(dsk_name
); /* remove current file */
4426 sprintf(dsk_name
, "Q%.3d_%.3d.tmp", Dsk_R_Nr
, core_id
);
4427 cpu_printf("reading %s\n", dsk_name
);
4428 dsk_read
= open(dsk_name
, RFLAGS
); /* open next file */
4430 { Uerror("could not open dsk file");
4432 if (read(dsk_read
, &g
, sizeof(SM_frame
)) != sizeof(SM_frame
))
4433 { Uerror("bad dsk file read");
4437 memcpy(f
, &g
, sizeof(SM_frame
));
4438 f
->m_vsize
= sz
; /* last */
4447 int i
, j
, q
= (core_id
+ 1) % NCORE
; /* target q */
4450 { printf("pan: recompile with -DVMAX=N with N >= %d\n", vsize
);
4453 if (now
._nr_pr
> PMAX
)
4454 { printf("pan: recompile with -DPMAX=N with N >= %d\n", now
._nr_pr
);
4457 if (now
._nr_qs
> QMAX
)
4458 { printf("pan: recompile with -DQMAX=N with N >= %d\n", now
._nr_qs
);
4462 if (VVERBOSE
) cpu_printf("filing state for q%d\n", q
);
4464 memcpy((uchar
*) f
.m_now
, (uchar
*) &now
, vsize
);
4465 memset((uchar
*) f
.m_Mask
, 0, (VMAX
+7)/8 * sizeof(char));
4466 for (i
= j
= 0; i
< VMAX
; i
++, j
= (j
+1)%8)
4468 { f
.m_Mask
[i
/8] |= (1<<j
);
4472 { memcpy((uchar
*)f
.m_p_offset
, (uchar
*)proc_offset
, now
._nr_pr
*sizeof(OFFT
));
4473 memcpy((uchar
*)f
.m_p_skip
, (uchar
*)proc_skip
, now
._nr_pr
*sizeof(uchar
));
4476 { memcpy((uchar
*) f
.m_q_offset
, (uchar
*) q_offset
, now
._nr_qs
*sizeof(OFFT
));
4477 memcpy((uchar
*) f
.m_q_skip
, (uchar
*) q_skip
, now
._nr_qs
*sizeof(uchar
));
4479 #if defined(C_States) && (HAS_TRACK==1) && (HAS_STACK==1)
4480 c_stack((uchar
*) f
.m_c_stack
); /* save unmatched tracked data */
4483 f
.m_stack
= stack_last
[core_id
];
4485 f
.nr_handoffs
= nr_handoffs
+1;
4486 f
.m_tau
= trpt
->tau
;
4487 f
.m_o_pm
= trpt
->o_pm
;
4491 if (query_in_progress
== 1)
4492 { query_in_progress
= 2;
4495 { sprintf(dsk_name
, "Q%.3d_%.3d.tmp", Dsk_W_Nr
, core_id
);
4496 dsk_file
= open(dsk_name
, WFLAGS
, 0644);
4497 dsk_read
= open(dsk_name
, RFLAGS
);
4498 if (dsk_file
< 0 || dsk_read
< 0)
4499 { cpu_printf("File: <%s>\n", dsk_name
);
4500 Uerror("cannot open diskfile");
4502 Dsk_W_Nr
++; /* nr of next file to open */
4503 cpu_printf("created temporary diskfile %s\n", dsk_name
);
4504 } else if ((dsk_written
+1)%MAX_DSK_FILE
== 0)
4505 { close(dsk_file
); /* close write handle */
4506 sprintf(dsk_name
, "Q%.3d_%.3d.tmp", Dsk_W_Nr
++, core_id
);
4507 dsk_file
= open(dsk_name
, WFLAGS
, 0644);
4509 { cpu_printf("File: <%s>\n", dsk_name
);
4510 Uerror("aborting: cannot open new diskfile");
4512 cpu_printf("created temporary diskfile %s\n", dsk_name
);
4514 if (write(dsk_file
, &f
, sizeof(SM_frame
)) != sizeof(SM_frame
))
4515 { Uerror("aborting -- disk write failed (disk full?)");
4525 if (search_terminated
== NULL
4526 || *search_terminated
!= 0) /* not a full crash check */
4529 iam_alive(); /* on every transition of Down */
4531 mem_drain(); /* maybe call this also on every Up */
4533 if (depth
> z_handoff
/* above handoff limit */
4535 && !a_cycles
/* not in liveness mode */
4538 && boq
== -1 /* not mid-rv */
4541 && (trpt
->tau
&4) /* claim moves first */
4542 && !((trpt
-1)->tau
&128) /* not a stutter move */
4544 && !(trpt
->tau
&8)) /* not an atomic move */
4545 { int q
= (core_id
+ 1) % NCORE
; /* circular handoff */
4547 if (prcnt
[q
] < LN_FRAMES
)
4549 if (TargetQ_NotFull(q
)
4550 && (dfs_phase2
== 0 || prcnt
[core_id
] > 0))
4557 rval
= GlobalQ_HasRoom();
4563 { void mem_file(void);
4571 return 0; /* i.e., no handoff */
4575 mem_put_acc(void) /* liveness mode */
4576 { int q
= (core_id
+ 1) % NCORE
;
4578 if (search_terminated
== NULL
4579 || *search_terminated
!= 0)
4585 /* some tortured use of preprocessing: */
4586 #if !defined(NGQ) || defined(USE_DISK)
4587 if (TargetQ_Full(q
))
4591 if (GlobalQ_HasRoom())
4599 #if !defined(NGQ) || defined(USE_DISK)
4607 #if defined(WIN32) || defined(WIN64)
4609 init_shm(void) /* initialize shared work-queues */
4614 if (core_id
== 0 && verbose
)
4615 { printf("cpu0: step 3: allocate shared work-queues %g Mb\n",
4616 ((double) NCORE
* LWQ_SIZE
+ GWQ_SIZE
) / (1048576.));
4618 for (m
= 0; m
< NR_QS
; m
++) /* last q is global 1 */
4619 { double qsize
= (m
== NCORE
) ? GWQ_SIZE
: LWQ_SIZE
;
4620 sprintf(key
, "Global\\pan_%s_%.3d", PanSource
, m
);
4622 { shmid
[m
] = CreateFileMapping(
4623 INVALID_HANDLE_VALUE
, /* use paging file */
4624 NULL
, /* default security */
4625 PAGE_READWRITE
, /* access permissions */
4626 0, /* high-order 4 bytes */
4627 qsize
, /* low-order bytes, size in bytes */
4629 } else /* worker nodes just open these segments */
4630 { shmid
[m
] = OpenFileMapping(
4631 FILE_MAP_ALL_ACCESS
, /* read/write access */
4632 FALSE
, /* children do not inherit handle */
4635 if (shmid
[m
] == NULL
)
4636 { fprintf(stderr
, "cpu%d: could not create or open shared queues\n",
4642 shared_mem
[m
] = (char *) MapViewOfFile(shmid
[m
], FILE_MAP_ALL_ACCESS
, 0, 0, 0);
4643 if (shared_mem
[m
] == NULL
)
4644 { fprintf(stderr
, "cpu%d: cannot attach shared q%d (%d Mb)\n",
4645 core_id
, m
+1, (int) (qsize
/(1048576.)));
4652 m_workq
[m
] = (SM_frame
*) shared_mem
[m
];
4654 { int nframes
= (m
== NCORE
) ? GN_FRAMES
: LN_FRAMES
;
4655 for (n
= 0; n
< nframes
; n
++)
4656 { m_workq
[m
][n
].m_vsize
= 0;
4657 m_workq
[m
][n
].m_boq
= 0;
4661 { fprintf(stderr
, "pan: check './pan --' for usage details\n");
4662 pan_exit(1); /* calls cleanup_shm */
4667 prep_shmid_S(size_t n
) /* either sets SS or H_tab, WIN32/WIN64 */
4672 if (verbose
&& core_id
== 0)
4675 printf("cpu0: step 1: allocate shared bitstate %g Mb\n",
4676 (double) n
/ (1048576.));
4678 printf("cpu0: step 1: allocate shared hastable %g Mb\n",
4679 (double) n
/ (1048576.));
4683 if (memcnt
+ (double) n
> memlim
)
4684 { printf("cpu%d: S %8g + %d Kb exceeds memory limit of %8g Mb\n",
4685 core_id
, memcnt
/1024., n
/1024, memlim
/(1048576.));
4686 printf("cpu%d: insufficient memory -- aborting\n", core_id
);
4691 /* make key different from queues: */
4692 sprintf(key
, "Global\\pan_%s_%.3d", PanSource
, NCORE
+2); /* different from qs */
4694 if (core_id
== 0) /* root */
4695 { shmid_S
= CreateFileMapping(INVALID_HANDLE_VALUE
, NULL
,
4697 PAGE_READWRITE
, (n
>>32), (n
& 0xffffffff), key
);
4699 PAGE_READWRITE
, 0, n
, key
);
4701 memcnt
+= (double) n
;
4703 { shmid_S
= OpenFileMapping(FILE_MAP_ALL_ACCESS
, FALSE
, key
);
4705 if (shmid_S
== NULL
)
4708 fprintf(stderr
, "cpu%d: cannot %s shared bitstate",
4709 core_id
, core_id
?"open":"create");
4711 fprintf(stderr
, "cpu%d: cannot %s shared hashtable",
4712 core_id
, core_id
?"open":"create");
4714 fprintf(stderr
, "pan: check './pan --' for usage details\n");
4718 rval
= (char *) MapViewOfFile(shmid_S
, FILE_MAP_ALL_ACCESS
, 0, 0, 0); /* attach */
4719 if ((char *) rval
== NULL
)
4720 { fprintf(stderr
, "cpu%d: cannot attach shared bitstate or hashtable\n", core_id
);
4721 fprintf(stderr
, "pan: check './pan --' for usage details\n");
4725 rval
= (char *) emalloc(n
);
4727 return (uchar
*) rval
;
4731 prep_state_mem(size_t n
) /* WIN32/WIN64 sets memory arena for states */
4734 static int cnt
= 3; /* start larger than earlier ftok calls */
4736 if (verbose
&& core_id
== 0)
4737 { printf("cpu0: step 2+: pre-allocate memory arena %d of %g Mb\n",
4738 cnt
-3, (double) n
/ (1048576.));
4741 if (memcnt
+ (double) n
> memlim
)
4742 { printf("cpu%d: error: M %.0f + %.0f exceeds memory limit of %.0f Kb\n",
4743 core_id
, memcnt
/1024.0, (double) n
/1024.0, memlim
/1024.0);
4748 sprintf(key
, "Global\\pan_%s_%.3d", PanSource
, NCORE
+cnt
); cnt
++;
4751 { shmid_M
= CreateFileMapping(INVALID_HANDLE_VALUE
, NULL
,
4753 PAGE_READWRITE
, (n
>>32), (n
& 0xffffffff), key
);
4755 PAGE_READWRITE
, 0, n
, key
);
4758 { shmid_M
= OpenFileMapping(FILE_MAP_ALL_ACCESS
, FALSE
, key
);
4760 if (shmid_M
== NULL
)
4761 { printf("cpu%d: failed to get pool of shared memory nr %d of size %d\n",
4763 printf("pan: check './pan --' for usage details\n");
4766 rval
= (char *) MapViewOfFile(shmid_M
, FILE_MAP_ALL_ACCESS
, 0, 0, 0); /* attach */
4769 { printf("cpu%d: failed to attach pool of shared memory nr %d of size %d\n",
4773 return (uchar
*) rval
;
4777 init_HT(unsigned long n
) /* WIN32/WIN64 version */
4783 if (verbose
) printf("cpu%d: initialization for Windows\n", core_id
);
4788 { printf("cpu0: steps 0,1: no -DMEMLIM set\n");
4792 printf("cpu0: steps 0,1: -DMEMLIM=%d Mb - (hashtable %g Mb + workqueues %g Mb)\n",
4793 MEMLIM
, ((double)n
/(1048576.)), ((double) NCORE
* LWQ_SIZE
+ GWQ_SIZE
)/(1048576.));
4795 get_mem
= NCORE
* sizeof(double) + (1 + CS_NR
) * sizeof(void *)+ 4*sizeof(void *) + 2*sizeof(double);
4796 /* NCORE * is_alive + search_terminated + CS_NR * sh_lock + 6 gr vars */
4797 get_mem
+= 4 * NCORE
* sizeof(void *);
4799 get_mem
+= (NCORE
) * sizeof(Stack_Tree
*);
4800 /* NCORE * stack_last */
4802 x
= (volatile char *) prep_state_mem((size_t) get_mem
);
4803 shmid_X
= (void *) x
;
4805 { printf("cpu0: could not allocate shared memory, see ./pan --\n");
4808 search_terminated
= (volatile unsigned int *) x
; /* comes first */
4809 x
+= sizeof(void *); /* maintain alignment */
4811 is_alive
= (volatile double *) x
;
4812 x
+= NCORE
* sizeof(double);
4814 sh_lock
= (volatile int *) x
;
4815 x
+= CS_NR
* sizeof(void *); /* allow 1 word per entry */
4817 grfree
= (volatile int *) x
;
4818 x
+= sizeof(void *);
4819 grfull
= (volatile int *) x
;
4820 x
+= sizeof(void *);
4821 grcnt
= (volatile int *) x
;
4822 x
+= sizeof(void *);
4823 grmax
= (volatile int *) x
;
4824 x
+= sizeof(void *);
4825 prfree
= (volatile int *) x
;
4826 x
+= NCORE
* sizeof(void *);
4827 prfull
= (volatile int *) x
;
4828 x
+= NCORE
* sizeof(void *);
4829 prcnt
= (volatile int *) x
;
4830 x
+= NCORE
* sizeof(void *);
4831 prmax
= (volatile int *) x
;
4832 x
+= NCORE
* sizeof(void *);
4833 gr_readmiss
= (volatile double *) x
;
4834 x
+= sizeof(double);
4835 gr_writemiss
= (volatile double *) x
;
4836 x
+= sizeof(double);
4839 stack_last
= (volatile Stack_Tree
**) x
;
4840 x
+= NCORE
* sizeof(Stack_Tree
*);
4844 H_tab
= (struct H_el
**) emalloc(n
);
4848 #warning MEMLIM not set
4849 #define MEMLIM (2048)
4852 if (core_id
== 0 && verbose
)
4853 printf("cpu0: step 0: -DMEMLIM=%d Mb - (hashtable %g Mb + workqueues %g Mb) = %g Mb for state storage\n",
4854 MEMLIM
, ((double)n
/(1048576.)), ((double) NCORE
* LWQ_SIZE
+ GWQ_SIZE
)/(1048576.),
4855 (memlim
- memcnt
- (double) n
- ((double) NCORE
* LWQ_SIZE
+ GWQ_SIZE
))/(1048576.));
4857 H_tab
= (struct H_el
**) prep_shmid_S((size_t) n
); /* hash_table */
4859 get_mem
= memlim
- memcnt
- ((double) NCORE
) * LWQ_SIZE
- GWQ_SIZE
;
4861 { Uerror("internal error -- shared state memory");
4864 if (core_id
== 0 && verbose
)
4865 { printf("cpu0: step 2: shared state memory %g Mb\n",
4866 get_mem
/(1048576.));
4868 x
= dc_mem_start
= (char *) prep_state_mem((size_t) get_mem
); /* for states */
4870 { printf("cpu%d: insufficient memory -- aborting\n", core_id
);
4874 search_terminated
= (volatile unsigned int *) x
; /* comes first */
4875 x
+= sizeof(void *); /* maintain alignment */
4877 is_alive
= (volatile double *) x
;
4878 x
+= NCORE
* sizeof(double);
4880 sh_lock
= (volatile int *) x
;
4881 x
+= CS_NR
* sizeof(int);
4883 grfree
= (volatile int *) x
;
4884 x
+= sizeof(void *);
4885 grfull
= (volatile int *) x
;
4886 x
+= sizeof(void *);
4887 grcnt
= (volatile int *) x
;
4888 x
+= sizeof(void *);
4889 grmax
= (volatile int *) x
;
4890 x
+= sizeof(void *);
4891 prfree
= (volatile int *) x
;
4892 x
+= NCORE
* sizeof(void *);
4893 prfull
= (volatile int *) x
;
4894 x
+= NCORE
* sizeof(void *);
4895 prcnt
= (volatile int *) x
;
4896 x
+= NCORE
* sizeof(void *);
4897 prmax
= (volatile int *) x
;
4898 x
+= NCORE
* sizeof(void *);
4899 gr_readmiss
= (volatile double *) x
;
4900 x
+= sizeof(double);
4901 gr_writemiss
= (volatile double *) x
;
4902 x
+= sizeof(double);
4905 stack_last
= (volatile Stack_Tree
**) x
;
4906 x
+= NCORE
* sizeof(Stack_Tree
*);
4908 if (((long)x
)&(sizeof(void *)-1)) /* word alignment */
4909 { x
+= sizeof(void *)-(((long)x
)&(sizeof(void *)-1)); /* 64-bit align */
4913 ncomps
= (unsigned long *) x
;
4914 x
+= (256+2) * sizeof(unsigned long);
4917 dc_shared
= (sh_Allocater
*) x
; /* in shared memory */
4918 x
+= sizeof(sh_Allocater
);
4920 if (core_id
== 0) /* root only */
4921 { dc_shared
->dc_id
= shmid_M
;
4922 dc_shared
->dc_start
= (void *) dc_mem_start
;
4923 dc_shared
->dc_arena
= x
;
4924 dc_shared
->pattern
= 1234567;
4925 dc_shared
->dc_size
= (long) get_mem
- (long) (x
- dc_mem_start
);
4926 dc_shared
->nxt
= NULL
;
4931 #if defined(WIN32) || defined(WIN64) || defined(__i386__) || defined(__x86_64__)
/* Test-and-set primitive used by the multi-core spin locks.
 * On Win32/Win64 and x86 it is implemented via the compiler intrinsic
 * InterlockedBitTestAndSet (atomically sets bit 1 of *s and returns its
 * previous value).  NOTE(review): the #else branch (original line 4938,
 * visible below) deliberately fails compilation on platforms without a
 * known atomic test-and-set; intervening #else/#endif lines are elided
 * in this extraction. */
4932 extern BOOLEAN
InterlockedBitTestAndSet(LONG
volatile* Base
, LONG Bit
);
/* tas: returns nonzero if the lock bit was already set (lock busy),
 * zero if this caller acquired it. */
4934 tas(volatile LONG
*s
)
4935 { return InterlockedBitTestAndSet(s
, 1);
4938 #error missing definition of test and set operation for this platform
4942 cleanup_shm(int val
)
4944 static int nibis
= 0;
4947 { printf("cpu%d: Redundant call to cleanup_shm(%d)\n", core_id
, val
);
4952 if (search_terminated
!= NULL
)
4953 { *search_terminated
|= 16; /* cleanup_shm */
4956 for (m
= 0; m
< NR_QS
; m
++)
4957 { if (shmid
[m
] != NULL
)
4958 { UnmapViewOfFile((char *) shared_mem
[m
]);
4959 CloseHandle(shmid
[m
]);
4962 UnmapViewOfFile((void *) shmid_X
);
4963 CloseHandle((void *) shmid_M
);
4966 if (shmid_S
!= NULL
)
4967 { UnmapViewOfFile(SS
);
4968 CloseHandle(shmid_S
);
4971 if (core_id
== 0 && verbose
)
4972 { printf("cpu0: done, %ld Mb of shared state memory left\n",
4973 dc_shared
->dc_size
/ (long)(1048576));
4975 if (shmid_S
!= NULL
)
4976 { UnmapViewOfFile(H_tab
);
4977 CloseHandle(shmid_S
);
4979 shmid_M
= (void *) (dc_shared
->dc_id
);
4980 UnmapViewOfFile((char *) dc_shared
->dc_start
);
4981 CloseHandle(shmid_M
);
4984 /* detached from shared memory - so cannot use cpu_printf */
4986 { printf("cpu%d: done -- got %d states from queue\n",
4987 core_id
, nstates_get
);
4996 #if defined(MA) && !defined(SEP_STATE)
4997 #error MA requires SEP_STATE in multi-core mode
5000 #error BFS is not supported in multi-core mode
5003 #error SC is not supported in multi-core mode
5005 init_shm(); /* we are single threaded when this starts */
5006 signal(SIGINT
, give_up
); /* windows control-c interrupt */
5008 if (core_id
== 0 && verbose
)
5009 { printf("cpu0: step 4: creating additional workers (proxy %d)\n",
5013 if NCORE
> 1 the child
or the parent should fork N
-1 more times
5014 the parent is the only process with core_id
== 0 and is_parent
> 0
5015 the
others (workers
) have is_parent
= 0 and core_id
= 1..NCORE
-1
5017 if (core_id
== 0) /* root starts up the workers */
5018 { worker_pids
[0] = (DWORD
) getpid(); /* for completeness */
5019 while (++core_id
< NCORE
) /* first worker sees core_id = 1 */
5021 STARTUPINFO si
= { sizeof(si
) };
5022 PROCESS_INFORMATION pi
;
5024 if (proxy_pid
== core_id
) /* always non-zero */
5025 { sprintf(cmdline
, "pan_proxy.exe -r %s-Q%d -Z%d",
5026 o_cmdline
, getpid(), core_id
);
5028 { sprintf(cmdline
, "pan.exe %s-Q%d -Z%d",
5029 o_cmdline
, getpid(), core_id
);
5031 if (verbose
) printf("cpu%d: spawn %s\n", core_id
, cmdline
);
5033 is_parent
= CreateProcess(0, cmdline
, 0, 0, FALSE
, 0, 0, 0, &si
, &pi
);
5035 { Uerror("fork failed");
5037 worker_pids
[core_id
] = pi
.dwProcessId
;
5038 worker_handles
[core_id
] = pi
.hProcess
;
5040 { cpu_printf("created core %d, pid %d\n",
5041 core_id
, pi
.dwProcessId
);
5043 if (proxy_pid
== core_id
) /* we just created the receive half */
5044 { /* add proxy send, store pid in proxy_pid_snd */
5045 sprintf(cmdline
, "pan_proxy.exe -s %s-Q%d -Z%d -Y%d",
5046 o_cmdline
, getpid(), core_id
, worker_pids
[proxy_pid
]);
5047 if (verbose
) printf("cpu%d: spawn %s\n", core_id
, cmdline
);
5048 is_parent
= CreateProcess(0, cmdline
, 0,0, FALSE
, 0,0,0, &si
, &pi
);
5050 { Uerror("fork failed");
5052 proxy_pid_snd
= pi
.dwProcessId
;
5053 proxy_handle_snd
= pi
.hProcess
;
5055 { cpu_printf("created core %d, pid %d (send proxy)\n",
5056 core_id
, pi
.dwProcessId
);
5058 core_id
= 0; /* reset core_id for root process */
5060 { static char db0
[16]; /* good for up to 10^6 cores */
5061 static char db1
[16];
5062 tprefix
= db0
; sprefix
= db1
;
5063 sprintf(tprefix
, "cpu%d_trail", core_id
); /* avoid conflicts on file access */
5064 sprintf(sprefix
, "cpu%d_rst", core_id
);
5065 memcnt
= 0; /* count only additionally allocated memory */
5068 { cpu_printf("starting core_id %d -- pid %d\n", core_id
, getpid());
5070 if (core_id
== 0 && !remote_party
)
5071 { new_state(); /* root starts the search */
5073 cpu_printf("done with 1st dfs, nstates %g (put %d states), start reading q\n",
5074 nstates
, nstates_put
);
5077 Read_Queue(core_id
); /* all cores */
5080 { cpu_printf("put %6d states into queue -- got %6d\n",
5081 nstates_put
, nstates_get
);
5091 init_SS(unsigned long n
)
/* init_SS: allocate the bitstate array SS, n bytes, obtained from
 * shared memory (or emalloc fallback) via prep_shmid_S().
 * NOTE(review): the function's opening/closing braces (original lines
 * 5092/5094) are elided in this extraction. */
5093 SS
= (uchar
*) prep_shmid_S((size_t) n
);
5101 clock_t crash_stamp
;
5103 #if !defined(WIN32) && !defined(WIN64)
5104 struct tms start_tm
;
5110 #if defined(WIN32) || defined(WIN64)
5111 start_time
= clock();
5113 start_time
= times(&start_tm
);
5119 { clock_t stop_time
;
5121 #if !defined(WIN32) && !defined(WIN64)
5123 stop_time
= times(&stop_tm
);
5124 delta_time
= ((double) (stop_time
- start_time
)) / ((double) sysconf(_SC_CLK_TCK
));
5126 stop_time
= clock();
5127 delta_time
= ((double) (stop_time
- start_time
)) / ((double) CLOCKS_PER_SEC
);
5129 if (readtrail
|| delta_time
< 0.00) return;
5131 if (core_id
== 0 && nstates
> (double) 0)
5132 { printf("\ncpu%d: elapsed time %.3g seconds (%g states visited)\n", core_id
, delta_time
, nstates
);
5133 if (delta_time
> 0.01)
5134 { printf("cpu%d: rate %g states/second\n", core_id
, nstates
/delta_time
);
5136 { void check_overkill(void);
5140 printf("\npan: elapsed time %.3g seconds\n", delta_time
);
5141 if (delta_time
> 0.01)
5142 { printf("pan: rate %9.8g states/second\n", nstates
/delta_time
);
5144 { printf("pan: avg transition delay %.5g usec\n",
5145 delta_time
/(nstates
+truncs
));
5152 double t_alerts
[17];
5157 printf("crash alert intervals:\n");
5158 for (i
= 0; i
< 17; i
++)
5159 { printf("%d\t%g\n", i
, t_alerts
[i
]);
5166 if (crash_stamp
!= (clock_t) 0)
5171 #if defined(WIN32) || defined(WIN64)
5172 delta_time
= ((double) (clock() - crash_stamp
)) / ((double) CLOCKS_PER_SEC
);
5174 delta_time
= ((double) (times(&start_tm
) - crash_stamp
)) / ((double) sysconf(_SC_CLK_TCK
));
5176 for (i
= 0; i
< 16; i
++)
5177 { if (delta_time
<= (i
*30))
5178 { t_alerts
[i
] = delta_time
;
5181 if (i
== 16) t_alerts
[i
] = delta_time
;
5184 printf("cpu%d: crash alert off\n", core_id
);
5186 crash_stamp
= (clock_t) 0;
5190 crash_test(double maxtime
)
/* crash_test: watchdog check used in multi-core mode.  On the first
 * call (crash_stamp == 0) it records the current clock and returns;
 * on later calls it returns nonzero when more than maxtime seconds
 * have elapsed since that stamp.  Uses clock()/CLOCKS_PER_SEC on
 * Windows and times()/sysconf(_SC_CLK_TCK) elsewhere.
 * NOTE(review): the #else/#endif lines and the early "return 0" path
 * after starting the timer (original lines 5196-5203, 5206, 5208) are
 * elided in this extraction. */
5191 { double delta_time
;
5192 if (crash_stamp
== (clock_t) 0)
5193 { /* start timing */
5194 #if defined(WIN32) || defined(WIN64)
5195 crash_stamp
= clock();
/* non-Windows branch of the elided #else: */
5197 crash_stamp
= times(&start_tm
);
/* presumably guarded by "if (verbose)" (elided original line 5199) */
5200 { printf("cpu%d: crash detection\n", core_id
);
5204 #if defined(WIN32) || defined(WIN64)
5205 delta_time
= ((double) (clock() - crash_stamp
)) / ((double) CLOCKS_PER_SEC
);
/* non-Windows branch of the elided #else: */
5207 delta_time
= ((double) (times(&start_tm
) - crash_stamp
)) / ((double) sysconf(_SC_CLK_TCK
));
/* nonzero => the watchdog interval expired */
5209 return (delta_time
>= maxtime
);
5216 depth
= mreached
= 0;
5219 trpt
->tau
|= 4; /* the claim moves first */
5221 for (i
= 0; i
< (int) now
._nr_pr
; i
++)
5222 { P0
*ptr
= (P0
*) pptr(i
);
5225 && accpstate
[ptr
->_t
][ptr
->_p
])
5230 && progstate
[ptr
->_t
][ptr
->_p
])
5237 if (accpstate
[EVENT_TRACE
][now
._event
])
5241 if (progstate
[EVENT_TRACE
][now
._event
])
5247 Mask
[0] = Mask
[1] = 1; /* _nr_pr, _nr_qs */
5249 { i
= &(now
._a_t
) - (uchar
*) &now
;
5250 Mask
[i
] = 1; /* _a_t */
5255 i
= &(now
._cnt
[0]) - (uchar
*) &now
;
5257 Mask
[i
++] = 1; /* _cnt[] */
5263 && (a_cycles
&& (trpt
->o_pm
&2)))
5264 { now
._a_t
= 2; /* set the A-bit */
5265 now
._cnt
[0] = now
._nr_pr
+ 1;
5267 printf("%3d: fairness Rule 1, cnt=%d, _a_t=%d\n",
5268 depth
, now
._cnt
[now
._a_t
&1], now
._a_t
);
5272 c_stack_start
= (char *) &i
; /* meant to be read-only */
5273 #if defined(HAS_CODE) && defined (C_INIT)
5274 C_INIT
; /* initialization of data that must precede fork() */
5277 #if defined(C_States) && (HAS_TRACK==1)
5278 /* capture initial state of tracked C objects */
5279 c_update((uchar
*) &(now
.c_state
[0]));
5282 if (readtrail
) getrail(); /* no return */
5288 #if defined(C_States) && defined(HAS_STACK) && (HAS_TRACK==1)
5289 /* initial state of tracked & unmatched objects */
5290 c_stack((uchar
*) &(svtack
->c_stack
[0]));
5302 new_state(); /* start 1st DFS */
/* do_reverse: undo a forward move of transition t for process II
 * (M carries the move's return code); the actual undo code is the
 * generated REVERSE_MOVES table selected by the process's current
 * state tt.  NOTE(review): return-type line and the switch framing
 * around the #include (original lines 5307, 5309, 5312+) are elided
 * in this extraction. */
5308 do_reverse(Trans
*t
, short II
, uchar M
)
5310 int tt
= (int) ((P0
*)this)->_p
;
5311 #include REVERSE_MOVES
/* file-scope bookkeeping for event-trace matching: last channel
 * operation type ('s'/'r', 'n' = none) and queue id seen by require() */
5317 static char _tp
= 'n'; static int _qid
= 0;
/* do_transit: attempt the forward move of transition t for process II;
 * returns the generated move's result (0 = move not executable).
 * When the executing process is the EVENT_TRACE monitor, boq (the
 * blocked-on-queue marker) is temporarily cleared so trace moves are
 * not mistaken for mid-rendezvous states, and restored afterwards via
 * the redefined "continue".  NOTE(review): return-type line, opening
 * brace, and several #ifdef VERBOSE/#else/#endif lines (original
 * 5319, 5321, 5323-5327, 5331-5336, 5338+) are elided in this
 * extraction. */
5320 do_transit(Trans
*t
, short II
)
5322 int tt
= (int) ((P0
*)this)->_p
;
5328 uchar ot
= (uchar
) ((P0
*)this)->_t
;
5329 if (ot
== EVENT_TRACE
) boq
= -1;
/* inside the generated moves, "continue" must restore boq and bail out
 * (event-trace build) or simply report a failed move (normal build) */
5330 #define continue { boq = oboq; return 0; }
5332 #define continue return 0
5334 uchar ot
= (uchar
) ((P0
*)this)->_t
;
/* generated forward-move code, dispatched on ot/tt */
5337 #include FORWARD_MOVES
5340 if (ot
== EVENT_TRACE
) boq
= oboq
;
5347 require(char tp
, int qid
)
5349 _tp
= tp
; _qid
= qid
;
5351 if (now
._event
!= endevent
)
5352 for (t
= trans
[EVENT_TRACE
][now
._event
]; t
; t
= t
->nxt
)
5353 { if (do_transit(t
, EVENT_TRACE
))
5354 { now
._event
= t
->st
;
5355 reached
[EVENT_TRACE
][t
->st
] = 1;
5357 printf(" event_trace move to -> %d\n", t
->st
);
5361 if (accpstate
[EVENT_TRACE
][now
._event
])
5362 (trpt
+1)->o_pm
|= 2;
5364 if (progstate
[EVENT_TRACE
][now
._event
])
5365 (trpt
+1)->o_pm
|= 4;
5368 #ifdef NEGATED_TRACE
5369 if (now
._event
== endevent
)
5374 uerror("event_trace error (all events matched)");
5381 for (t
= t
->nxt
; t
; t
= t
->nxt
)
5382 { if (do_transit(t
, EVENT_TRACE
))
5383 Uerror("non-determinism in event-trace");
5389 printf(" event_trace miss '%c' -- %d, %d, %d\n",
5390 tp
, qid
, now
._event
, t
->forw
);
5393 #ifdef NEGATED_TRACE
5394 now
._event
= endevent
; /* only 1st try will count -- fixed 4.2.6 */
5399 uerror("event_trace error (no matching event)");
/* enabled: predicate usable in never claims / LTL formulas -- reports
 * whether process pid has at least one executable transition in the
 * current state.  It temporarily retargets "this" at the queried
 * process, probes every outgoing transition with do_transit(), and
 * (in elided code) reverses any successful probe and restores othis.
 * A process may not query itself (Uerror below).
 * NOTE(review): return-type line, several guard/else lines, and the
 * probe-undo epilogue (original lines 5406, 5410, 5412-5418, 5423+)
 * are elided in this extraction; the visible tail is cut mid-loop. */
5407 enabled(int iam
, int pid
)
5408 { Trans
*t
; uchar
*othis
= this;
5409 int res
= 0; int tt
; uchar ot
;
/* external pid numbering is off by one w.r.t. the internal table */
5411 /* if (pid > 0) */ pid
++;
5414 Uerror("used: enabled(pid=thisproc)");
/* reject out-of-range pids against the current process count */
5415 if (pid
< 0 || pid
>= (int) now
._nr_pr
)
5419 tt
= (int) ((P0
*)this)->_p
;
5420 ot
= (uchar
) ((P0
*)this)->_t
;
/* walk all transitions leaving the process's current local state */
5421 for (t
= trans
[ot
][tt
]; t
; t
= t
->nxt
)
5422 if (do_transit(t
, (short) pid
))
5433 { clock_t stop_time
;
5435 #if !defined(WIN32) && !defined(WIN64)
5437 stop_time
= times(&stop_tm
);
5438 delta_time
= ((double) (stop_time
- start_time
)) / ((double) sysconf(_SC_CLK_TCK
));
5440 stop_time
= clock();
5441 delta_time
= ((double) (stop_time
- start_time
)) / ((double) CLOCKS_PER_SEC
);
5443 if (delta_time
> 0.01)
5444 { printf("t= %6.3g ", delta_time
);
5445 printf("R= %7.0g", nstates
/delta_time
);
5448 if (quota
> 0.1 && delta_time
> quota
)
5449 { printf("Time limit of %6.3g minutes exceeded\n", quota
/60.0);
5452 leave_critical(GLOBAL_LOCK
);
5453 sudden_stop("time-limit");
5463 enter_critical(GLOBAL_LOCK
); /* snapshot */
5464 printf("cpu%d: ", core_id
);
5466 printf("Depth= %7ld States= %8.3g ",
5468 (long) (nr_handoffs
* z_handoff
) +
5471 printf("Transitions= %8.3g ", nstates
+truncs
);
5473 printf("Nodes= %7d ", nr_states
);
5475 printf("Memory= %9.3f\t", memcnt
/1048576.);
5479 leave_critical(GLOBAL_LOCK
);
5487 && (stackwrite
= creat(stackfile
, TMODE
)) < 0)
5488 Uerror("cannot create stackfile");
5490 if (write(stackwrite
, trail
, DDD
*sizeof(Trail
))
5491 != DDD
*sizeof(Trail
))
5492 Uerror("stackfile write error -- disk is full?");
5494 memmove(trail
, &trail
[DDD
], (HHH
-DDD
+2)*sizeof(Trail
));
5495 memset(&trail
[HHH
-DDD
+2], 0, (omaxdepth
- HHH
+ DDD
- 2)*sizeof(Trail
));
5503 memmove(&trail
[DDD
], trail
, (HHH
-DDD
+2)*sizeof(Trail
));
5506 || lseek(stackwrite
, -DDD
* (off_t
) sizeof(Trail
), SEEK_CUR
) == -1)
5507 Uerror("disk2stack lseek error");
5510 && (stackread
= open(stackfile
, 0)) < 0)
5511 Uerror("cannot open stackfile");
5513 if (lseek(stackread
, (CNT1
-CNT2
)*DDD
* (off_t
) sizeof(Trail
), SEEK_SET
) == -1)
5514 Uerror("disk2stack lseek error");
5516 have
= read(stackread
, trail
, DDD
*sizeof(Trail
));
5517 if (have
!= DDD
*sizeof(Trail
))
5518 Uerror("stackfile read error");
5523 { if (x
< 0 || x
>= MAXPROC
|| !proc_offset
[x
])
5526 return (uchar
*) pptr(x
);
5530 * new_state() is the main DFS search routine in the verifier
5531 * it has a lot of code ifdef-ed together to support
5532 * different search modes, which makes it quite unreadable.
5533 * if you are studying the code, first use the C preprocessor
5534 * to generate a specific version from the pan.c source,
5536 * gcc -E -DNOREDUCE -DBITSTATE pan.c > ppan.c
5537 * and then study the resulting file, rather than this one
5539 #if !defined(BFS) && (!defined(BITSTATE) || !defined(MA))
5545 { if (cnt
< 512) N_succ
[cnt
]++;
5546 else printf("tally_succ: cnt %d exceeds range\n", cnt
);
5551 { int i
; double sum
= 0.0;
5553 printf("Successor counts:\n");
5554 for (i
= 0; i
< 512; i
++)
5555 { sum
+= (double) N_succ
[i
];
5557 for (i
= 0; i
< 512; i
++)
5558 { if (N_succ
[i
] > 0)
5559 { printf("%3d %10d (%.4g %% of total)\n",
5560 i
, N_succ
[i
], (100.0 * (double) N_succ
[i
])/sum
);
5561 w_avg
+= (double) i
* (double) N_succ
[i
];
5563 if (sum
> N_succ
[0])
5564 printf("mean %.4g (without 0: %.4g)\n", w_avg
/ sum
, w_avg
/ (sum
- (double) N_succ
[0]));
5578 short II
, JJ
= 0, kk
;
5581 short From
= BASE
, To
= now
._nr_pr
-1;
5583 short From
= now
._nr_pr
-1, To
= BASE
;
5587 cpu_printf("%d: Down - %s %saccepting [pids %d-%d]\n",
5588 depth
, (trpt
->tau
&4)?"claim":"program",
5589 (trpt
->o_pm
&2)?"":"non-", From
, To
);
5593 { trpt
->sched_limit
= (trpt
-1)->sched_limit
;
5595 { trpt
->sched_limit
= 0;
5599 if (depth
> hiwater
)
5605 printf("zap %d: %d (maxdepth now %d)\n",
5606 CNT1
, hiwater
, maxdepth
);
5609 trpt
->tau
&= ~(16|32|64); /* make sure these are off */
5610 #if defined(FULLSTACK) && defined(MA)
5620 (trpt
+1)->o_n
= 1; /* not a deadlock: as below */
5623 (trpt
-1)->tau
|= 16; /* worstcase guess: as below */
5625 #if NCORE>1 && defined(FULL_TRAIL)
5633 if (depth
>= maxdepth
)
5636 printf("error: max search depth too small\n");
5639 { uerror("depth limit reached");
5643 (trpt
+1)->o_n
= 1; /* not a deadlock */
5646 (trpt
-1)->tau
|= 16; /* worstcase guess */
5648 #if NCORE>1 && defined(FULL_TRAIL)
5656 #if (defined(FULLSTACK) && !defined(MA)) || NCORE>1
5657 /* if atomic or rv move, carry forward previous state */
5658 trpt
->ostate
= (trpt
-1)->ostate
;
5661 if ((trpt
->tau
&4) || ((trpt
-1)->tau
&128))
5663 if (boq
== -1) { /* if not mid-rv */
5665 /* this check should now be redundant
5666 * because the seed state also appears
5667 * on the 1st dfs stack and would be
5668 * matched in hstore below
5670 if ((now
._a_t
&1) && depth
> A_depth
)
5671 { if (!memcmp((char *)&A_Root
,
5672 (char *)&now
, vsize
))
5674 depthfound
= A_depth
;
5676 printf("matches seed\n");
5679 uerror("non-progress cycle");
5681 uerror("acceptance cycle");
5683 #if NCORE>1 && defined(FULL_TRAIL)
5691 printf("not seed\n");
5695 if (!(trpt
->tau
&8)) /* if no atomic move */
5699 II
= bstore((char *)&now
, vsize
);
5700 trpt
->j6
= j1
; trpt
->j7
= j2
;
5701 JJ
= LL
[j1
] && LL
[j2
];
5707 JJ
= II
; /* worstcase guess for p.o. */
5710 II
= bstore((char *)&now
, vsize
);
5714 II
= gstore((char *)&now
, vsize
, 0);
5721 II
= hstore((char *)&now
, vsize
);
5727 kk
= (II
== 1 || II
== 2);
5729 #if NCORE==1 || defined (SEP_STATE)
5730 if (II
== 2 && ((trpt
->o_pm
&2) || ((trpt
-1)->o_pm
&2)))
5733 if (!fairness
|| ((now
._a_t
&1) && now
._cnt
[1] == 1)) /* 5.1.4 */
5735 if (a_cycles
&& !fairness
) /* 5.1.6 -- example by Hirofumi Watanabe */
5739 II
= 3; /* Schwoon & Esparza 2005, Gastin&Moro 2004 */
5741 printf("state match on dfs stack\n");
5746 #if defined(FULLSTACK) && defined(BITSTATE)
5747 if (!JJ
&& (now
._a_t
&1) && depth
> A_depth
)
5749 uchar o_a_t
= now
._a_t
;
5750 now
._a_t
&= ~(1|16|32);
5754 printf("state match on 1st dfs stack\n");
5761 if (II
== 3 && a_cycles
&& (now
._a_t
&1))
5764 if (fairness
&& now
._cnt
[1] > 1) /* was != 0 */
5767 printf(" fairness count non-zero\n");
5776 same_case
: if (Lstate
) depthfound
= Lstate
->D
;
5778 uerror("non-progress cycle");
5780 uerror("acceptance cycle");
5782 #if NCORE>1 && defined(FULL_TRAIL)
5793 #if NCORE>1 && !defined(SEP_STATE) && defined(V_PROVISO)
5794 if (II
!= 0 && (!Lstate
|| Lstate
->cpu_id
< core_id
))
5795 { (trpt
-1)->tau
|= 16;
5798 if ((II
&& JJ
) || (II
== 3))
5799 { /* marker for liveness proviso */
5801 (trpt
-1)->tau
|= 16;
5806 #if NCORE>1 && !defined(SEP_STATE) && defined(V_PROVISO)
5807 if (!(II
!= 0 && (!Lstate
|| Lstate
->cpu_id
< core_id
)))
5808 { /* treat as stack state */
5809 (trpt
-1)->tau
|= 16;
5811 { /* treat as non-stack state */
5812 (trpt
-1)->tau
|= 64;
5816 { /* successor outside stack */
5817 (trpt
-1)->tau
|= 64;
5823 #if NCORE>1 && defined(FULL_TRAIL)
5833 { static long sdone
= (long) 0; long ndone
;
5835 #if defined(ZAPH) && defined(BITSTATE)
5836 zstates
+= (double) hfns
;
5838 ndone
= (unsigned long) (nstates
/((double) FREQ
));
5842 #if defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(MA)
5843 if (nstates
> ((double)(ONE_L
<<(ssize
+1))))
5844 { void resize_hashtable(void);
5848 #if defined(ZAPH) && defined(BITSTATE)
5849 if (zstates
> ((double)(ONE_L
<<(ssize
-2))))
5850 { /* more than half the bits set */
5851 void zap_hashtable(void);
5859 if (write(svfd
, (uchar
*) &now
, vprefix
) != vprefix
)
5860 { fprintf(efd
, "writing %s.svd failed\n", PanSource
);
5864 #if defined(MA) && defined(W_XPT)
5865 if ((unsigned long) nstates
%W_XPT
== 0)
5866 { void w_xpoint(void);
5871 #if defined(FULLSTACK) || defined(CNTRSTACK)
5874 #if defined(FULLSTACK) && !defined(MA)
5875 printf("%d: putting %u (%d)\n", depth
,
5877 (trpt
->ostate
)?trpt
->ostate
->tagged
:0);
5879 printf("%d: putting\n", depth
);
5884 trpt
->ostate
= Lstate
;
5888 if (depth
> mreached
)
5893 trpt
->tau
&= ~(1|2); /* timeout and -request off */
5899 if (now
._nr_pr
== 0) /* claim terminated */
5900 uerror("end state in claim reached");
5901 check_claim(((P0
*)pptr(0))->_p
);
5903 if (trpt
->tau
&4) /* must make a claimmove */
5906 if ((now
._a_t
&2) /* A-bit set */
5907 && now
._cnt
[now
._a_t
&1] == 1)
5909 now
._cnt
[now
._a_t
&1] = 0;
5912 printf("%3d: fairness Rule 3.: _a_t = %d\n",
5922 /* Look for a process with only safe transitions */
5923 /* (special rules apply in the 2nd dfs) */
5924 if (boq
== -1 && From
!= To
5928 && (depth
< z_handoff
)
5933 && ((a_cycles
) || (!a_cycles
&& depth
< z_handoff
))
5940 !((trpt
-1)->proviso
))
5947 !(((char *)&((trpt
-1)->ostate
->state
))[0] & 128))
5949 !(((char *)&(trpt
->ostate
->state
))[0] & 128))
5955 (trpt
-1)->ostate
->proviso
== 0)
5957 trpt
->ostate
->proviso
== 0)
5964 for (II
= From
; II
<= To
; II
++)
5966 for (II
= From
; II
>= To
; II
--)
5969 Resume
: /* pick up here if preselect fails */
5971 tt
= (int) ((P0
*)this)->_p
;
5972 ot
= (uchar
) ((P0
*)this)->_t
;
5973 if (trans
[ot
][tt
]->atom
& 8)
5974 { t
= trans
[ot
][tt
];
5981 From
= To
= II
; /* the process preselected */
5985 trpt
->tau
|= 32; /* preselect marker */
5988 printf("%3d: proc %d Pre", depth
, II
);
5989 printf("Selected (om=%d, tau=%d)\n",
5992 printf("%3d: proc %d PreSelected (tau=%d)\n",
5993 depth
, II
, trpt
->tau
);
6001 #if !defined(NOREDUCE) || (defined(ETIM) && !defined(VERI))
6004 /* The Main Expansion Loop over Processes */
6005 trpt
->o_pm
&= ~(8|16|32|64); /* fairness-marks */
6007 if (fairness
&& boq
== -1
6009 && (!(trpt
->tau
&4) && !((trpt
-1)->tau
&128))
6012 { /* A_bit = 1; Cnt = N in acc states with A_bit 0 */
6015 if (a_cycles
&& (trpt
->o_pm
&2))
6016 { /* Accepting state */
6018 now
._cnt
[now
._a_t
&1] = now
._nr_pr
+ 1;
6021 printf("%3d: fairness Rule 1: cnt=%d, _a_t=%d\n",
6022 depth
, now
._cnt
[now
._a_t
&1], now
._a_t
);
6026 { /* A_bit = 0 when Cnt 0 */
6027 if (now
._cnt
[now
._a_t
&1] == 1)
6029 now
._cnt
[now
._a_t
&1] = 0;
6032 printf("%3d: fairness Rule 3: _a_t = %d\n",
6039 for (II
= From
; II
<= To
; II
++)
6041 for (II
= From
; II
>= To
; II
--)
6045 /* no rendezvous with same proc */
6046 if (boq
!= -1 && trpt
->pr
== II
) continue;
6049 /* limit max nr of interleavings */
6055 && (trpt
-1)->pr
!= II
6056 && trpt
->sched_limit
>= sched_max
)
6064 tt
= (int) ((P0
*)this)->_p
;
6065 ot
= (uchar
) ((P0
*)this)->_t
;
6067 /* don't repeat a previous preselected expansion */
6068 /* could hit this if reduction proviso was false */
6079 if (_m
>_n
||(_n
>3&&_m
!=0)) _n
=_m
;
6080 continue; /* did it before */
6083 trpt
->o_pm
&= ~1; /* no move in this pid yet */
6085 (trpt
+1)->o_event
= now
._event
;
6087 /* Fairness: Cnt++ when Cnt == II */
6089 trpt
->o_pm
&= ~64; /* didn't apply rule 2 */
6094 && now
._cnt
[now
._a_t
&1] == II
+2)
6095 { now
._cnt
[now
._a_t
&1] -= 1;
6097 /* claim need not participate */
6099 now
._cnt
[now
._a_t
&1] = 1;
6102 printf("%3d: proc %d fairness ", depth
, II
);
6103 printf("Rule 2: --cnt to %d (%d)\n",
6104 now
._cnt
[now
._a_t
&1], now
._a_t
);
6106 trpt
->o_pm
|= (32|64);
6110 if (!provided(II
, ot
, tt
, t
)) continue;
6112 /* check all trans of proc II - escapes first */
6116 (trpt
+1)->pr
= (uchar
) II
;
6119 for (ooi
= eoi
= 0, t
= trans
[ot
][tt
]; t
; t
= t
->nxt
, ooi
++)
6120 { if (strcmp(t
->tp
, "else") == 0)
6125 { t
= trans
[ot
][tt
];
6127 printf("randomizer: suppressed, saw else\n");
6132 printf("randomizer: skip %d in %d\n", eoi
, ooi
);
6134 for (t
= trans
[ot
][tt
]; t
; t
= t
->nxt
)
6135 if (eoi
-- <= 0) break;
6138 for ( ; t
&& ooi
> 0; t
= t
->nxt
, ooi
--)
6140 for (t
= trans
[ot
][tt
]; t
; t
= t
->nxt
)
6144 /* exploring all transitions from
6145 * a single escape state suffices
6147 if (trpt
->e_state
> 0
6148 && trpt
->e_state
!= t
->e_trans
)
6151 printf("skip 2nd escape %d (did %d before)\n",
6152 t
->e_trans
, trpt
->e_state
);
6159 #include FORWARD_MOVES
6160 P999
: /* jumps here when move succeeds */
6162 if (!(_m
= do_transit(t
, II
))) continue;
6169 && (trpt
-1)->pr
!= II
)
6170 { trpt
->sched_limit
= 1 + (trpt
-1)->sched_limit
;
6175 /* for branching-time, can accept reduction only if */
6176 /* the persistent set contains just 1 transition */
6177 { if ((trpt
->tau
&32) && (trpt
->o_pm
&1))
6179 trpt
->o_pm
|= 1; /* we moved */
6182 trpt
->o_pm
|= 1; /* we moved */
6185 if (loopstate
[ot
][tt
])
6188 printf("exiting from loopstate:\n");
6197 #if defined(VERBOSE) || defined(CHECK)
6199 cpu_printf("%3d: proc %d exec %d \n", depth
, II
, t
->t_id
);
6201 cpu_printf("%3d: proc %d exec %d, %d to %d, %s %s %s %saccepting [tau=%d]\n",
6202 depth
, II
, t
->forw
, tt
, t
->st
, t
->tp
,
6203 (t
->atom
&2)?"atomic":"",
6204 (boq
!= -1)?"rendez-vous":"",
6205 (trpt
->o_pm
&2)?"":"non-", trpt
->tau
);
6208 cpu_printf("\t(escape to state %d)\n", t
->st
);
6212 cpu_printf("\t(randomizer %d)\n", ooi
);
6219 now
._last
= II
- BASE
;
6222 trpt
->e_state
= t
->e_trans
;
6225 trpt
->pr
= (uchar
) II
;
6227 trpt
->o_pm
&= ~(2|4);
6229 { ((P0
*)this)->_p
= t
->st
;
6230 /* moved down reached[ot][t->st] = 1; */
6235 #if (ACCEPT_LAB>0 && !defined(NP)) || (PROG_LAB>0 && defined(HAS_NP))
6238 #define P__Q ((P0 *)pptr(ii))
6241 /* state 1 of np_ claim is accepting */
6242 if (((P0
*)pptr(0))->_p
== 1)
6245 for (ii
= 0; ii
< (int) now
._nr_pr
; ii
++)
6246 { if (accpstate
[P__Q
->_t
][P__Q
->_p
])
6252 #if defined(HAS_NP) && PROG_LAB>0
6253 for (ii
= 0; ii
< (int) now
._nr_pr
; ii
++)
6254 { if (progstate
[P__Q
->_t
][P__Q
->_p
])
6262 trpt
->o_t
= t
; trpt
->o_n
= _n
;
6263 trpt
->o_ot
= ot
; trpt
->o_tt
= tt
;
6264 trpt
->o_To
= To
; trpt
->o_m
= _m
;
6269 if (boq
!= -1 || (t
->atom
&2))
6272 /* atomic sequence in claim */
6278 { if ((trpt
-1)->tau
&4)
6283 /* if claim allowed timeout, so */
6284 /* does the next program-step: */
6285 if (((trpt
-1)->tau
&1) && !(trpt
->tau
&4))
6291 if (boq
== -1 && (t
->atom
&2))
6292 { From
= To
= II
; nlinks
++;
6295 { From
= BASE
; To
= now
._nr_pr
-1;
6297 { From
= now
._nr_pr
-1; To
= BASE
;
6300 #if NCORE>1 && defined(FULL_TRAIL)
6302 { Push_Stack_Tree(II
, t
->t_id
);
6305 goto Down
; /* pseudo-recursion */
6308 cpu_printf("%d: Up - %s\n", depth
,
6309 (trpt
->tau
&4)?"claim":"program");
6317 #if defined(MA) || NCORE>1
6318 if (depth
<= 0) return;
6319 /* e.g., if first state is old, after a restart */
6323 && depth
< hiwater
- (HHH
-DDD
) + 2)
6330 printf("unzap %d: %d\n", CNT2
, hiwater
);
6334 if (trpt
->o_pm
&128) /* fairness alg */
6335 { now
._cnt
[now
._a_t
&1] = trpt
->bup
.oval
;
6336 _n
= 1; trpt
->o_pm
&= ~128;
6338 #if defined(VERBOSE) || defined(CHECK)
6339 printf("%3d: reversed fairness default move\n", depth
);
6346 { int d
; Trail
*trl
;
6348 for (d
= 1; d
< depth
; d
++)
6349 { trl
= getframe(depth
-d
); /* was (trpt-d) */
6351 { now
._last
= trl
->pr
- BASE
;
6355 now
._last
= (depth
<1)?0:(trpt
-1)->pr
;
6359 now
._event
= trpt
->o_event
;
6362 if ((now
._a_t
&1) && depth
<= A_depth
)
6363 return; /* to checkcycles() */
6365 t
= trpt
->o_t
; _n
= trpt
->o_n
;
6366 ot
= trpt
->o_ot
; II
= trpt
->pr
;
6367 tt
= trpt
->o_tt
; this = pptr(II
);
6368 To
= trpt
->o_To
; _m
= trpt
->o_m
;
6373 _m
= do_reverse(t
, II
, _m
);
6375 #include REVERSE_MOVES
6376 R999
: /* jumps here when done */
6379 cpu_printf("%3d: proc %d reverses %d, %d to %d\n",
6380 depth
, II
, t
->forw
, tt
, t
->st
);
6381 cpu_printf("\t%s [abit=%d,adepth=%d,tau=%d,%d]\n",
6382 t
->tp
, now
._a_t
, A_depth
, trpt
->tau
, (trpt
-1)->tau
);
6385 /* pass the proviso tags */
6386 if ((trpt
->tau
&8) /* rv or atomic */
6388 (trpt
-1)->tau
|= 16;
6390 if ((trpt
->tau
&8) /* rv or atomic */
6392 (trpt
-1)->tau
|= 64;
6401 (trans
[ot
][tt
])->om
= _m
; /* head of list */
6403 /* i.e., not set if rv fails */
6406 #if defined(VERI) && !defined(NP)
6407 if (II
== 0 && verbose
&& !reached
[ot
][t
->st
])
6409 printf("depth %d: Claim reached state %d (line %d)\n",
6410 depth
, t
->st
, src_claim
[t
->st
]);
6414 reached
[ot
][t
->st
] = 1;
6415 reached
[ot
][tt
] = 1;
6418 else trpt
->e_state
= 0; /* undo */
6420 if (_m
>_n
||(_n
>3&&_m
!=0)) _n
=_m
;
6421 ((P0
*)this)->_p
= tt
;
6425 { t
= trans
[ot
][tt
];
6427 printf("randomizer: continue for %d more\n", ooi
);
6433 printf("randomizer: done\n");
6437 /* Fairness: undo Rule 2 */
6443 if (now
._cnt
[now
._a_t
&1] == 1)
6444 now
._cnt
[now
._a_t
&1] = 2;
6446 now
._cnt
[now
._a_t
&1] += 1;
6448 printf("%3d: proc %d fairness ", depth
, II
);
6449 printf("undo Rule 2, cnt=%d, _a_t=%d\n",
6450 now
._cnt
[now
._a_t
&1], now
._a_t
);
6452 trpt
->o_pm
&= ~(32|64);
6465 if (II
== 0) break; /* never claim */
6467 } /* all processes */
6469 tally_succ(trpt
->n_succ
);
6472 if (_n
== 0 /* no process could move */
6477 && trpt
->sched_limit
>= sched_max
)
6478 { _n
= 1; /* not a deadlock */
6482 /* Fairness: undo Rule 2 */
6483 if (trpt
->o_pm
&32) /* remains if proc blocked */
6486 if (now
._cnt
[now
._a_t
&1] == 1)
6487 now
._cnt
[now
._a_t
&1] = 2;
6489 now
._cnt
[now
._a_t
&1] += 1;
6491 printf("%3d: proc -- fairness ", depth
);
6492 printf("undo Rule 2, cnt=%d, _a_t=%d\n",
6493 now
._cnt
[now
._a_t
&1], now
._a_t
);
6499 && _n
== 0 /* nobody moved */
6501 && !(trpt
->tau
&4) /* in program move */
6503 && !(trpt
->tau
&8) /* not an atomic one */
6505 && ((trpt
->tau
&1) || endstate())
6508 && (trpt
->tau
&1) /* already tried timeout */
6513 && !((trpt
->tau
&32) && (_n
== 0 || (trpt
->tau
&16)))
6515 && now
._cnt
[now
._a_t
&1] > 0) /* needed more procs */
6517 trpt
->o_pm
|= 128 | ((trpt
-1)->o_pm
&(2|4));
6518 trpt
->bup
.oval
= now
._cnt
[now
._a_t
&1];
6519 now
._cnt
[now
._a_t
&1] = 1;
6526 From
= BASE
; To
= now
._nr_pr
-1;
6528 From
= now
._nr_pr
-1; To
= BASE
;
6530 #if defined(VERBOSE) || defined(CHECK)
6531 printf("%3d: fairness default move ", depth
);
6532 printf("(all procs block)\n");
6537 Q999
: /* returns here with _n>0 when done */;
6540 now
._cnt
[now
._a_t
&1] = 0;
6543 printf("%3d: fairness undo Rule 1, _a_t=%d\n",
6549 now
._cnt
[now
._a_t
&1] = 1;
6552 printf("%3d: fairness undo Rule 3, _a_t=%d\n",
6560 /* at least one move that was preselected at this */
6561 /* level, blocked or was a loop control flow point */
6562 if ((trpt
->tau
&32) && (_n
== 0 || (trpt
->tau
&16)))
6564 /* preselected move - no successors outside stack */
6565 if ((trpt
->tau
&32) && !(trpt
->tau
&64))
6568 { From
= BASE
; To
= now
._nr_pr
-1;
6570 { From
= now
._nr_pr
-1; To
= BASE
;
6573 printf("%3d: proc %d UnSelected (_n=%d, tau=%d)\n",
6574 depth
, II
+1, _n
, trpt
->tau
);
6576 _n
= 0; trpt
->tau
&= ~(16|32|64);
6578 if (II
<= To
) /* II already decremented */
6580 if (II
>= BASE
) /* II already decremented */
6587 /* at least one move that was preselected at this */
6588 /* level, blocked or truncated at the next level */
6589 /* implied: #ifdef FULLSTACK */
6590 if ((trpt
->tau
&32) && (_n
== 0 || (trpt
->tau
&16)))
6593 printf("%3d: proc %d UnSelected (_n=%d, tau=%d)\n",
6594 depth
, II
+1, (int) _n
, trpt
->tau
);
6596 if (a_cycles
&& (trpt
->tau
&16))
6597 { if (!(now
._a_t
&1))
6600 printf("%3d: setting proviso bit\n", depth
);
6605 (trpt
-1)->proviso
= 1;
6611 if ((trpt
-1)->ostate
)
6612 ((char *)&((trpt
-1)->ostate
->state
))[0] |= 128;
6614 ((char *)&(trpt
->ostate
->state
))[0] |= 128;
6619 if ((trpt
-1)->ostate
)
6620 (trpt
-1)->ostate
->proviso
= 1;
6622 trpt
->ostate
->proviso
= 1;
6626 From
= BASE
; To
= now
._nr_pr
-1;
6628 From
= now
._nr_pr
-1; To
= BASE
;
6630 _n
= 0; trpt
->tau
&= ~(16|32|64);
6631 goto Again
; /* do full search */
6632 } /* else accept reduction */
6635 { From
= BASE
; To
= now
._nr_pr
-1;
6637 { From
= now
._nr_pr
-1; To
= BASE
;
6639 _n
= 0; trpt
->tau
&= ~(16|32|64);
6641 if (II
<= To
) /* already decremented */
6643 if (II
>= BASE
) /* already decremented */
6652 if (_n
== 0 || ((trpt
->tau
&4) && (trpt
->tau
&2)))
6655 cpu_printf("%3d: no move [II=%d, tau=%d, boq=%d]\n",
6656 depth
, II
, trpt
->tau
, boq
);
6659 /* ok if a rendez-vous fails: */
6660 if (boq
!= -1) goto Done
;
6662 /* ok if no procs or we're at maxdepth */
6663 if ((now
._nr_pr
== 0 && (!strict
|| qs_empty()))
6667 || depth
>= maxdepth
-1) goto Done
;
6668 if ((trpt
->tau
&8) && !(trpt
->tau
&4))
6669 { trpt
->tau
&= ~(1|8);
6670 /* 1=timeout, 8=atomic */
6672 From
= BASE
; To
= now
._nr_pr
-1;
6674 From
= now
._nr_pr
-1; To
= BASE
;
6677 cpu_printf("%3d: atomic step proc %d unexecutable\n", depth
, II
+1);
6680 trpt
->tau
|= 4; /* switch to claim */
6685 if (!(trpt
->tau
&1)) /* didn't try timeout yet */
6691 if (trpt
->tau
&2) /* requested */
6696 cpu_printf("%d: timeout\n", depth
);
6701 { /* only claim can enable timeout */
6703 && !((trpt
-1)->tau
&4))
6704 /* blocks inside an atomic */ goto BreakOut
;
6706 cpu_printf("%d: req timeout\n",
6709 (trpt
-1)->tau
|= 2; /* request */
6710 #if NCORE>1 && defined(FULL_TRAIL)
6719 cpu_printf("%d: timeout\n", depth
);
6730 { trpt
->tau
|= 4; /* claim stuttering */
6731 trpt
->tau
|= 128; /* stutter mark */
6733 cpu_printf("%d: claim stutter\n", depth
);
6741 if (!noends
&& !a_cycles
&& !endstate())
6742 { depth
--; trpt
--; /* new 4.2.3 */
6743 uerror("invalid end state");
6747 else if (a_cycles
&& (trpt
->o_pm
&2)) /* new 4.2.4 */
6749 uerror("accept stutter");
6756 if (!(trpt
->tau
&8)) /* not in atomic seqs */
6761 /* --after-- a program-step, i.e., */
6762 /* after backtracking a claim-step */
6764 /* with at least one running process */
6765 /* unless in a stuttered accept state */
6766 && ((now
._nr_pr
> 1) || (trpt
->o_pm
&2))
6774 cpu_printf("Consider check %d %d...\n",
6775 now
._a_t
, now
._cnt
[0]);
6777 if ((now
._a_t
&2) /* A-bit */
6778 && (now
._cnt
[0] == 1))
6782 if (a_cycles
&& (trpt
->o_pm
&2))
6787 #if defined(FULLSTACK) || defined(CNTRSTACK)
6790 && (((trpt
->tau
&4) && !(trpt
->tau
&128))
6791 || ( (trpt
-1)->tau
&128)))
6797 #if defined(FULLSTACK)
6798 printf("%d: zapping %u (%d)\n",
6799 depth
, trpt
->ostate
,
6800 (trpt
->ostate
)?trpt
->ostate
->tagged
:0);
6809 && (((trpt
->tau
&4) && !(trpt
->tau
&128))
6810 || ( (trpt
-1)->tau
&128)))
6816 printf("%d: zapping\n", depth
);
6821 gstore((char *) &now
, vsize
, 1);
6828 #if NCORE>1 && defined(FULL_TRAIL)
6838 void new_state(void) { /* place holder */ }
6842 assert(int a
, char *s
, int ii
, int tt
, Trans
*t
)
6844 if (!a
&& !noasserts
)
6846 strcpy(bad
, "assertion violated ");
6847 if (strlen(s
) > 1000)
6848 { strncpy(&bad
[19], (const char *) s
, 1000);
6851 strcpy(&bad
[19], s
);
6855 #ifndef NOBOUNDCHECK
6857 Boundcheck(int x
, int y
, int a1
, int a2
, Trans
*a3
)
6859 assert((x
>= 0 && x
< y
), "- invalid array index",
6868 printf("%9.8g states, stored (%g visited)\n",
6869 nstates
- nShadow
, nstates
);
6871 printf("%9.8g states, stored\n", nstates
);
6874 printf(" %8g nominal states (- rv and atomic)\n", nstates
-midrv
-nlinks
+revrv
);
6875 printf(" %8g rvs succeeded\n", midrv
-failedrv
);
6877 printf(" %8g nominal states (stored-atomic)\n", nstates
-nlinks
);
6880 printf(" %8g midrv\n", midrv
);
6881 printf(" %8g failedrv\n", failedrv
);
6882 printf(" %8g revrv\n", revrv
);
6885 printf("%9.8g states, matched\n", truncs
);
6887 printf("%9.8g matches within stack\n",truncs2
);
6890 printf("%9.8g transitions (= visited+matched)\n",
6893 printf("%9.8g transitions (= stored+matched)\n",
6895 printf("%9.8g atomic steps\n", nlinks
);
6896 if (nlost
) printf("%g lost messages\n", (double) nlost
);
6899 printf("hash conflicts: %9.8g (resolved)\n", hcmp
);
6901 if (hcmp
> (double) (1<<ssize
))
6902 { printf("hint: increase hashtable-size (-w) to reduce runtime\n");
6903 } /* in multi-core: also reduces lock delays on access to hashtable */
6907 printf("%8g states allocated for dfs stack\n", ngrabs
);
6910 printf("\nhash factor: %4g (best if > 100.)\n\n",
6911 (double)(((double) udmem
) * 8.0) / (double) nstates
);
6913 printf("\nhash factor: %4g (best if > 100.)\n\n",
6914 (double)(1<<(ssize
-8)) / (double) nstates
* 256.0);
6915 printf("bits set per state: %u (-k%u)\n", hfns
, hfns
);
6918 { printf("total bits available: %8g (-M%ld)\n",
6919 ((double) udmem
) * 8.0, udmem
/(1024L*1024L));
6921 printf("total bits available: %8g (-w%d)\n",
6922 ((double) (ONE_L
<< (ssize
-4)) * 16.0), ssize
);
6926 printf("bfs disk reads: %ld writes %ld -- diff %ld\n",
6927 bfs_dsk_reads
, bfs_dsk_writes
, bfs_dsk_writes
-bfs_dsk_reads
);
6928 if (bfs_dsk_read
>= 0) (void) close(bfs_dsk_read
);
6929 if (bfs_dsk_write
>= 0) (void) close(bfs_dsk_write
);
6930 (void) unlink("pan_bfs_dsk.tmp");
6937 #if defined(BITSTATE) || !defined(NOCOMP)
6938 double nr1
, nr2
, nr3
= 0.0, nr4
, nr5
= 0.0;
6939 #if !defined(MA) && (defined(MEMCNT) || defined(MEMLIM))
6942 int mverbose
= verbose
;
6946 if (verbose
) cpu_printf("wrapup -- %d error(s)\n", errors
);
6950 void dsk_stats(void);
6953 if (search_terminated
!= NULL
)
6954 { *search_terminated
|= 2; /* wrapup */
6956 exit(0); /* normal termination, not an error */
6959 #if !defined(WIN32) && !defined(WIN64)
6960 signal(SIGINT
, SIG_DFL
);
6962 printf("\n(%s)\n", SpinVersion
);
6963 if (!done
) printf("Warning: Search not completed\n");
6965 (void) unlink((const char *)stackfile
);
6969 { printf(" + Multi-Core (NCORE=%d)\n", NCORE
);
6971 { printf(" + Multi-Core (NCORE=%d -z%d)\n", NCORE
, z_handoff
);
6975 printf(" + Using Breadth-First Search\n");
6978 printf(" + Partial Order Reduction\n");
6981 printf(" + Reverse Depth-First Search Order\n");
6984 printf(" + Reverse Transition Ordering\n");
6987 printf(" + Randomized Transition Ordering\n");
6990 printf(" + Scheduling Restriction (-DSCHED=%d)\n", sched_max
);
6993 printf(" + Compression\n");
6996 printf(" + Graph Encoding (-DMA=%d)\n", MA
);
6998 printf(" Restarted from checkpoint %s.xpt\n", PanSource
);
7003 printf(" + FullStack Matching\n");
7006 printf(" + CntrStack Matching\n");
7010 printf("\nBit statespace search for:\n");
7013 printf("\nHash-Compact %d search for:\n", HC
);
7015 printf("\nFull statespace search for:\n");
7019 #ifdef NEGATED_TRACE
7020 printf(" notrace assertion +\n");
7022 printf(" trace assertion +\n");
7026 printf(" never claim +\n");
7027 printf(" assertion violations ");
7029 printf("- (disabled by -A flag)\n");
7031 printf("+ (if within scope of claim)\n");
7034 printf(" never claim - (not selected)\n");
7036 printf(" never claim - (none specified)\n");
7038 printf(" assertion violations ");
7040 printf("- (disabled by -A flag)\n");
7046 printf(" non-progress cycles ");
7048 printf(" acceptance cycles ");
7051 printf("+ (fairness %sabled)\n",
7052 fairness
?"en":"dis");
7053 else printf("- (not selected)\n");
7055 printf(" cycle checks - (disabled by -DSAFETY)\n");
7058 printf(" invalid end states - ");
7059 printf("(disabled by ");
7061 printf("-E flag)\n\n");
7063 printf("never claim)\n\n");
7065 printf(" invalid end states ");
7067 printf("- (disabled by -E flag)\n\n");
7071 printf("State-vector %d byte, depth reached %ld", hmax
,
7073 (nr_handoffs
* z_handoff
) +
7076 printf(", errors: %d\n", errors
);
7080 { extern void dfa_stats(void);
7081 if (maxgs
+a_cycles
+2 < MA
)
7082 printf("MA stats: -DMA=%d is sufficient\n",
7089 printf("stackframes: %d/%d\n\n", smax
, svmax
);
7090 printf("stats: fa %d, fh %d, zh %d, zn %d - ",
7092 printf("check %d holds %d\n", Ccheck
, Cholds
);
7093 printf("stack stats: puts %d, probes %d, zaps %d\n",
7099 #if defined(BITSTATE) || !defined(NOCOMP)
7100 nr1
= (nstates
-nShadow
)*
7101 (double)(hmax
+sizeof(struct H_el
)-sizeof(unsigned));
7105 nr2
= (double) ((maxdepth
+3)*sizeof(Trail
));
7108 #if !defined(MA) || defined(COLLAPSE)
7109 nr3
= (double) (ONE_L
<<ssize
)*sizeof(struct H_el
*);
7113 nr3
= (double) (udmem
);
7115 nr3
= (double) (ONE_L
<<(ssize
-3));
7117 nr5
= (double) (ONE_L
<<(ssize
-3));
7120 nr5
= (double) (maxdepth
*sizeof(struct H_el
*));
7123 nr4
= (double) (svmax
* (sizeof(Svtack
) + hmax
))
7124 + (double) (smax
* (sizeof(Stack
) + Maxbody
));
7126 if (mverbose
|| memcnt
< nr1
+nr2
+nr3
+nr4
+nr5
)
7128 { double remainder
= memcnt
;
7129 double tmp_nr
= memcnt
-nr3
-nr4
-(nr2
-fragment
)-nr5
;
7130 #if NCORE>1 && !defined(SEP_STATE)
7131 tmp_nr
-= ((double) NCORE
* LWQ_SIZE
) + GWQ_SIZE
;
7133 if (tmp_nr
< 0.0) tmp_nr
= 0.;
7134 printf("Stats on memory usage (in Megabytes):\n");
7135 printf("%9.3f equivalent memory usage for states",
7136 nr1
/1048576.); /* 1024*1024=1048576 */
7137 printf(" (stored*(State-vector + overhead))\n");
7138 #if NCORE>1 && !defined(WIN32) && !defined(WIN64)
7139 printf("%9.3f shared memory reserved for state storage\n",
7140 mem_reserved
/1048576.);
7142 printf(" in %d local heaps of %7.3f MB each\n",
7143 NCORE
, mem_reserved
/(NCORE
*1048576.));
7149 printf("%9.3f memory used for hash array (-M%ld)\n",
7150 nr3
/1048576., udmem
/(1024L*1024L));
7152 printf("%9.3f memory used for hash array (-w%d)\n",
7153 nr3
/1048576., ssize
);
7155 printf("%9.3f memory used for bit stack\n",
7157 remainder
= remainder
- nr3
- nr5
;
7159 printf("%9.3f actual memory usage for states",
7161 remainder
-= tmp_nr
;
7164 { if (tmp_nr
> nr1
) printf("unsuccessful ");
7165 printf("compression: %.2f%%)\n",
7166 (100.0*tmp_nr
)/nr1
);
7168 printf("less than 1k)\n");
7171 { printf(" state-vector as stored = %.0f byte",
7172 (tmp_nr
)/(nstates
-nShadow
) -
7173 (double) (sizeof(struct H_el
) - sizeof(unsigned)));
7174 printf(" + %ld byte overhead\n",
7175 (long int) sizeof(struct H_el
)-sizeof(unsigned));
7178 #if !defined(MA) || defined(COLLAPSE)
7179 printf("%9.3f memory used for hash table (-w%d)\n",
7180 nr3
/1048576., ssize
);
7185 printf("%9.3f memory used for DFS stack (-m%ld)\n",
7186 nr2
/1048576., maxdepth
);
7190 remainder
-= ((double) NCORE
* LWQ_SIZE
) + GWQ_SIZE
;
7191 printf("%9.3f shared memory used for work-queues\n",
7192 (GWQ_SIZE
+ (double) NCORE
* LWQ_SIZE
) /1048576.);
7193 printf(" in %d queues of %7.3f MB each",
7194 NCORE
, (double) LWQ_SIZE
/1048576.);
7196 printf(" + a global q of %7.3f MB\n",
7197 (double) GWQ_SIZE
/ 1048576.);
7202 if (remainder
- fragment
> 1048576.)
7203 printf("%9.3f other (proc and chan stacks)\n",
7204 (remainder
-fragment
)/1048576.);
7205 if (fragment
> 1048576.)
7206 printf("%9.3f memory lost to fragmentation\n",
7208 printf("%9.3f total actual memory usage\n\n",
7216 printf("%9.3f memory usage (Mbyte)\n\n",
7220 printf("nr of templates: [ globals chans procs ]\n");
7221 printf("collapse counts: [ ");
7222 { int i
; for (i
= 0; i
< 256+2; i
++)
7224 printf("%d ", ncomps
[i
]);
7228 if ((done
|| verbose
) && !no_rck
) do_reach();
7231 printf("\nPeg Counts (transitions executed):\n");
7232 for (i
= 1; i
< NTRANS
; i
++)
7233 { if (peg
[i
]) putpeg(i
, peg
[i
]);
7240 if (vprefix
> 0) close(svfd
);
7243 printf("%g loopstates hit\n", cnt_loops
);
7248 #if NCORE>1 && defined(T_ALERT)
7256 { printf("Interrupted\n");
7258 was_interrupted
= 1;
7266 * super fast hash, based on Paul Hsieh's function
7267 * http://www.azillionmonkeys.com/qed/hash.html
7271 #if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
7272 || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
7273 #define get16bits(d) (*((const uint16_t *) (d)))
7277 #define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8)\
7278 +(uint32_t)(((const uint8_t *)(d))[0]) )
7282 d_sfh(const char *s
, int len
)
7283 { uint32_t h
= len
, tmp
;
7289 for ( ; len
> 0; len
--)
7290 { h
+= get16bits(s
);
7291 tmp
= (get16bits(s
+2) << 11) ^ h
;
7292 h
= (h
<< 16) ^ tmp
;
7293 s
+= 2*sizeof(uint16_t);
7297 case 3: h
+= get16bits(s
);
7299 h
^= s
[sizeof(uint16_t)] << 18;
7302 case 2: h
+= get16bits(s
);
7323 #if defined(HASH64) || defined(WIN64)
7324 /* 64-bit Jenkins hash, 1997
7325 * http://burtleburtle.net/bob/c/lookup8.c
7327 #define mix(a,b,c) \
7328 { a -= b; a -= c; a ^= (c>>43); \
7329 b -= c; b -= a; b ^= (a<<9); \
7330 c -= a; c -= b; c ^= (b>>8); \
7331 a -= b; a -= c; a ^= (c>>38); \
7332 b -= c; b -= a; b ^= (a<<23); \
7333 c -= a; c -= b; c ^= (b>>5); \
7334 a -= b; a -= c; a ^= (c>>35); \
7335 b -= c; b -= a; b ^= (a<<49); \
7336 c -= a; c -= b; c ^= (b>>11); \
7337 a -= b; a -= c; a ^= (c>>12); \
7338 b -= c; b -= a; b ^= (a<<18); \
7339 c -= a; c -= b; c ^= (b>>22); \
7342 /* 32-bit Jenkins hash, 2006
7343 * http://burtleburtle.net/bob/c/lookup3.c
7345 #define rot(x,k) (((x)<<(k))|((x)>>(32-(k))))
7347 #define mix(a,b,c) \
7348 { a -= c; a ^= rot(c, 4); c += b; \
7349 b -= a; b ^= rot(a, 6); a += c; \
7350 c -= b; c ^= rot(b, 8); b += a; \
7351 a -= c; a ^= rot(c,16); c += b; \
7352 b -= a; b ^= rot(a,19); a += c; \
7353 c -= b; c ^= rot(b, 4); b += a; \
7356 #define final(a,b,c) \
7357 { c ^= b; c -= rot(b,14); \
7358 a ^= c; a -= rot(c,11); \
7359 b ^= a; b -= rot(a,25); \
7360 c ^= b; c -= rot(b,16); \
7361 a ^= c; a -= rot(c,4); \
7362 b ^= a; b -= rot(a,14); \
7363 c ^= b; c -= rot(b,24); \
7368 d_hash(uchar
*kb
, int nbytes
)
7370 #if defined(HASH64) || defined(WIN64)
7371 uint64_t a
= 0, b
, c
, n
;
7372 uint64_t *k
= (uint64_t *) kb
;
7374 uint32_t a
, b
, c
, n
;
7375 uint32_t *k
= (uint32_t *) kb
;
7377 /* extend to multiple of words, if needed */
7378 n
= nbytes
/WS
; /* nr of words */
7379 a
= nbytes
- (n
*WS
);
7384 case 3: *bp
++ = 0; /* fall thru */
7385 case 2: *bp
++ = 0; /* fall thru */
7389 #if defined(HASH64) || defined(WIN64)
7390 b
= HASH_CONST
[HASH_NR
];
7391 c
= 0x9e3779b97f4a7c13LL
; /* arbitrary value */
7400 c
+= (((uint64_t) nbytes
)<<3);
7408 a
= c
= 0xdeadbeef + (n
<<2);
7409 b
= HASH_CONST
[HASH_NR
];
7426 j1
= c
&nmask
; j3
= a
&7; /* 1st bit */
7427 j2
= b
&nmask
; j4
= (a
>>3)&7; /* 2nd bit */
7432 s_hash(uchar
*cp
, int om
)
7435 d_sfh((const char *) cp
, om
); /* sets K1 */
7437 d_hash(cp
, om
); /* sets K1 etc */
7441 j1
= K1
% omaxdepth
;
7454 srand(123); /* fixed startpoint */
7455 prerand
= (int *) emalloc((omaxdepth
+3)*sizeof(int));
7456 for (i
= 0; i
< omaxdepth
+3; i
++)
7457 prerand
[i
] = rand();
7461 { if (!prerand
) inirand();
7462 return prerand
[depth
];
7467 set_masks(void) /* 4.2.5 */
7469 if (WS
== 4 && ssize
>= 32)
7470 { mask
= 0xffffffff;
7473 case 34: nmask
= (mask
>>1); break;
7474 case 33: nmask
= (mask
>>2); break;
7475 default: nmask
= (mask
>>3); break;
7481 { mask
= ((ONE_L
<<ssize
)-1); /* hash init */
7488 { fprintf(stderr
, "pan: wordsize %ld not supported\n", (long int) WS
);
7490 } else /* WS == 4 and ssize < 32 */
7491 { mask
= ((ONE_L
<<ssize
)-1); /* hash init */
7496 static long reclaim_size
;
7497 static char *reclaim_mem
;
7498 #if defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(MA)
7500 #error cannot combine AUTO_RESIZE with NCORE>1 yet
7502 static struct H_el
**N_tab
;
7504 reverse_capture(struct H_el
*p
)
7506 reverse_capture(p
->nxt
);
7507 /* last element of list moves first */
7508 /* to preserve list-order */
7510 if (ssize
< 8*WS
) /* probably always true */
7517 resize_hashtable(void)
7519 if (WS
== 4 && ssize
>= 27 - 1)
7520 { return; /* canot increase further */
7523 ssize
+= 2; /* 4x size */
7525 printf("pan: resizing hashtable to -w%d.. ", ssize
);
7527 N_tab
= (struct H_el
**)
7528 emalloc((ONE_L
<<ssize
)*sizeof(struct H_el
*));
7530 set_masks(); /* they changed */
7532 for (j1
= 0; j1
< (ONE_L
<< (ssize
- 2)); j1
++)
7533 { reverse_capture(H_tab
[j1
]);
7535 reclaim_mem
= (char *) H_tab
;
7536 reclaim_size
= (ONE_L
<< (ssize
- 2));
7542 #if defined(ZAPH) && defined(BITSTATE)
7545 { cpu_printf("pan: resetting hashtable\n");
7547 { memset(SS
, 0, udmem
);
7549 { memset(SS
, 0, ONE_L
<<(ssize
-3));
7555 main(int argc
, char *argv
[])
7556 { void to_compile(void);
7558 efd
= stderr
; /* default */
7560 bstore
= bstore_reg
; /* default */
7564 strcpy(o_cmdline
, "");
7565 for (j
= 1; j
< argc
; j
++)
7566 { strcat(o_cmdline
, argv
[j
]);
7567 strcat(o_cmdline
, " ");
7569 /* printf("Command Line: %s\n", o_cmdline); */
7570 if (strlen(o_cmdline
) >= sizeof(o_cmdline
))
7571 { Uerror("option list too long");
7574 while (argc
> 1 && argv
[1][0] == '-')
7575 { switch (argv
[1][1]) {
7578 case 'a': fprintf(efd
, "error: -a disabled");
7581 case 'a': a_cycles
= 1; break;
7584 case 'A': noasserts
= 1; break;
7585 case 'b': bounded
= 1; break;
7587 case 'C': coltrace
= 1; goto samething
;
7589 case 'c': upto
= atoi(&argv
[1][2]); break;
7590 case 'd': state_tables
++; break;
7591 case 'e': every_error
= 1; Nr_Trails
= 1; break;
7592 case 'E': noends
= 1; break;
7594 case 'F': if (strlen(argv
[1]) > 2)
7595 stackfile
= &argv
[1][2];
7598 #if !defined(SAFETY) && !defined(NOFAIR)
7599 case 'f': fairness
= 1; break;
7602 case 'g': gui
= 1; goto samething
;
7604 case 'h': if (!argv
[1][2]) usage(efd
); else
7605 HASH_NR
= atoi(&argv
[1][2])%33; break;
7606 case 'I': iterative
= 2; every_error
= 1; break;
7607 case 'i': iterative
= 1; every_error
= 1; break;
7608 case 'J': like_java
= 1; break; /* Klaus Havelund */
7610 case 'k': hfns
= atoi(&argv
[1][2]); break;
7613 case 'L': sched_max
= atoi(&argv
[1][2]); break;
7617 case 'l': a_cycles
= 1; break;
7619 case 'l': fprintf(efd
, "error: -l disabled");
7624 case 'M': udmem
= atoi(&argv
[1][2]); break;
7625 case 'G': udmem
= atoi(&argv
[1][2]); udmem
*= 1024; break;
7628 fprintf(stderr
, "-M and -G affect only -DBITSTATE\n");
7631 case 'm': maxdepth
= atoi(&argv
[1][2]); break;
7632 case 'n': no_rck
= 1; break;
7633 case 'P': readtrail
= 1; onlyproc
= atoi(&argv
[1][2]);
7634 if (argv
[2][0] != '-') /* check next arg */
7635 { trailfilename
= argv
[2];
7636 argc
--; argv
++; /* skip next arg */
7640 case 'p': vprefix
= atoi(&argv
[1][2]); break;
7643 case 'Q': quota
= (double) 60.0 * (double) atoi(&argv
[1][2]); break;
7645 case 'q': strict
= 1; break;
7646 case 'R': Nrun
= atoi(&argv
[1][2]); break;
7649 samething
: readtrail
= 1;
7650 if (isdigit(argv
[1][2]))
7651 whichtrail
= atoi(&argv
[1][2]);
7652 else if (argc
> 2 && argv
[2][0] != '-') /* check next arg */
7653 { trailfilename
= argv
[2];
7654 argc
--; argv
++; /* skip next arg */
7657 case 'S': silent
= 1; goto samething
;
7660 case 's': hfns
= 1; break;
7662 case 'T': TMODE
= 0444; break;
7663 case 't': if (argv
[1][2]) tprefix
= &argv
[1][2]; break;
7664 case 'V': start_timer(); printf("Generated by %s\n", SpinVersion
);
7665 to_compile(); pan_exit(2); break;
7666 case 'v': verbose
++; break;
7667 case 'w': ssize
= atoi(&argv
[1][2]); break;
7668 case 'Y': signoff
= 1; break;
7669 case 'X': efd
= stdout
; break;
7670 case 'x': exclusive
= 1; break;
7672 /* -B ip is passthru to proxy of remote ip address: */
7673 case 'B': argc
--; argv
++; break;
7674 case 'Q': worker_pids
[0] = atoi(&argv
[1][2]); break;
7675 /* -Un means that the nth worker should be instantiated as a proxy */
7676 case 'U': proxy_pid
= atoi(&argv
[1][2]); break;
7677 /* -W means that this copy is started by a cluster-server as a remote */
7678 /* this flag is passed to ./pan_proxy, which interprets it */
7679 case 'W': remote_party
++; break;
7680 case 'Z': core_id
= atoi(&argv
[1][2]);
7682 { printf("cpu%d: pid %d parent %d\n",
7683 core_id
, getpid(), worker_pids
[0]);
7686 case 'z': z_handoff
= atoi(&argv
[1][2]); break;
7688 case 'z': break; /* ignored for single-core */
7690 default : fprintf(efd
, "saw option -%c\n", argv
[1][1]); usage(efd
); break;
7694 if (iterative
&& TMODE
!= 0666)
7696 fprintf(efd
, "warning: -T ignored when -i or -I is used\n");
7698 #if defined(HASH32) && !defined(SFH)
7700 { fprintf(efd
, "strong warning: compiling -DHASH32 on a 64-bit machine\n");
7701 fprintf(efd
, " without -DSFH can slow down performance a lot\n");
7704 #if defined(WIN32) || defined(WIN64)
7706 TMODE
= _S_IWRITE
| _S_IREAD
;
7711 store_proxy_pid
= proxy_pid
; /* for checks in mem_file() and someone_crashed() */
7712 if (core_id
!= 0) { proxy_pid
= 0; }
7714 if (core_id
== 0 && a_cycles
)
7715 { fprintf(efd
, "hint: this search may be more efficient ");
7716 fprintf(efd
, "if pan.c is compiled -DSEP_STATE\n");
7720 { z_handoff
= 20; /* conservative default - for non-liveness checks */
7722 #if defined(NGQ) || defined(LWQ_FIXED)
7723 LWQ_SIZE
= (double) (128.*1048576.);
7725 LWQ_SIZE
= (double) ( z_handoff
+ 2.) * (double) sizeof(SM_frame
);
7729 { fprintf(efd
, "warning: the intended nr of cores to be used in liveness mode is 2\n");
7731 fprintf(efd
, "warning: without -DSEP_STATE there is no guarantee that all liveness violations are found\n");
7736 #error cannot use hidden variables when compiling multi-core
7742 fprintf(efd
, "warning: using -k%d as minimal usable value\n", hfns
);
7745 omaxdepth
= maxdepth
;
7747 if (WS
== 4 && ssize
> 34)
7749 fprintf(efd
, "warning: using -w%d as max usable value\n", ssize
);
7751 * -w35 would not work: 35-3 = 32 but 1^31 is the largest
7752 * power of 2 that can be represented in an unsigned long
7756 if (WS
== 4 && ssize
> 27)
7758 fprintf(efd
, "warning: using -w%d as max usable value\n", ssize
);
7760 * for emalloc, the lookup table size multiplies by 4 for the pointers
7761 * the largest power of 2 that can be represented in a ulong is 1^31
7762 * hence the largest number of lookup table slots is 31-4 = 27
7767 hiwater
= HHH
= maxdepth
-10;
7770 { stackfile
= (char *) emalloc(strlen(PanSource
)+4+1);
7771 sprintf(stackfile
, "%s._s_", PanSource
);
7774 { fprintf(efd
, "error: cannot use -i or -I with -DSC\n");
7778 #if (defined(R_XPT) || defined(W_XPT)) && !defined(MA)
7779 #warning -DR_XPT and -DW_XPT assume -DMA (ignored)
7781 if (iterative
&& a_cycles
)
7782 fprintf(efd
, "warning: -i or -I work for safety properties only\n");
7785 #error -DBFS not compatible with -DSC
7788 #error -DBFS not compatible with _last
7791 #error cannot use c_track UnMatched with BFS
7794 #warning -DREACH is redundant when -DBFS is used
7797 #if defined(MERGED) && defined(PEG)
7798 #error to use -DPEG use: spin -o3 -a
7802 #error cannot combine -DHC and -DSFH
7803 /* use of NOCOMP is the real reason */
7806 #error cannot combine -DHC and -DNOCOMP
7810 #error cannot combine -DHC and -DBITSTATE
7813 #if defined(SAFETY) && defined(NP)
7814 #error cannot combine -DNP and -DBFS or -DSAFETY
7818 #error cannot combine -DMA and -DBITSTATE
7821 #error usage: -DMA=N with N > 0 and N < VECTORSZ
7826 #error cannot combine -DBITSTATE and -DCOLLAPSE
7829 #error cannot combine -DCOLLAPSE and -DSFH
7830 /* use of NOCOMP is the real reason */
7833 #error cannot combine -DCOLLAPSE and -DNOCOMP
7837 if (maxdepth
<= 0 || ssize
<= 1) usage(efd
);
7838 #if SYNC>0 && !defined(NOREDUCE)
7839 if (a_cycles
&& fairness
)
7840 { fprintf(efd
, "error: p.o. reduction not compatible with ");
7841 fprintf(efd
, "fairness (-f) in models\n");
7842 fprintf(efd
, " with rendezvous operations: ");
7843 fprintf(efd
, "recompile with -DNOREDUCE\n");
7847 #if defined(REM_VARS) && !defined(NOREDUCE)
7848 #warning p.o. reduction not compatible with remote varrefs (use -DNOREDUCE)
7850 #if defined(NOCOMP) && !defined(BITSTATE)
7852 { fprintf(efd
, "error: use of -DNOCOMP voids -l and -a\n");
7857 memlim
= ((double) MEMLIM
) * (double) (1<<20); /* size in Mbyte */
7860 if (Nrun
> 1) HASH_NR
= Nrun
- 1;
7862 if (Nrun
< 1 || Nrun
> 32)
7863 { fprintf(efd
, "error: invalid arg for -R\n");
7867 if (fairness
&& !a_cycles
)
7868 { fprintf(efd
, "error: -f requires -a or -l\n");
7873 { fprintf(efd
, "error: no accept labels defined ");
7874 fprintf(efd
, "in model (for option -a)\n");
7881 #error use of enabled() requires -DNOREDUCE
7884 #error use of pcvalue() requires -DNOREDUCE
7887 #error use of 'else' combined with i/o stmnts requires -DNOREDUCE
7890 #error use of _last requires -DNOREDUCE
7893 #if SYNC>0 && !defined(NOREDUCE)
7895 fprintf(efd
, "warning: use of a rendezvous stmnts in the escape\n");
7896 fprintf(efd
, " of an unless clause, if present, could make p.o. reduction\n");
7897 fprintf(efd
, " invalid (use -DNOREDUCE to avoid this)\n");
7899 fprintf(efd
, " (this type of rv is also not compatible with -DBFS)\n");
7903 #if SYNC>0 && defined(BFS)
7904 #warning use of rendezvous with BFS does not preserve all invalid endstates
7906 #if !defined(REACH) && !defined(BITSTATE)
7907 if (iterative
!= 0 && a_cycles
== 0)
7908 { fprintf(efd
, "warning: -i and -I need -DREACH to work accurately\n");
7911 #if defined(BITSTATE) && defined(REACH)
7912 #warning -DREACH is voided by -DBITSTATE
7914 #if defined(MA) && defined(REACH)
7915 #warning -DREACH is voided by -DMA
7917 #if defined(FULLSTACK) && defined(CNTRSTACK)
7918 #error cannot combine -DFULLSTACK and -DCNTRSTACK
7931 { fprintf(efd
, "warning: never claim + accept labels ");
7932 fprintf(efd
, "requires -a flag to fully verify\n");
7940 { fprintf(efd
, "warning: verification in BFS mode ");
7941 fprintf(efd
, "is restricted to safety properties\n");
7955 { fprintf(efd
, "hint: this search is more efficient ");
7956 fprintf(efd
, "if pan.c is compiled -DSAFETY\n");
7963 { S_A
= 1; /* _a_t */
7965 } else /* _a_t and _cnt[NFAIR] */
7966 { S_A
= (&(now
._cnt
[0]) - (uchar
*) &now
) + NFAIR
- 2;
7967 /* -2 because first two uchars in now are masked */
7972 signal(SIGINT
, stopped
);
7975 trail
= (Trail
*) emalloc(6*sizeof(Trail
));
7978 trail
= (Trail
*) emalloc((maxdepth
+3)*sizeof(Trail
));
7979 trail
++; /* protect trpt-1 refs at depth 0 */
7984 sprintf(nm
, "%s.svd", PanSource
);
7985 if ((svfd
= creat(nm
, TMODE
)) < 0)
7986 { fprintf(efd
, "couldn't create %s\n", nm
);
7993 #if SYNC>0 && ASYNC==0
8005 fprintf(fd
, "%s\n", SpinVersion
);
8006 fprintf(fd
, "Valid Options are:\n");
8009 fprintf(fd
, " -a -> is disabled by -DNP ");
8010 fprintf(fd
, "(-DNP compiles for -l only)\n");
8012 fprintf(fd
, " -a find acceptance cycles\n");
8015 fprintf(fd
, " -a,-l,-f -> are disabled by -DSAFETY\n");
8017 fprintf(fd
, " -A ignore assert() violations\n");
8018 fprintf(fd
, " -b consider it an error to exceed the depth-limit\n");
8019 fprintf(fd
, " -cN stop at Nth error ");
8020 fprintf(fd
, "(defaults to -c1)\n");
8021 fprintf(fd
, " -d print state tables and stop\n");
8022 fprintf(fd
, " -e create trails for all errors\n");
8023 fprintf(fd
, " -E ignore invalid end states\n");
8025 fprintf(fd
, " -Ffile use 'file' to store disk-stack\n");
8028 fprintf(fd
, " -f add weak fairness (to -a or -l)\n");
8030 fprintf(fd
, " -hN use different hash-seed N:1..32\n");
8031 fprintf(fd
, " -i search for shortest path to error\n");
8032 fprintf(fd
, " -I like -i, but approximate and faster\n");
8033 fprintf(fd
, " -J reverse eval order of nested unlesses\n");
8035 fprintf(fd
, " -kN set N bits per state (defaults to 3)\n");
8038 fprintf(fd
, " -LN set scheduling restriction to N (default 10)\n");
8042 fprintf(fd
, " -l find non-progress cycles\n");
8044 fprintf(fd
, " -l find non-progress cycles -> ");
8045 fprintf(fd
, "disabled, requires ");
8046 fprintf(fd
, "compilation with -DNP\n");
8050 fprintf(fd
, " -MN use N Megabytes for bitstate hash array\n");
8051 fprintf(fd
, " -GN use N Gigabytes for bitstate hash array\n");
8053 fprintf(fd
, " -mN max depth N steps (default=10k)\n");
8054 fprintf(fd
, " -n no listing of unreached states\n");
8056 fprintf(fd
, " -pN create svfile (save N bytes per state)\n");
8058 fprintf(fd
, " -QN set time-limit on execution of N minutes\n");
8059 fprintf(fd
, " -q require empty chans in valid end states\n");
8061 fprintf(fd
, " -r read and execute trail - can add -v,-n,-PN,-g,-C\n");
8062 fprintf(fd
, " -rN read and execute N-th error trail\n");
8063 fprintf(fd
, " -C read and execute trail - columnated output (can add -v,-n)\n");
8064 fprintf(fd
, " -PN read and execute trail - restrict trail output to proc N\n");
8065 fprintf(fd
, " -g read and execute trail + msc gui support\n");
8066 fprintf(fd
, " -S silent replay: only user defined printfs show\n");
8069 fprintf(fd
, " -RN repeat run Nx with N ");
8070 fprintf(fd
, "[1..32] independent hash functions\n");
8071 fprintf(fd
, " -s same as -k1 (single bit per state)\n");
8073 fprintf(fd
, " -T create trail files in read-only mode\n");
8074 fprintf(fd
, " -tsuf replace .trail with .suf on trailfiles\n");
8075 fprintf(fd
, " -V print SPIN version number\n");
8076 fprintf(fd
, " -v verbose -- filenames in unreached state listing\n");
8077 fprintf(fd
, " -wN hashtable of 2^N entries ");
8078 fprintf(fd
, "(defaults to -w%d)\n", ssize
);
8079 fprintf(fd
, " -x do not overwrite an existing trail file\n");
8081 fprintf(fd
, " -zN handoff states below depth N to 2nd cpu (multi_core)\n");
8084 fprintf(fd
, "\n options -r, -C, -PN, -g, and -S can optionally be followed by\n");
8085 fprintf(fd
, " a filename argument, as in '-r filename', naming the trailfile\n");
8094 Malloc(unsigned long n
)
8097 if (memcnt
+ (double) n
> memlim
) goto err
;
8100 tmp
= (char *) malloc(n
);
8103 tmp
= (char *) sbrk(n
);
8104 if (tmp
== (char *) -ONE_L
)
8110 printf("pan: out of memory\n");
8112 printf(" %g bytes used\n", memcnt
);
8113 printf(" %g bytes more needed\n", (double) n
);
8114 printf(" %g bytes limit\n",
8118 printf("hint: to reduce memory, recompile with\n");
8120 printf(" -DMA=%d # better/slower compression, or\n", hmax
);
8122 printf(" -DBITSTATE # supertrace, approximation\n");
8125 printf("hint: to reduce memory, recompile with\n");
8127 printf(" -DCOLLAPSE # good, fast compression, or\n");
8129 printf(" -DMA=%d # better/slower compression, or\n", hmax
);
8131 printf(" -DHC # hash-compaction, approximation\n");
8133 printf(" -DBITSTATE # supertrace, approximation\n");
8138 printf(" omit -DFULL_TRAIL or use pan -c0 to reduce memory\n");
8141 printf("hint: to reduce memory, recompile without\n");
8142 printf(" -DSEP_STATE # may be faster, but uses more memory\n");
8147 memcnt
+= (double) n
;
8151 #define CHUNK (100*VECTORSZ)
8154 emalloc(unsigned long n
) /* never released or reallocated */
8157 return (char *) NULL
;
8158 if (n
&(sizeof(void *)-1)) /* for proper alignment */
8159 n
+= sizeof(void *)-(n
&(sizeof(void *)-1));
8160 if ((unsigned long) left
< n
)
8161 { grow
= (n
< CHUNK
) ? CHUNK
: n
;
8162 have
= Malloc(grow
);
8163 fragment
+= (double) left
;
8174 { /* always fatal */
8177 sudden_stop("Uerror");
8182 #if defined(MA) && !defined(SAFETY)
8185 { Trans
*t
; uchar ot
, _m
; int tt
; short II
;
8189 uchar oat
= now
._a_t
;
8190 now
._a_t
&= ~(1|16|32);
8191 memcpy((char *) &comp_now
, (char *) &now
, vsize
);
8195 trpt
= getframe(depth
);
8198 printf("%d State: ", depth
);
8199 for (i
= 0; i
< vsize
; i
++) printf("%d%s,",
8200 ((char *)&now
)[i
], Mask
[i
]?"*":"");
8204 if (trpt
->o_pm
&128) /* fairness alg */
8205 { now
._cnt
[now
._a_t
&1] = trpt
->bup
.oval
;
8208 trpt
= getframe(depth
);
8217 { int d
; Trail
*trl
;
8219 for (d
= 1; d
< depth
; d
++)
8220 { trl
= getframe(depth
-d
); /* was trl = (trpt-d); */
8222 { now
._last
= trl
->pr
- BASE
;
8226 now
._last
= (depth
<1)?0:(trpt
-1)->pr
;
8230 now
._event
= trpt
->o_event
;
8232 if ((now
._a_t
&1) && depth
<= A_depth
)
8233 { now
._a_t
&= ~(1|16|32);
8234 if (fairness
) now
._a_t
|= 2; /* ? */
8236 goto CameFromHere
; /* checkcycles() */
8239 ot
= trpt
->o_ot
; II
= trpt
->pr
;
8240 tt
= trpt
->o_tt
; this = pptr(II
);
8241 _m
= do_reverse(t
, II
, trpt
->o_m
);
8243 printf("%3d: proc %d ", depth
, II
);
8244 printf("reverses %d, %d to %d,",
8245 t
->forw
, tt
, t
->st
);
8246 printf(" %s [abit=%d,adepth=%d,",
8247 t
->tp
, now
._a_t
, A_depth
);
8248 printf("tau=%d,%d] <unwind>\n",
8249 trpt
->tau
, (trpt
-1)->tau
);
8253 trpt
= getframe(depth
);
8257 /* reached[ot][t->st] = 1; 3.4.13 */
8258 ((P0
*)this)->_p
= tt
;
8260 if ((trpt
->o_pm
&32))
8263 if (now
._cnt
[now
._a_t
&1] == 0)
8264 now
._cnt
[now
._a_t
&1] = 1;
8266 now
._cnt
[now
._a_t
&1] += 1;
8271 now
._cnt
[now
._a_t
&1] = 0;
8277 if (memcmp((char *) &now
, (char *) &comp_now
, vsize
) == 0)
8279 if (depth
> 0) goto Up
;
8283 static char unwinding
;
8286 { static char laststr
[256];
8289 if (unwinding
) return; /* 1.4.2 */
8290 if (strncmp(str
, laststr
, 254))
8292 cpu_printf("pan: %s (at depth %ld)\n", str
,
8294 printf("pan: %s (at depth %ld)\n", str
,
8297 (nr_handoffs
* z_handoff
) +
8299 ((depthfound
==-1)?depth
:depthfound
));
8300 strncpy(laststr
, str
, 254);
8303 if (readtrail
) { wrap_trail(); return; }
8305 is_cycle
= (strstr(str
, " cycle") != (char *) 0);
8309 if ((every_error
!= 0)
8312 #if defined(MA) && !defined(SAFETY)
8316 depthfound
= Unwind();
8325 if (depth
> 1) trpt
--;
8327 if (depth
> 1) trpt
++;
8331 #if defined(MA) && !defined(SAFETY)
8332 if (strstr(str
, " cycle"))
8334 printf("sorry: MA writes 1 trail max\n");
8335 wrapup(); /* no recovery from unwind */
8339 if (search_terminated
!= NULL
)
8340 { *search_terminated
|= 4; /* uerror */
8346 { depth
--; trpt
--; /* undo */
8349 if (iterative
!= 0 && maxdepth
> 0)
8350 { maxdepth
= (iterative
== 1)?(depth
-1):(depth
/2);
8352 printf("pan: reducing search depth to %ld\n",
8356 if (errors
>= upto
&& upto
!= 0)
8359 sudden_stop("uerror");
8367 xrefsrc(int lno
, S_F_MAP
*mp
, int M
, int i
)
8368 { Trans
*T
; int j
, retval
=1;
8369 for (T
= trans
[M
][i
]; T
; T
= T
->nxt
)
8371 { if (strcmp(T
->tp
, ".(goto)") == 0
8372 || strncmp(T
->tp
, "goto :", 6) == 0)
8373 return 1; /* not reported */
8375 printf("\tline %d", lno
);
8377 for (j
= 0; j
< sizeof(mp
); j
++)
8378 if (i
>= mp
[j
].from
&& i
<= mp
[j
].upto
)
8379 { printf(", \"%s\"", mp
[j
].fnm
);
8382 printf(", state %d", i
);
8383 if (strcmp(T
->tp
, "") != 0)
8385 q
= transmognify(T
->tp
);
8386 printf(", \"%s\"", q
?q
:"");
8387 } else if (stopstate
[M
][i
])
8388 printf(", -end state-");
8390 retval
= 0; /* reported */
8396 r_ck(uchar
*which
, int N
, int M
, short *src
, S_F_MAP
*mp
)
8400 if (M
== VERI
&& !verbose
) return;
8402 printf("unreached in proctype %s\n", procname
[M
]);
8403 for (i
= 1; i
< N
; i
++)
8405 && (mapstate
[M
][i
] == 0
8406 || which
[mapstate
[M
][i
]] == 0))
8407 m
+= xrefsrc((int) src
[i
], mp
, M
, i
);
8410 printf(" (%d of %d states)\n", N
-1-m
, N
-1);
8412 #if NCORE>1 && !defined(SEP_STATE)
8413 static long rev_trail_cnt
;
8417 rev_trail(int fd
, volatile Stack_Tree
*st_tr
)
8418 { long j
; char snap
[64];
8423 rev_trail(fd
, st_tr
->prv
);
8425 printf("%d (%d) LRT [%d,%d] -- %9u (root %9u)\n",
8426 depth
, rev_trail_cnt
, st_tr
->pr
, st_tr
->t_id
, st_tr
, stack_last
[core_id
]);
8428 if (st_tr
->pr
!= 255)
8429 { sprintf(snap
, "%ld:%d:%d\n",
8430 rev_trail_cnt
++, st_tr
->pr
, st_tr
->t_id
);
8432 if (write(fd
, snap
, j
) != j
)
8433 { printf("pan: error writing trailfile\n");
8438 } else /* handoff point */
8440 { write(fd
, "-1:-1:-1\n", 9);
8449 #if defined VERI || defined(MERGED)
8452 #if NCORE==1 || defined(SEP_STATE) || !defined(FULL_TRAIL)
8459 sprintf(snap
, "-2:%d:-2\n", VERI
);
8460 write(fd
, snap
, strlen(snap
));
8463 sprintf(snap
, "-4:-4:-4\n");
8464 write(fd
, snap
, strlen(snap
));
8466 #if NCORE>1 && !defined(SEP_STATE) && defined(FULL_TRAIL)
8468 enter_critical(GLOBAL_LOCK
);
8469 rev_trail(fd
, stack_last
[core_id
]);
8470 leave_critical(GLOBAL_LOCK
);
8472 i
= 1; /* trail starts at position 1 */
8473 #if NCORE>1 && defined(SEP_STATE)
8474 if (cur_Root
.m_vsize
> 0) { i
++; depth
++; }
8476 for ( ; i
<= depth
; i
++)
8477 { if (i
== depthfound
+1)
8478 write(fd
, "-1:-1:-1\n", 9);
8480 if (!trl
->o_t
) continue;
8481 if (trl
->o_pm
&128) continue;
8482 sprintf(snap
, "%ld:%d:%d\n",
8483 i
, trl
->pr
, trl
->o_t
->t_id
);
8485 if (write(fd
, snap
, j
) != j
)
8486 { printf("pan: error writing trailfile\n");
8493 cpu_printf("pan: wrote trailfile\n");
8498 sv_save(void) /* push state vector onto save stack */
8500 { svtack
->nxt
= (Svtack
*) emalloc(sizeof(Svtack
));
8501 svtack
->nxt
->body
= emalloc(vsize
*sizeof(char));
8502 svtack
->nxt
->lst
= svtack
;
8503 svtack
->nxt
->m_delta
= vsize
;
8505 } else if (vsize
> svtack
->nxt
->m_delta
)
8506 { svtack
->nxt
->body
= emalloc(vsize
*sizeof(char));
8507 svtack
->nxt
->lst
= svtack
;
8508 svtack
->nxt
->m_delta
= vsize
;
8511 svtack
= svtack
->nxt
;
8513 svtack
->o_boq
= boq
;
8515 svtack
->o_delta
= vsize
; /* don't compress */
8516 memcpy((char *)(svtack
->body
), (char *) &now
, vsize
);
8517 #if defined(C_States) && defined(HAS_STACK) && (HAS_TRACK==1)
8518 c_stack((uchar
*) &(svtack
->c_stack
[0]));
8521 cpu_printf("%d: sv_save\n", depth
);
8526 sv_restor(void) /* pop state vector from save stack */
8528 memcpy((char *)&now
, svtack
->body
, svtack
->o_delta
);
8530 boq
= svtack
->o_boq
;
8532 #if defined(C_States) && (HAS_TRACK==1)
8534 c_unstack((uchar
*) &(svtack
->c_stack
[0]));
8536 c_revert((uchar
*) &(now
.c_state
[0]));
8538 if (vsize
!= svtack
->o_delta
)
8539 Uerror("sv_restor");
8541 Uerror("error: v_restor");
8542 svtack
= svtack
->lst
;
8544 cpu_printf(" sv_restor\n");
8550 { int i
; char *z
= (char *) &now
;
8552 proc_offset
[h
] = stack
->o_offset
;
8553 proc_skip
[h
] = (uchar
) stack
->o_skip
;
8555 p_name
[h
] = stack
->o_name
;
8558 for (i
= vsize
+ stack
->o_skip
; i
> vsize
; i
--)
8559 Mask
[i
-1] = 1; /* align */
8561 vsize
+= stack
->o_skip
;
8562 memcpy(z
+vsize
, stack
->body
, stack
->o_delta
);
8563 vsize
+= stack
->o_delta
;
8568 for (i
= 1; i
<= Air
[((P0
*)pptr(h
))->_t
]; i
++)
8569 Mask
[vsize
- i
] = 1; /* pad */
8570 Mask
[proc_offset
[h
]] = 1; /* _pid */
8572 if (BASE
> 0 && h
> 0)
8573 ((P0
*)pptr(h
))->_pid
= h
-BASE
;
8575 ((P0
*)pptr(h
))->_pid
= h
;
8578 if (!stack
->lst
) /* debugging */
8579 Uerror("error: p_restor");
8588 { char *z
= (char *) &now
;
8592 q_offset
[now
._nr_qs
] = stack
->o_offset
;
8593 q_skip
[now
._nr_qs
] = (uchar
) stack
->o_skip
;
8595 q_name
[now
._nr_qs
] = stack
->o_name
;
8597 vsize
+= stack
->o_skip
;
8598 memcpy(z
+vsize
, stack
->body
, stack
->o_delta
);
8599 vsize
+= stack
->o_delta
;
8605 k_end
= stack
->o_offset
;
8606 k
= k_end
- stack
->o_skip
;
8609 if (q_zero(now
._nr_qs
)) k_end
+= stack
->o_delta
;
8612 for ( ; k
< k_end
; k
++)
8615 if (!stack
->lst
) /* debugging */
8616 Uerror("error: q_restor");
8619 typedef struct IntChunks
{
8621 struct IntChunks
*nxt
;
8623 IntChunks
*filled_chunks
[512];
8624 IntChunks
*empty_chunks
[512];
8628 if (nr
>= 512) Uerror("cannot happen grab_int");
8629 if (filled_chunks
[nr
])
8630 { z
= filled_chunks
[nr
];
8631 filled_chunks
[nr
] = filled_chunks
[nr
]->nxt
;
8633 { z
= (IntChunks
*) emalloc(sizeof(IntChunks
));
8634 z
->ptr
= (int *) emalloc(nr
* sizeof(int));
8636 z
->nxt
= empty_chunks
[nr
];
8637 empty_chunks
[nr
] = z
;
8641 ungrab_ints(int *p
, int nr
)
8643 if (!empty_chunks
[nr
]) Uerror("cannot happen ungrab_int");
8644 z
= empty_chunks
[nr
];
8645 empty_chunks
[nr
] = empty_chunks
[nr
]->nxt
;
8647 z
->nxt
= filled_chunks
[nr
];
8648 filled_chunks
[nr
] = z
;
8651 delproc(int sav
, int h
)
8654 int o_vsize
= vsize
;
8656 if (h
+1 != (int) now
._nr_pr
) return 0;
8659 && q_offset
[now
._nr_qs
-1] > proc_offset
[h
])
8663 d
= vsize
- proc_offset
[h
];
8666 { stack
->nxt
= (Stack
*)
8667 emalloc(sizeof(Stack
));
8669 emalloc(Maxbody
*sizeof(char));
8670 stack
->nxt
->lst
= stack
;
8674 stack
->o_offset
= proc_offset
[h
];
8676 stack
->o_skip
= (int) proc_skip
[h
];
8678 stack
->o_skip
= (short) proc_skip
[h
];
8681 stack
->o_name
= p_name
[h
];
8685 memcpy(stack
->body
, (char *)pptr(h
), d
);
8687 vsize
= proc_offset
[h
];
8688 now
._nr_pr
= now
._nr_pr
- 1;
8689 memset((char *)pptr(h
), 0, d
);
8690 vsize
-= (int) proc_skip
[h
];
8695 for (i
= vsize
; i
< o_vsize
; i
++)
8696 Mask
[i
] = 0; /* reset */
8703 { int h
= now
._nr_qs
- 1;
8704 int d
= vsize
- q_offset
[now
._nr_qs
- 1];
8706 int k
, o_vsize
= vsize
;
8710 { stack
->nxt
= (Stack
*)
8711 emalloc(sizeof(Stack
));
8713 emalloc(Maxbody
*sizeof(char));
8714 stack
->nxt
->lst
= stack
;
8718 stack
->o_offset
= q_offset
[h
];
8720 stack
->o_skip
= (int) q_skip
[h
];
8722 stack
->o_skip
= (short) q_skip
[h
];
8725 stack
->o_name
= q_name
[h
];
8728 memcpy(stack
->body
, (char *)qptr(h
), d
);
8730 vsize
= q_offset
[h
];
8731 now
._nr_qs
= now
._nr_qs
- 1;
8732 memset((char *)qptr(h
), 0, d
);
8733 vsize
-= (int) q_skip
[h
];
8738 for (k
= vsize
; k
< o_vsize
; k
++)
8739 Mask
[k
] = 0; /* reset */
8746 for (i
= 0; i
< (int) now
._nr_qs
; i
++)
8756 for (i
= BASE
; i
< (int) now
._nr_pr
; i
++)
8757 { ptr
= (P0
*) pptr(i
);
8758 if (!stopstate
[ptr
->_t
][ptr
->_p
])
8761 if (strict
) return qs_empty();
8762 #if defined(EVENT_TRACE) && !defined(OTIM)
8763 if (!stopstate
[EVENT_TRACE
][now
._event
] && !a_cycles
)
8764 { printf("pan: event_trace not completed\n");
8774 { uchar o_a_t
= now
._a_t
;
8779 uchar o_cnt
= now
._cnt
[1];
8783 struct H_el
*sv
= trpt
->ostate
; /* save */
8785 uchar prov
= trpt
->proviso
; /* save */
8789 { int i
; uchar
*v
= (uchar
*) &now
;
8790 printf(" set Seed state ");
8792 if (fairness
) printf("(cnt = %d:%d, nrpr=%d) ",
8793 now
._cnt
[0], now
._cnt
[1], now
._nr_pr
);
8795 /* for (i = 0; i < n; i++) printf("%d,", v[i]); */
8798 printf("%d: cycle check starts\n", depth
);
8800 now
._a_t
|= (1|16|32);
8801 /* 1 = 2nd DFS; (16|32) to help hasher */
8803 now
._cnt
[1] = now
._cnt
[0];
8805 memcpy((char *)&A_Root
, (char *)&now
, vsize
);
8806 A_depth
= depthfound
= depth
;
8811 o_limit
= trpt
->sched_limit
;
8812 trpt
->sched_limit
= 0;
8814 new_state(); /* start 2nd DFS */
8816 trpt
->sched_limit
= o_limit
;
8821 now
._cnt
[1] = o_cnt
;
8823 A_depth
= 0; depthfound
= -1;
8825 printf("%d: cycle check returns\n", depth
);
8829 trpt
->ostate
= sv
; /* restore */
8831 trpt
->proviso
= prov
;
8837 #if defined(FULLSTACK) && defined(BITSTATE)
8838 struct H_el
*Free_list
= (struct H_el
*) 0;
8840 onstack_init(void) /* to store stack states in a bitstate search */
8841 { S_Tab
= (struct H_el
**) emalloc(maxdepth
*sizeof(struct H_el
*));
8845 { struct H_el
*v
, *last
= 0;
8847 { for (v
= Free_list
; v
&& ((int) v
->tagged
>= n
); v
=v
->nxt
)
8848 { if ((int) v
->tagged
== n
)
8852 gotcha
: Free_list
= v
->nxt
;
8862 /* new: second try */
8864 if (v
&& ((int) v
->tagged
>= n
))
8868 return (struct H_el
*)
8869 emalloc(sizeof(struct H_el
)+n
-sizeof(unsigned));
8876 { struct H_el
*grab_shared(int);
8877 return grab_shared(sizeof(struct H_el
)+n
-sizeof(unsigned));
8881 #define grab_state(n) (struct H_el *) \
8882 emalloc(sizeof(struct H_el)+n-sizeof(unsigned long));
8887 int cnt
= sizeof(struct H_el
)+n
-sizeof(unsigned long);
8889 if (reclaim_size
>= cnt
+WS
)
8890 { if ((cnt
& (WS
-1)) != 0) /* alignment */
8891 { cnt
+= WS
- (cnt
& (WS
-1));
8893 p
= (struct H_el
*) reclaim_mem
;
8895 reclaim_size
-= cnt
;
8898 { p
= (struct H_el
*) emalloc(cnt
);
8907 ordinal(char *v
, long n
, short tp
)
8908 { struct H_el
*tmp
, *ntmp
; long m
;
8909 struct H_el
*olst
= (struct H_el
*) 0;
8910 s_hash((uchar
*)v
, n
);
8911 #if NCORE>1 && !defined(SEP_STATE)
8912 enter_critical(CS_ID
); /* uses spinlock - 1..128 */
8916 { tmp
= grab_state(n
);
8919 for ( ;; olst
= tmp
, tmp
= tmp
->nxt
)
8920 { m
= memcmp(((char *)&(tmp
->state
)), v
, n
);
8927 Insert
: ntmp
= grab_state(n
);
8935 } else if (!tmp
->nxt
)
8937 Append
: tmp
->nxt
= grab_state(n
);
8954 #if defined(AUTO_RESIZE) && !defined(BITSTATE)
8957 memcpy(((char *)&(tmp
->state
)), v
, n
);
8960 #if NCORE>1 && !defined(SEP_STATE)
8961 leave_critical(CS_ID
); /* uses spinlock */
8971 compress(char *vin
, int nin
) /* collapse compression */
8972 { char *w
, *v
= (char *) &comp_now
;
8976 static uchar nbytes
[513]; /* 1 + 256 + 256 */
8977 static unsigned short nbytelen
;
8978 long col_q(int, char *);
8979 long col_p(int, char *);
8985 for (i
= 0; i
< NFAIR
; i
++)
8991 for (i
= 0; i
< (int) now
._nr_pr
; i
++)
8992 { n
= col_p(i
, (char *) 0);
8994 nbytes
[nbytelen
] = 0;
8996 nbytes
[nbytelen
] = 1;
8997 *v
++ = ((P0
*) pptr(i
))->_t
;
9001 { nbytes
[nbytelen
]++;
9005 { nbytes
[nbytelen
]++;
9009 { nbytes
[nbytelen
]++;
9016 for (i
= 0; i
< (int) now
._nr_pr
; i
++)
9018 n
= ordinal(scratch
, x
-scratch
, 2); /* procs */
9020 nbytes
[nbytelen
] = 0;
9022 { nbytes
[nbytelen
]++;
9026 { nbytes
[nbytelen
]++;
9030 { nbytes
[nbytelen
]++;
9036 for (i
= 0; i
< (int) now
._nr_qs
; i
++)
9037 { n
= col_q(i
, (char *) 0);
9038 nbytes
[nbytelen
] = 0;
9041 { nbytes
[nbytelen
]++;
9045 { nbytes
[nbytelen
]++;
9049 { nbytes
[nbytelen
]++;
9056 /* 3 = _a_t, _nr_pr, _nr_qs */
9057 w
= (char *) &now
+ 3 * sizeof(uchar
);
9063 w
= (char *) &(now
._vsz
) + sizeof(unsigned short);
9065 w
= (char *) &(now
._vsz
) + sizeof(unsigned long);
9071 if (now
._nr_qs
> 0 && qptr(0) < pptr(0))
9072 n
= qptr(0) - (uchar
*) w
;
9074 n
= pptr(0) - (uchar
*) w
;
9075 j
= w
- (char *) &now
;
9076 for (i
= 0; i
< (int) n
; i
++, w
++)
9077 if (!Mask
[j
++]) *x
++ = *w
;
9079 for (i
= 0; i
< (int) now
._nr_qs
; i
++)
9083 for (i
= 0, j
= 6; i
< nbytelen
; i
++)
9089 *x
|= (nbytes
[i
] << j
);
9092 for (j
= 0; j
< WS
-1; j
++)
9095 n
= ordinal(scratch
, x
-scratch
, 0); /* globals */
9097 if (n
>= (1<< 8)) { *v
++ = (n
>> 8)&255; j
++; }
9098 if (n
>= (1<<16)) { *v
++ = (n
>>16)&255; j
++; }
9099 if (n
>= (1<<24)) { *v
++ = (n
>>24)&255; j
++; }
9100 *v
++ = j
; /* add last count as a byte */
9101 for (i
= 0; i
< WS
-1; i
++)
9105 printf("collapse %d -> %d\n",
9106 vsize
, v
- (char *)&comp_now
);
9108 return v
- (char *)&comp_now
;
9111 #if !defined(NOCOMP)
9113 compress(char *vin
, int n
) /* default compression */
9117 s_hash((uchar
*)vin
, n
); /* sets K1 and K2 */
9120 { delta
++; /* _a_t */
9123 delta
+= NFAIR
; /* _cnt[] */
9127 memcpy((char *) &comp_now
+ delta
, (char *) &K1
, WS
);
9130 memcpy((char *) &comp_now
+ delta
, (char *) &K2
, HC
);
9136 char *v
= (char *) &comp_now
;
9139 int r
= 0, unroll
= n
/8;
9142 while (r
++ < unroll
)
9143 { /* unroll 8 times, avoid ifs */
9161 r
= n
- i
; /* the rest, at most 7 */
9163 case 7: *v
= *vv
++; v
+= 1 - Mask
[i
++];
9164 case 6: *v
= *vv
++; v
+= 1 - Mask
[i
++];
9165 case 5: *v
= *vv
++; v
+= 1 - Mask
[i
++];
9166 case 4: *v
= *vv
++; v
+= 1 - Mask
[i
++];
9167 case 3: *v
= *vv
++; v
+= 1 - Mask
[i
++];
9168 case 2: *v
= *vv
++; v
+= 1 - Mask
[i
++];
9169 case 1: *v
= *vv
++; v
+= 1 - Mask
[i
++];
9172 r
= (n
+WS
-1)/WS
; /* words rounded up */
9173 r
*= WS
; /* bytes */
9174 i
= r
- i
; /* remainder */
9176 case 7: *v
++ = 0; /* fall thru */
9184 default: Uerror("unexpected wordsize");
9189 { for (i
= 0; i
< n
; i
++, vv
++)
9190 if (!Mask
[i
]) *v
++ = *vv
;
9191 for (i
= 0; i
< WS
-1; i
++)
9196 printf("compress %d -> %d\n",
9197 n
, v
- (char *)&comp_now
);
9199 return v
- (char *)&comp_now
;
9204 #if defined(FULLSTACK) && defined(BITSTATE)
9206 #if !defined(onstack_now)
9207 int onstack_now(void) {}
9209 #if !defined(onstack_put)
9210 void onstack_put(void) {}
9212 #if !defined(onstack_zap)
9213 void onstack_zap(void) {}
9218 { struct H_el
*v
, *w
, *last
= 0;
9219 struct H_el
**tmp
= H_tab
;
9222 static char warned
= 0;
9226 nv
= (char *) &comp_now
;
9227 n
= compress((char *)&now
, vsize
);
9229 #if defined(BITSTATE) && defined(LC)
9230 nv
= (char *) &comp_now
;
9231 n
= compact_stack((char *)&now
, vsize
);
9237 #if !defined(HC) && !(defined(BITSTATE) && defined(LC))
9238 s_hash((uchar
*)nv
, n
);
9241 for (v
= S_Tab
[j1
]; v
; Zh
++, last
=v
, v
=v
->nxt
)
9242 { m
= memcmp(&(v
->state
), nv
, n
);
9250 #if defined(BITSTATE) && NCORE>1
9251 /* seen this happen, likely harmless, but not yet understood */
9254 { /* Uerror("stack out of wack - zap"); */
9255 cpu_printf("pan: warning, stack incomplete\n");
9266 v
->tagged
= (unsigned) n
;
9267 #if !defined(NOREDUCE) && !defined(SAFETY)
9270 v
->nxt
= last
= (struct H_el
*) 0;
9271 for (w
= Free_list
; w
; Fa
++, last
=w
, w
= w
->nxt
)
9272 { if ((int) w
->tagged
<= n
)
9277 { v
->nxt
= Free_list
;
9290 { struct H_el
**tmp
= H_tab
;
9292 if (hstore((char *)&now
, vsize
) != 0)
9293 #if defined(BITSTATE) && defined(LC)
9294 printf("pan: warning, double stack entry\n");
9297 Uerror("cannot happen - unstack_put");
9301 trpt
->ostate
= Lstate
;
9307 struct H_el
**tmp2
= H_tab
;
9308 char *v
; int n
, m
= 1;
9312 #if defined(BITSTATE) && defined(LC)
9313 v
= (char *) &comp_now
;
9314 n
= compact_stack((char *)&now
, vsize
);
9320 v
= (char *) &comp_now
;
9321 n
= compress((char *)&now
, vsize
);
9323 #if !defined(HC) && !(defined(BITSTATE) && defined(LC))
9324 s_hash((uchar
*)v
, n
);
9327 for (tmp
= S_Tab
[j1
]; tmp
; Zn
++, tmp
= tmp
->nxt
)
9328 { m
= memcmp(((char *)&(tmp
->state
)),v
,n
);
9330 { Lstate
= (struct H_el
*) tmp
;
9344 { void r_xpoint(void);
9348 dfa_init((unsigned short) (MA
+a_cycles
));
9349 #if NCORE>1 && !defined(COLLAPSE)
9351 { void init_HT(unsigned long);
9357 #if !defined(MA) || defined(COLLAPSE)
9360 { void init_HT(unsigned long);
9361 init_HT((unsigned long) (ONE_L
<<ssize
)*sizeof(struct H_el
*));
9364 H_tab
= (struct H_el
**)
9365 emalloc((ONE_L
<<ssize
)*sizeof(struct H_el
*));
9370 #if !defined(BITSTATE) || defined(FULLSTACK)
9373 dumpstate(int wasnew
, char *v
, int n
, int tag
)
9377 { printf(" state tags %d (%d::%d): ",
9380 printf(" %d ", tag
);
9388 for (i
= 0; i
< vsize
; i
++) printf("%d%s,",
9389 ((char *)&now
)[i
], Mask
[i
]?"*":"");
9391 printf("\n Vector: ");
9392 for (i
= 0; i
< n
; i
++) printf("%d,", v
[i
]);
9399 gstore(char *vin
, int nin
, uchar pbit
)
9403 static uchar Info
[MA
+1];
9405 n
= compress(vin
, nin
);
9406 v
= (uchar
*) &comp_now
;
9412 { printf("pan: error, MA too small, recompile pan.c");
9413 printf(" with -DMA=N with N>%d\n", n
);
9416 if (n
> (int) maxgs
)
9417 { maxgs
= (unsigned int) n
;
9419 for (i
= 0; i
< n
; i
++)
9422 for ( ; i
< MA
-1; i
++)
9426 if (a_cycles
) /* place _a_t at the end */
9427 { Info
[MA
] = Info
[0];
9431 #if NCORE>1 && !defined(SEP_STATE)
9432 enter_critical(GLOBAL_LOCK
); /* crude, but necessary */
9433 /* to make this mode work, also replace emalloc with grab_shared inside store MA routines */
9436 if (!dfa_store(Info
))
9440 { Info
[MA
] &= ~(1|16|32); /* _a_t */
9442 { Info
[MA
-1] = 4; /* off-stack bit */
9444 if (!dfa_member(MA
-1))
9447 printf("intersected 1st dfs stack\n");
9453 printf("new state\n");
9459 { Info
[MA
-1] = 1; /* proviso bit */
9461 trpt
->proviso
= dfa_member(MA
-1);
9463 Info
[MA
-1] = 4; /* off-stack bit */
9464 if (dfa_member(MA
-1))
9465 { ret_val
= 1; /* off-stack */
9467 printf("old state\n");
9470 { ret_val
= 2; /* on-stack */
9472 printf("on-stack\n");
9480 printf("old state\n");
9483 #if NCORE>1 && !defined(SEP_STATE)
9484 leave_critical(GLOBAL_LOCK
);
9486 return ret_val
; /* old state */
9489 #if defined(BITSTATE) && defined(LC)
9491 compact_stack(char *vin
, int n
)
9493 s_hash((uchar
*)vin
, n
); /* sets K1 and K2 */
9495 delta
++; /* room for state[0] |= 128 */
9497 memcpy((char *) &comp_now
+ delta
, (char *) &K1
, WS
);
9499 memcpy((char *) &comp_now
+ delta
, (char *) &K2
, WS
);
9500 delta
+= WS
; /* use all available bits */
9505 hstore(char *vin
, int nin
) /* hash table storage */
9506 { struct H_el
*ntmp
;
9507 struct H_el
*tmp
, *olst
= (struct H_el
*) 0;
9508 char *v
; int n
, m
=0;
9513 #if defined(BITSTATE) && defined(LC)
9515 { v
= (char *) &comp_now
;
9516 n
= compact_stack(vin
, nin
);
9524 v
= (char *) &comp_now
;
9529 n
= compress(vin
, nin
);
9535 { v
[0] = 0; /* _a_t */
9538 for (m
= 0; m
< NFAIR
; m
++)
9539 v
[m
+1] = 0; /* _cnt[] */
9545 #if !defined(HC) && !(defined(BITSTATE) && defined(LC))
9546 s_hash((uchar
*)v
, n
);
9548 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9549 enter_critical(CS_ID
); /* uses spinlock */
9553 { tmp
= grab_state(n
);
9556 { /* if we get here -- we've already issued a warning */
9557 /* but we want to allow the normal distributed termination */
9558 /* to collect the stats on all cpus in the wrapup */
9559 #if !defined(SEP_STATE) && !defined(BITSTATE)
9560 leave_critical(CS_ID
);
9562 return 1; /* allow normal termination */
9567 { for (;; hcmp
++, olst
= tmp
, tmp
= tmp
->nxt
)
9568 { /* skip the _a_t and the _cnt bytes */
9571 { if (!tmp
->nxt
) goto Append
;
9575 m
= memcmp(((char *)&(tmp
->state
)) + S_A
,
9586 { if ((((char *)&(tmp
->state
))[0] & V_A
) != V_A
)
9587 { wasnew
= 1; nShadow
++;
9588 ((char *)&(tmp
->state
))[0] |= V_A
;
9592 { /* 0 <= now._cnt[now._a_t&1] < MAXPROC */
9593 unsigned ci
, bp
; /* index, bit pos */
9594 ci
= (now
._cnt
[now
._a_t
&1] / 8);
9595 bp
= (now
._cnt
[now
._a_t
&1] - 8*ci
);
9596 if (now
._a_t
&1) /* use tail-bits in _cnt */
9597 { ci
= (NFAIR
- 1) - ci
;
9598 bp
= 7 - bp
; /* bp = 0..7 */
9600 ci
++; /* skip over _a_t */
9601 bp
= 1 << bp
; /* the bit mask */
9602 if ((((char *)&(tmp
->state
))[ci
] & bp
)==0)
9607 ((char *)&(tmp
->state
))[ci
] |= bp
;
9610 /* else: wasnew == 0, i.e., old state */
9616 Lstate
= (struct H_el
*) tmp
;
9621 { Lstate
= (struct H_el
*) tmp
;
9624 && (tmp
->tagged
&A_V
)
9630 printf("cpu%d: ", core_id
);
9632 printf("1st dfs-stack intersected on state %d+\n",
9635 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9636 leave_critical(CS_ID
);
9642 printf("cpu%d: ", core_id
);
9644 printf(" New state %d+\n", (int) tmp
->st_id
);
9647 dumpstate(1, (char *)&(tmp
->state
),n
,tmp
->tagged
);
9649 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9650 leave_critical(CS_ID
);
9655 if ((S_A
)?(tmp
->tagged
&V_A
):tmp
->tagged
)
9656 { Lstate
= (struct H_el
*) tmp
;
9658 /* already on current dfs stack */
9659 /* but may also be on 1st dfs stack */
9661 && (tmp
->tagged
&A_V
)
9664 && (!fairness
|| now
._cnt
[1] <= 1)
9671 printf("cpu%d: ", core_id
);
9673 printf(" Stack state %d\n", (int) tmp
->st_id
);
9676 dumpstate(0, (char *)&(tmp
->state
),n
,tmp
->tagged
);
9678 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9679 leave_critical(CS_ID
);
9681 return 2; /* match on stack */
9688 printf("cpu%d: ", core_id
);
9690 printf(" New state %d+\n", (int) tmp
->st_id
);
9693 dumpstate(1, (char *)&(tmp
->state
), n
, 0);
9695 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9696 leave_critical(CS_ID
);
9703 printf("cpu%d: ", core_id
);
9705 printf(" Old state %d\n", (int) tmp
->st_id
);
9708 dumpstate(0, (char *)&(tmp
->state
), n
, 0);
9715 printf("cpu%d: ", core_id
);
9717 printf(" ReVisiting (from smaller depth)\n");
9720 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9721 leave_critical(CS_ID
);
9726 #if (defined(BFS) && defined(Q_PROVISO)) || NCORE>1
9727 Lstate
= (struct H_el
*) tmp
;
9729 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9730 leave_critical(CS_ID
);
9732 return 1; /* match outside stack */
9734 { /* insert state before tmp */
9735 ntmp
= grab_state(n
);
9739 #if !defined(SEP_STATE) && !defined(BITSTATE)
9740 leave_critical(CS_ID
);
9742 return 1; /* allow normal termination */
9752 } else if (!tmp
->nxt
)
9753 { /* append after tmp */
9757 tmp
->nxt
= grab_state(n
);
9761 #if !defined(SEP_STATE) && !defined(BITSTATE)
9762 leave_critical(CS_ID
);
9764 return 1; /* allow normal termination */
9772 tmp
->st_id
= (unsigned) nstates
;
9774 printf("cpu%d: ", core_id
);
9777 printf(" Push state %d\n", ((int) nstates
) - 1);
9779 printf(" New state %d\n", (int) nstates
);
9782 #if !defined(SAFETY) || defined(REACH)
9791 { unsigned ci
, bp
; /* as above */
9792 ci
= (now
._cnt
[now
._a_t
&1] / 8);
9793 bp
= (now
._cnt
[now
._a_t
&1] - 8*ci
);
9795 { ci
= (NFAIR
- 1) - ci
;
9796 bp
= 7 - bp
; /* bp = 0..7 */
9804 #if defined(AUTO_RESIZE) && !defined(BITSTATE)
9807 memcpy(((char *)&(tmp
->state
)), v
, n
);
9809 tmp
->tagged
= (S_A
)?V_A
:(depth
+1);
9811 dumpstate(-1, v
, n
, tmp
->tagged
);
9813 Lstate
= (struct H_el
*) tmp
;
9816 dumpstate(-1, v
, n
, 0);
9819 Lstate
= (struct H_el
*) tmp
;
9822 /* #if NCORE>1 && !defined(SEP_STATE) */
9825 tmp
->cpu_id
= core_id
;
9827 #if !defined(SEP_STATE) && !defined(BITSTATE)
9828 leave_critical(CS_ID
);
9834 #include TRANSITIONS
9838 r_ck(reached0
, nstates0
, 0, src_ln0
, src_file0
);
9839 r_ck(reached1
, nstates1
, 1, src_ln1
, src_file1
);
9840 r_ck(reached2
, nstates2
, 2, src_ln2
, src_file2
);
9841 r_ck(reached3
, nstates3
, 3, src_ln3
, src_file3
);
9842 r_ck(reached4
, nstates4
, 4, src_ln4
, src_file4
);
9843 r_ck(reached5
, nstates5
, 5, src_ln5
, src_file5
);
9851 for (l_in
= 0; l_in
< 8; l_in
++)
9853 now
.buffer_use
[l_in
] = 0;
9858 for (l_in
= 0; l_in
< 2; l_in
++)
9860 now
.commit_count
[l_in
] = 0;
9863 now
._commit_sum
= 0;
9865 now
.events_lost
= 0;
9869 for (l_in
= 0; l_in
< 8; l_in
++)
9871 logval("buffer_use[l_in]", now
.buffer_use
[l_in
]);
9874 logval("write_off", now
.write_off
);
9876 for (l_in
= 0; l_in
< 2; l_in
++)
9878 logval("commit_count[l_in]", now
.commit_count
[l_in
]);
9881 logval("_commit_sum", now
._commit_sum
);
9882 logval("read_off", now
.read_off
);
9883 logval("events_lost", now
.events_lost
);
9884 logval("refcount", now
.refcount
);
9886 Maxbody
= max(Maxbody
, sizeof(State
)-VECTORSZ
);
9890 addqueue(int n
, int is_rv
)
9891 { int j
=0, i
= now
._nr_qs
;
9896 Uerror("too many queues");
9898 default: Uerror("bad queue - addqueue");
9901 q_skip
[i
] = WS
-(vsize
%WS
);
9909 for (k
+= (int) q_skip
[i
]; k
> vsize
; k
--)
9912 vsize
+= (int) q_skip
[i
];
9913 q_offset
[i
] = vsize
;
9919 hmax
= max(hmax
, vsize
);
9920 if (vsize
>= VECTORSZ
)
9921 Uerror("VECTORSZ is too small, edit pan.h");
9922 memset((char *)qptr(i
), 0, j
);
9923 ((Q0
*)qptr(i
))->_t
= n
;
9929 qsend(int into
, int sorted
, int args_given
)
9936 uerror("ref to uninitialized chan name (sending)");
9937 if (into
>= (int) now
._nr_qs
|| into
< 0)
9938 Uerror("qsend bad queue#");
9940 j
= ((Q0
*)qptr(into
))->Qlen
;
9941 switch (((Q0
*)qptr(into
))->_t
) {
9942 case 0: printf("queue %d was deleted\n", into
+1);
9943 default: Uerror("bad queue - qsend");
9946 if (in_s_scope(into
+1))
9956 { uerror("ref to uninitialized chan name (q_zero)");
9959 switch(((Q0
*)qptr(from
))->_t
) {
9960 case 0: printf("queue %d was deleted\n", from
+1);
9962 Uerror("bad queue q-zero");
9968 { printf("==>> a test of the contents of a rv ");
9969 printf("channel always returns FALSE\n");
9970 uerror("error to poll rendezvous channel");
9977 setq_claim(int x
, int m
, char *s
, int y
, char *p
)
9979 uerror("x[rs] claim on uninitialized channel");
9980 if (x
< 0 || x
> MAXQ
)
9981 Uerror("cannot happen setq_claim");
9985 if (m
&2) q_S_check(x
, y
);
9986 if (m
&1) q_R_check(x
, y
);
9988 short q_sender
[MAXQ
+1];
9990 q_S_check(int x
, int who
)
9992 { q_sender
[x
] = who
+1;
9995 { printf("chan %s (%d), ",
9997 printf("sndr proc %s (%d)\n",
9999 uerror("xs chans cannot be used for rv");
10003 if (q_sender
[x
] != who
+1)
10004 { printf("pan: xs assertion violated: ");
10005 printf("access to chan <%s> (%d)\npan: by ",
10007 if (q_sender
[x
] > 0 && p_name
[q_sender
[x
]-1])
10008 printf("%s (proc %d) and by ",
10009 p_name
[q_sender
[x
]-1], q_sender
[x
]-1);
10010 printf("%s (proc %d)\n",
10012 uerror("error, partial order reduction invalid");
10016 short q_recver
[MAXQ
+1];
10018 q_R_check(int x
, int who
)
10019 { if (!q_recver
[x
])
10020 { q_recver
[x
] = who
+1;
10023 { printf("chan %s (%d), ",
10025 printf("recv proc %s (%d)\n",
10027 uerror("xr chans cannot be used for rv");
10031 if (q_recver
[x
] != who
+1)
10032 { printf("pan: xr assertion violated: ");
10033 printf("access to chan %s (%d)\npan: ",
10035 if (q_recver
[x
] > 0 && p_name
[q_recver
[x
]-1])
10036 printf("by %s (proc %d) and ",
10037 p_name
[q_recver
[x
]-1], q_recver
[x
]-1);
10038 printf("by %s (proc %d)\n",
10040 uerror("error, partial order reduction invalid");
10048 uerror("ref to uninitialized chan name (len)");
10049 return ((Q0
*)qptr(x
))->Qlen
;
10055 uerror("ref to uninitialized chan name (qfull)");
10056 switch(((Q0
*)qptr(from
))->_t
) {
10057 case 0: printf("queue %d was deleted\n", from
+1);
10059 Uerror("bad queue - q_full");
10066 { /* empty or full */
10067 return !q_len(from
) || q_full(from
);
10072 qrecv(int from
, int slot
, int fld
, int done
)
10077 uerror("ref to uninitialized chan name (receiving)");
10078 if (from
>= (int) now
._nr_qs
|| from
< 0)
10079 Uerror("qrecv bad queue#");
10082 if (done
&& (in_r_scope(from
+1)))
10083 require('r', from
);
10085 switch (((Q0
*)qptr(from
))->_t
) {
10086 case 0: printf("queue %d was deleted\n", from
+1);
10087 default: Uerror("bad queue - qrecv");
10096 col_q(int i
, char *z
)
10099 Q0
*ptr
= (Q0
*) qptr(i
);
10101 default: Uerror("bad qtype - collapse");
10103 if (z
) x
= z
; else x
= scratch
;
10104 y
= (char *) ptr
; k
= q_offset
[i
];
10105 /* no need to store the empty slots at the end */
10106 j
-= (q_max
[ptr
->_t
] - ptr
->Qlen
) * ((j
- 2)/q_max
[ptr
->_t
]);
10107 for ( ; j
> 0; j
--, y
++)
10108 if (!Mask
[k
++]) *x
++ = *y
;
10109 for (j
= 0; j
< WS
-1; j
++)
10112 if (z
) return (long) (x
- z
);
10113 return ordinal(scratch
, x
-scratch
, 1); /* chan */
10119 { int _m
=0, j
; uchar
*z
;
10125 uerror("ref to uninitialized chan (unsend)");
10127 j
= ((Q0
*)z
)->Qlen
;
10128 ((Q0
*)z
)->Qlen
= --j
;
10129 switch (((Q0
*)qptr(into
))->_t
) {
10130 default: Uerror("bad queue - unsend");
10136 unrecv(int from
, int slot
, int fld
, int fldvar
, int strt
)
10140 uerror("ref to uninitialized chan (unrecv)");
10142 j
= ((Q0
*)z
)->Qlen
;
10143 if (strt
) ((Q0
*)z
)->Qlen
= j
+1;
10144 switch (((Q0
*)qptr(from
))->_t
) {
10145 default: Uerror("bad queue - qrecv");
10149 q_cond(short II
, Trans
*t
)
10151 for (i
= 0; i
< 6; i
++)
10152 { if (t
->ty
[i
] == TIMEOUT_F
) return 1;
10153 if (t
->ty
[i
] == ALPHA_F
)
10157 return (II
+1 == (short) now
._nr_pr
&& II
+1 < MAXPROC
);
10159 switch (t
->qu
[i
]) {
10161 default: Uerror("unknown qid - q_cond");
10169 { char ctd
[1024], carg
[64];
10171 strcpy(ctd
, "-DBITSTATE ");
10176 strcat(ctd
, "-DNOVSZ ");
10179 strcat(ctd
, "-DREVERSE ");
10182 strcat(ctd
, "-DT_REVERSE ");
10186 sprintf(carg
, "-DRANDOMIZE=%d ", RANDOMIZE
);
10189 strcat(ctd
, "-DRANDOMIZE ");
10193 sprintf(carg
, "-DSCHED=%d ", SCHED
);
10197 strcat(ctd
, "-DBFS ");
10200 sprintf(carg
, "-DMEMLIM=%d ", MEMLIM
);
10204 sprintf(carg
, "-DMEMCNT=%d ", MEMCNT
);
10209 strcat(ctd
, "-DNOCLAIM ");
10212 strcat(ctd
, "-DSAFETY ");
10215 strcat(ctd
, "-DNOFAIR ");
10219 { sprintf(carg
, "-DNFAIR=%d ", NFAIR
);
10226 strcat(ctd
, "-DNOREDUCE ");
10229 strcat(ctd
, "-DXUSAFE ");
10233 strcat(ctd
, "-DNP ");
10236 strcat(ctd
, "-DPEG ");
10239 strcat(ctd
, "-DVAR_RANGES ");
10242 strcat(ctd
, "-DHC0 ");
10245 strcat(ctd
, "-DHC1 ");
10248 strcat(ctd
, "-DHC2 ");
10251 strcat(ctd
, "-DHC3 ");
10254 strcat(ctd
, "-DHC4 ");
10257 strcat(ctd
, "-DCHECK ");
10260 strcat(ctd
, "-DCTL ");
10263 strcat(ctd
, "-DNIBIS ");
10265 #ifdef NOBOUNDCHECK
10266 strcat(ctd
, "-DNOBOUNDCHECK ");
10269 strcat(ctd
, "-DNOSTUTTER ");
10272 strcat(ctd
, "-DREACH ");
10275 strcat(ctd
, "-DPRINTF ");
10278 strcat(ctd
, "-DOTIM ");
10281 strcat(ctd
, "-DCOLLAPSE ");
10284 sprintf(carg
, "-DMA=%d ", MA
);
10288 strcat(ctd
, "-DSVDUMP ");
10291 if (VECTORSZ
!= 1024)
10292 { sprintf(carg
, "-DVECTORSZ=%d ", VECTORSZ
);
10297 strcat(ctd
, "-DVERBOSE ");
10300 strcat(ctd
, "-DCHECK ");
10303 strcat(ctd
, "-DSDUMP ");
10306 sprintf(carg
, "-DNCORE=%d ", NCORE
);
10310 sprintf(carg
, "-DSFH ");
10315 { sprintf(carg
, "-DVMAX=%d ", VMAX
);
10321 { sprintf(carg
, "-DPMAX=%d ", PMAX
);
10327 { sprintf(carg
, "-DQMAX=%d ", QMAX
);
10332 sprintf(carg
, "-DSET_WQ_SIZE=%d ", SET_WQ_SIZE
);
10335 printf("Compiled as: cc -o pan %span.c\n", ctd
);
10349 #define uchar unsigned char
10351 #define ulong unsigned long
10352 #define ushort unsigned short
10355 #define HASH(y,n) (n)*(((long)y))
10356 #define INRANGE(e,h) ((h>=e->From && h<=e->To)||(e->s==1 && e->S==h))
10358 extern char *emalloc(unsigned long); /* imported routine */
10359 extern void dfa_init(ushort
); /* 4 exported routines */
10360 extern int dfa_member(ulong
);
10361 extern int dfa_store(uchar
*);
10362 extern void dfa_stats(void);
10364 typedef struct Edge
{
10365 uchar From
, To
; /* max range 0..255 */
10366 uchar s
, S
; /* if s=1, S is singleton */
10367 struct Vertex
*Dst
;
10371 typedef struct Vertex
{
10372 ulong key
, num
; /* key for splay tree, nr incoming edges */
10373 uchar from
[2], to
[2]; /* in-node predefined edge info */
10374 struct Vertex
*dst
[2];/* most nodes have 2 or more edges */
10375 struct Edge
*Succ
; /* in case there are more edges */
10376 struct Vertex
*lnk
, *left
, *right
; /* splay tree plumbing */
10379 static Edge
*free_edges
;
10380 static Vertex
*free_vertices
;
10381 static Vertex
**layers
; /* one splay tree of nodes per layer */
10382 static Vertex
**path
; /* run of word in the DFA */
10383 static Vertex
*R
, *F
, *NF
; /* Root, Final, Not-Final */
10384 static uchar
*word
, *lastword
;/* string, and last string inserted */
10385 static int dfa_depth
, iv
=0, nv
=0, pfrst
=0, Tally
;
10387 static void insert_it(Vertex
*, int); /* splay-tree code */
10388 static void delete_it(Vertex
*, int);
10389 static Vertex
*find_it(Vertex
*, Vertex
*, uchar
, int);
10392 recyc_edges(Edge
*e
)
10395 recyc_edges(e
->Nxt
);
10396 e
->Nxt
= free_edges
;
10401 new_edge(Vertex
*dst
)
10406 free_edges
= e
->Nxt
;
10407 e
->From
= e
->To
= e
->s
= e
->S
= 0;
10408 e
->Nxt
= (Edge
*) 0;
10410 e
= (Edge
*) emalloc(sizeof(Edge
));
10417 recyc_vertex(Vertex
*v
)
10419 recyc_edges(v
->Succ
);
10420 v
->Succ
= (Edge
*) free_vertices
;
10430 { v
= free_vertices
;
10431 free_vertices
= (Vertex
*) v
->Succ
;
10432 v
->Succ
= (Edge
*) 0;
10435 v
= (Vertex
*) emalloc(sizeof(Vertex
));
10442 allDelta(Vertex
*v
, int n
)
10443 { Vertex
*dst
= new_vertex();
10454 insert_edge(Vertex
*v
, Edge
*e
)
10455 { /* put new edge first */
10457 { v
->dst
[0] = e
->Dst
;
10458 v
->from
[0] = e
->From
;
10464 { v
->from
[1] = v
->from
[0]; v
->from
[0] = e
->From
;
10465 v
->to
[1] = v
->to
[0]; v
->to
[0] = e
->To
;
10466 v
->dst
[1] = v
->dst
[0]; v
->dst
[0] = e
->Dst
;
10470 { int f
= v
->from
[1];
10472 Vertex
*d
= v
->dst
[1];
10473 v
->from
[1] = v
->from
[0]; v
->from
[0] = e
->From
;
10474 v
->to
[1] = v
->to
[0]; v
->to
[0] = e
->To
;
10475 v
->dst
[1] = v
->dst
[0]; v
->dst
[0] = e
->Dst
;
10485 copyRecursive(Vertex
*v
, Edge
*e
)
10487 if (e
->Nxt
) copyRecursive(v
, e
->Nxt
);
10488 f
= new_edge(e
->Dst
);
10498 copyEdges(Vertex
*to
, Vertex
*from
)
10500 for (i
= 0; i
< 2; i
++)
10501 { to
->from
[i
] = from
->from
[i
];
10502 to
->to
[i
] = from
->to
[i
];
10503 to
->dst
[i
] = from
->dst
[i
];
10505 if (from
->Succ
) copyRecursive(to
, from
->Succ
);
10509 cacheDelta(Vertex
*v
, int h
, int first
)
10510 { static Edge
*ov
, tmp
; int i
;
10512 if (!first
&& INRANGE(ov
,h
))
10513 return ov
; /* intercepts about 10% */
10514 for (i
= 0; i
< 2; i
++)
10515 if (v
->dst
[i
] && h
>= v
->from
[i
] && h
<= v
->to
[i
])
10516 { tmp
.From
= v
->from
[i
];
10518 tmp
.Dst
= v
->dst
[i
];
10523 for (ov
= v
->Succ
; ov
; ov
= ov
->Nxt
)
10524 if (INRANGE(ov
,h
)) return ov
;
10526 Uerror("cannot get here, cacheDelta");
10531 Delta(Vertex
*v
, int h
) /* v->delta[h] */
10534 if (v
->dst
[0] && h
>= v
->from
[0] && h
<= v
->to
[0])
10535 return v
->dst
[0]; /* oldest edge */
10536 if (v
->dst
[1] && h
>= v
->from
[1] && h
<= v
->to
[1])
10538 for (e
= v
->Succ
; e
; e
= e
->Nxt
)
10541 Uerror("cannot happen Delta");
10542 return (Vertex
*) 0;
10546 numDelta(Vertex
*v
, int d
)
10551 for (i
= 0; i
< 2; i
++)
10553 { cnt
= v
->dst
[i
]->num
+ d
*(1 + v
->to
[i
] - v
->from
[i
]);
10554 if (d
== 1 && cnt
< v
->dst
[i
]->num
) goto bad
;
10555 v
->dst
[i
]->num
= cnt
;
10557 for (e
= v
->Succ
; e
; e
= e
->Nxt
)
10558 { cnt
= e
->Dst
->num
+ d
*(1 + e
->To
- e
->From
+ e
->s
);
10559 if (d
== 1 && cnt
< e
->Dst
->num
)
10560 bad
: Uerror("too many incoming edges");
10566 setDelta(Vertex
*v
, int h
, Vertex
*newdst
) /* v->delta[h] = newdst; */
10567 { Edge
*e
, *f
= (Edge
*) 0, *g
;
10570 /* remove the old entry, if there */
10571 for (i
= 0; i
< 2; i
++)
10572 if (v
->dst
[i
] && h
>= v
->from
[i
] && h
<= v
->to
[i
])
10573 { if (h
== v
->from
[i
])
10574 { if (h
== v
->to
[i
])
10575 { v
->dst
[i
] = (Vertex
*) 0;
10576 v
->from
[i
] = v
->to
[i
] = 0;
10579 } else if (h
== v
->to
[i
])
10582 { g
= new_edge(v
->dst
[i
]);/* same dst */
10583 g
->From
= v
->from
[i
];
10584 g
->To
= h
-1; /* left half */
10585 v
->from
[i
] = h
+1; /* right half */
10590 for (e
= v
->Succ
; e
; f
= e
, e
= e
->Nxt
)
10591 { if (e
->s
== 1 && e
->S
== h
)
10595 if (h
>= e
->From
&& h
<= e
->To
)
10596 { if (h
== e
->From
)
10599 { e
->From
= e
->To
= e
->S
;
10606 } else if (h
== e
->To
)
10609 { g
= new_edge(e
->Dst
); /* same dst */
10611 g
->To
= h
-1; /* g=left half */
10612 e
->From
= h
+1; /* e=right half */
10613 g
->Nxt
= e
->Nxt
; /* insert g */
10614 e
->Nxt
= g
; /* behind e */
10618 rem_tst
: if (e
->From
> e
->To
)
10624 e
->Nxt
= (Edge
*) 0;
10627 { e
->From
= e
->To
= e
->S
;
10633 /* check if newdst is already there */
10634 for (i
= 0; i
< 2; i
++)
10635 if (v
->dst
[i
] == newdst
)
10636 { if (h
+1 == (int) v
->from
[i
])
10640 if (h
== (int) v
->to
[i
]+1)
10644 for (e
= v
->Succ
; e
; e
= e
->Nxt
)
10645 { if (e
->Dst
== newdst
)
10646 { if (h
+1 == (int) e
->From
)
10648 if (e
->s
== 1 && e
->S
+1 == e
->From
)
10654 if (h
== (int) e
->To
+1)
10656 if (e
->s
== 1 && e
->S
== e
->To
+1)
10667 /* add as a new edge */
10668 e
= new_edge(newdst
);
10669 e
->From
= e
->To
= h
;
10674 cheap_key(Vertex
*v
)
10678 { vk2
= (ulong
) v
->dst
[0];
10679 if ((ulong
) v
->dst
[1] > vk2
)
10680 vk2
= (ulong
) v
->dst
[1];
10681 } else if (v
->dst
[1])
10682 vk2
= (ulong
) v
->dst
[1];
10685 for (e
= v
->Succ
; e
; e
= e
->Nxt
)
10686 if ((ulong
) e
->Dst
> vk2
)
10687 vk2
= (ulong
) e
->Dst
;
10689 Tally
= (vk2
>>2)&(TWIDTH
-1);
10694 mk_key(Vertex
*v
) /* not sensitive to order */
10695 { ulong m
= 0, vk2
= 0;
10699 { m
+= HASH(v
->dst
[0], v
->to
[0] - v
->from
[0] + 1);
10700 vk2
= (ulong
) v
->dst
[0];
10703 { m
+= HASH(v
->dst
[1], v
->to
[1] - v
->from
[1] + 1);
10704 if ((ulong
) v
->dst
[1] > vk2
) vk2
= (ulong
) v
->dst
[1];
10706 for (e
= v
->Succ
; e
; e
= e
->Nxt
)
10707 { m
+= HASH(e
->Dst
, e
->To
- e
->From
+ 1 + e
->s
);
10708 if ((ulong
) e
->Dst
> vk2
) vk2
= (ulong
) e
->Dst
;
10710 Tally
= (vk2
>>2)&(TWIDTH
-1);
10715 mk_special(int sigma
, Vertex
*n
, Vertex
*v
)
10716 { ulong m
= 0, vk2
= 0;
10720 for (i
= 0; i
< 2; i
++)
10722 { if (sigma
>= v
->from
[i
] && sigma
<= v
->to
[i
])
10723 { m
+= HASH(v
->dst
[i
], v
->to
[i
]-v
->from
[i
]);
10724 if ((ulong
) v
->dst
[i
] > vk2
10725 && v
->to
[i
] > v
->from
[i
])
10726 vk2
= (ulong
) v
->dst
[i
];
10728 { m
+= HASH(v
->dst
[i
], v
->to
[i
]-v
->from
[i
]+1);
10729 if ((ulong
) v
->dst
[i
] > vk2
)
10730 vk2
= (ulong
) v
->dst
[i
];
10732 for (f
= v
->Succ
; f
; f
= f
->Nxt
)
10733 { if (sigma
>= f
->From
&& sigma
<= f
->To
)
10734 { m
+= HASH(f
->Dst
, f
->To
- f
->From
+ f
->s
);
10735 if ((ulong
) f
->Dst
> vk2
10736 && f
->To
- f
->From
+ f
->s
> 0)
10737 vk2
= (ulong
) f
->Dst
;
10738 } else if (f
->s
== 1 && sigma
== f
->S
)
10739 { m
+= HASH(f
->Dst
, f
->To
- f
->From
+ 1);
10740 if ((ulong
) f
->Dst
> vk2
) vk2
= (ulong
) f
->Dst
;
10742 { m
+= HASH(f
->Dst
, f
->To
- f
->From
+ 1 + f
->s
);
10743 if ((ulong
) f
->Dst
> vk2
) vk2
= (ulong
) f
->Dst
;
10746 if ((ulong
) n
> vk2
) vk2
= (ulong
) n
;
10747 Tally
= (vk2
>>2)&(TWIDTH
-1);
10753 dfa_init(ushort nr_layers
)
10754 { int i
; Vertex
*r
, *t
;
10756 dfa_depth
= nr_layers
; /* one byte per layer */
10757 path
= (Vertex
**) emalloc((dfa_depth
+1)*sizeof(Vertex
*));
10758 layers
= (Vertex
**) emalloc(TWIDTH
*(dfa_depth
+1)*sizeof(Vertex
*));
10759 lastword
= (uchar
*) emalloc((dfa_depth
+1)*sizeof(uchar
));
10760 lastword
[dfa_depth
] = lastword
[0] = 255;
10761 path
[0] = R
= new_vertex(); F
= new_vertex();
10763 for (i
= 1, r
= R
; i
< dfa_depth
; i
++, r
= t
)
10764 t
= allDelta(r
, i
-1);
10765 NF
= allDelta(r
, i
-1);
10769 static void complement_dfa(void) { Vertex
*tmp
= F
; F
= NF
; NF
= tmp
; }
10773 tree_stats(Vertex
*t
)
10774 { Edge
*e
; double cnt
=0.0;
10776 if (!t
->key
) return 0;
10777 t
->key
= 0; /* precaution */
10778 if (t
->dst
[0]) cnt
++;
10779 if (t
->dst
[1]) cnt
++;
10780 for (e
= t
->Succ
; e
; e
= e
->Nxt
)
10782 cnt
+= tree_stats(t
->lnk
);
10783 cnt
+= tree_stats(t
->left
);
10784 cnt
+= tree_stats(t
->right
);
10790 { int i
, j
; double cnt
= 0.0;
10791 for (j
= 0; j
< TWIDTH
; j
++)
10792 for (i
= 0; i
< dfa_depth
+1; i
++)
10793 cnt
+= tree_stats(layers
[i
*TWIDTH
+j
]);
10794 printf("Minimized Automaton: %6d nodes and %6g edges\n",
10799 dfa_member(ulong n
)
10801 uchar
*w
= &word
[n
];
10804 p
= &path
[n
]; q
= (p
+1);
10805 for (i
= n
; i
< dfa_depth
; i
++)
10806 *q
++ = Delta(*p
++, *w
++);
10811 dfa_store(uchar
*sv
)
10812 { Vertex
**p
, **q
, *s
, *y
, *old
, *new = F
;
10813 uchar
*w
, *u
= lastword
;
10817 while (*w
++ == *u
++) /* find first byte that differs */
10819 pfrst
= (int) (u
- lastword
) - 1;
10820 memcpy(&lastword
[pfrst
], &sv
[pfrst
], dfa_depth
-pfrst
);
10821 if (pfrst
> iv
) pfrst
= iv
;
10822 if (pfrst
> nv
) pfrst
= nv
;
10824 p
= &path
[pfrst
]; q
= (p
+1); w
= &word
[pfrst
];
10825 for (i
= pfrst
; i
< dfa_depth
; i
++)
10826 *q
++ = Delta(*p
++, *w
++); /* (*p)->delta[*w++]; */
10828 if (*p
== F
) return 1; /* it's already there */
10833 new = find_it(path
[iv
], old
, word
[iv
], iv
);
10834 } while (new && iv
> 0);
10837 nv
= k
= 0; s
= path
[0];
10838 for (j
= 1; j
<= iv
; ++j
)
10839 if (path
[j
]->num
> 1)
10840 { y
= new_vertex();
10841 copyEdges(y
, path
[j
]);
10845 setDelta(s
, word
[j
-1], y
);
10847 y
->num
= 1; /* initial value 1 */
10849 path
[j
]->num
--; /* only 1 moved from j to y */
10855 y
= Delta(s
, word
[iv
]);
10858 setDelta(s
, word
[iv
], old
);
10862 for (j
= iv
+1; j
< dfa_depth
; j
++)
10863 if (path
[j
]->num
== 0)
10864 { numDelta(path
[j
], -1);
10865 delete_it(path
[j
], j
);
10866 recyc_vertex(path
[j
]);
10873 splay(ulong i
, Vertex
*t
)
10874 { Vertex N
, *l
, *r
, *y
;
10877 N
.left
= N
.right
= (Vertex
*) 0;
10881 { if (!t
->left
) break;
10882 if (i
< t
->left
->key
)
10884 t
->left
= y
->right
;
10887 if (!t
->left
) break;
10892 } else if (i
> t
->key
)
10893 { if (!t
->right
) break;
10894 if (i
> t
->right
->key
)
10896 t
->right
= y
->left
;
10899 if (!t
->right
) break;
10907 l
->right
= t
->left
;
10908 r
->left
= t
->right
;
10915 insert_it(Vertex
*v
, int L
)
10920 nr
= ((L
*TWIDTH
)+Tally
);
10931 new->left
= t
->left
;
10933 t
->left
= (Vertex
*) 0;
10934 } else if (i
> t
->key
)
10936 new->right
= t
->right
;
10938 t
->right
= (Vertex
*) 0;
10939 } else /* it's already there */
10940 { v
->lnk
= t
->lnk
; /* put in linked list off v */
10948 checkit(Vertex
*h
, Vertex
*v
, Vertex
*n
, uchar sigma
)
10952 for (k
= 0; k
< 2; k
++)
10954 { if (sigma
>= h
->from
[k
] && sigma
<= h
->to
[k
])
10955 { if (h
->dst
[k
] != n
) goto no_match
;
10957 for (i
= h
->from
[k
]; i
<= h
->to
[k
]; i
++)
10958 { if (i
== sigma
) continue;
10959 g
= cacheDelta(v
, i
, j
); j
= 0;
10960 if (h
->dst
[k
] != g
->Dst
)
10962 if (g
->s
== 0 || g
->S
!= i
)
10965 for (f
= h
->Succ
; f
; f
= f
->Nxt
)
10966 { if (INRANGE(f
,sigma
))
10967 { if (f
->Dst
!= n
) goto no_match
;
10969 for (i
= f
->From
; i
<= f
->To
; i
++)
10970 { if (i
== sigma
) continue;
10971 g
= cacheDelta(v
, i
, j
); j
= 0;
10972 if (f
->Dst
!= g
->Dst
)
10974 if (g
->s
== 1 && i
== g
->S
)
10978 if (f
->s
&& f
->S
!= sigma
)
10979 { g
= cacheDelta(v
, f
->S
, 1);
10980 if (f
->Dst
!= g
->Dst
)
10984 if (h
->Succ
|| h
->dst
[0] || h
->dst
[1]) return 1;
10990 find_it(Vertex
*v
, Vertex
*n
, uchar sigma
, int L
)
10994 i
= mk_special(sigma
,n
,v
);
10995 nr
= ((L
*TWIDTH
)+Tally
);
10998 if (!t
) return (Vertex
*) 0;
10999 layers
[nr
] = t
= splay(i
, t
);
11001 for (z
= t
; z
; z
= z
->lnk
)
11002 if (checkit(z
, v
, n
, sigma
))
11005 return (Vertex
*) 0;
11009 delete_it(Vertex
*v
, int L
)
11014 nr
= ((L
*TWIDTH
)+Tally
);
11020 { Vertex
*z
, *y
= (Vertex
*) 0;
11021 for (z
= t
; z
&& z
!= v
; y
= z
, z
= z
->lnk
)
11023 if (z
!= v
) goto bad
;
11026 z
->lnk
= (Vertex
*) 0;
11029 } else if (z
->lnk
) /* z == t == v */
11032 y
->right
= t
->right
;
11033 t
->left
= t
->right
= t
->lnk
= (Vertex
*) 0;
11037 /* delete the node itself */
11041 { x
= splay(i
, t
->left
);
11042 x
->right
= t
->right
;
11044 t
->left
= t
->right
= t
->lnk
= (Vertex
*) 0;
11048 bad
: Uerror("cannot happen delete");
11051 #if defined(MA) && (defined(W_XPT) || defined(R_XPT))
11052 static Vertex
**temptree
;
11053 static char wbuf
[4096];
11054 static int WCNT
= 4096, wcnt
=0;
11055 static uchar stacker
[MA
+1];
11056 static ulong stackcnt
= 0;
11057 extern double nstates
, nlinks
, truncs
, truncs2
;
11060 xwrite(int fd
, char *b
, int n
)
11062 if (wcnt
+n
>= 4096)
11063 { write(fd
, wbuf
, wcnt
);
11066 memcpy(&wbuf
[wcnt
], b
, n
);
11074 write(fd
, wbuf
, wcnt
);
11080 w_vertex(int fd
, Vertex
*v
)
11081 { char t
[3]; int i
; Edge
*e
;
11083 xwrite(fd
, (char *) &v
, sizeof(Vertex
*));
11085 for (i
= 0; i
< 2; i
++)
11087 { t
[1] = v
->from
[i
], t
[2] = v
->to
[i
];
11089 xwrite(fd
, (char *) &(v
->dst
[i
]), sizeof(Vertex
*));
11091 for (e
= v
->Succ
; e
; e
= e
->Nxt
)
11092 { t
[1] = e
->From
, t
[2] = e
->To
;
11094 xwrite(fd
, (char *) &(e
->Dst
), sizeof(Vertex
*));
11097 { t
[1] = t
[2] = e
->S
;
11099 xwrite(fd
, (char *) &(e
->Dst
), sizeof(Vertex
*));
11104 w_layer(int fd
, Vertex
*v
)
11108 xwrite(fd
, (char *) &c
, 1);
11110 w_layer(fd
, v
->lnk
);
11111 w_layer(fd
, v
->left
);
11112 w_layer(fd
, v
->right
);
11117 { int fd
; char nm
[64];
11119 static uchar xwarned
= 0;
11121 sprintf(nm
, "%s.xpt", PanSource
);
11122 if ((fd
= creat(nm
, 0666)) <= 0)
11125 printf("cannot creat checkpoint file\n");
11128 xwrite(fd
, (char *) &nstates
, sizeof(double));
11129 xwrite(fd
, (char *) &truncs
, sizeof(double));
11130 xwrite(fd
, (char *) &truncs2
, sizeof(double));
11131 xwrite(fd
, (char *) &nlinks
, sizeof(double));
11132 xwrite(fd
, (char *) &dfa_depth
, sizeof(int));
11133 xwrite(fd
, (char *) &R
, sizeof(Vertex
*));
11134 xwrite(fd
, (char *) &F
, sizeof(Vertex
*));
11135 xwrite(fd
, (char *) &NF
, sizeof(Vertex
*));
11137 for (j
= 0; j
< TWIDTH
; j
++)
11138 for (i
= 0; i
< dfa_depth
+1; i
++)
11139 { w_layer(fd
, layers
[i
*TWIDTH
+j
]);
11140 c
= 2; xwrite(fd
, (char *) &c
, 1);
11146 xread(int fd
, char *b
, int n
)
11147 { int m
= wcnt
; int delta
= 0;
11149 { if (m
> 0) memcpy(b
, &wbuf
[WCNT
-m
], m
);
11151 WCNT
= wcnt
= read(fd
, wbuf
, 4096);
11153 Uerror("xread failed -- insufficient data");
11156 memcpy(&b
[delta
], &wbuf
[WCNT
-wcnt
], n
);
11161 x_cleanup(Vertex
*c
)
11162 { Edge
*e
; /* remove the tree and edges from c */
11164 for (e
= c
->Succ
; e
; e
= e
->Nxt
)
11171 { Vertex
*tmp
; int i
, s
;
11173 /* double-check: */
11174 stacker
[dfa_depth
-1] = 0; r
= dfa_store(stacker
);
11175 stacker
[dfa_depth
-1] = 4; j
= dfa_member(dfa_depth
-1);
11176 if (r
!= 1 || j
!= 0)
11177 { printf("%d: ", stackcnt
);
11178 for (i
= 0; i
< dfa_depth
; i
++)
11179 printf("%d,", stacker
[i
]);
11180 printf(" -- not a stackstate <o:%d,4:%d>\n", r
, j
);
11183 stacker
[dfa_depth
-1] = 1;
11184 s
= dfa_member(dfa_depth
-1);
11186 { tmp
= F
; F
= NF
; NF
= tmp
; } /* complement */
11187 if (s
) dfa_store(stacker
);
11188 stacker
[dfa_depth
-1] = 0;
11189 dfa_store(stacker
);
11191 { tmp
= F
; F
= NF
; NF
= tmp
; }
11195 x_rm_stack(Vertex
*t
, int k
)
11203 for (e
= t
->Succ
; e
; e
= e
->Nxt
)
11204 { for (j
= e
->From
; j
<= (int) e
->To
; j
++)
11205 { stacker
[k
] = (uchar
) j
;
11206 x_rm_stack(e
->Dst
, k
-1);
11209 { stacker
[k
] = e
->S
;
11210 x_rm_stack(e
->Dst
, k
-1);
11215 insert_withkey(Vertex
*v
, int L
)
11216 { Vertex
*new, *t
= temptree
[L
];
11218 if (!t
) { temptree
[L
] = v
; return v
; }
11219 t
= splay(v
->key
, t
);
11220 if (v
->key
< t
->key
)
11222 new->left
= t
->left
;
11224 t
->left
= (Vertex
*) 0;
11225 } else if (v
->key
> t
->key
)
11227 new->right
= t
->right
;
11229 t
->right
= (Vertex
*) 0;
11231 { if (t
!= R
&& t
!= F
&& t
!= NF
)
11232 Uerror("double insert, bad checkpoint data");
11243 find_withkey(Vertex
*v
, int L
)
11244 { Vertex
*t
= temptree
[L
];
11246 { temptree
[L
] = t
= splay((ulong
) v
, t
);
11247 if (t
->key
== (ulong
) v
)
11250 Uerror("not found error, bad checkpoint data");
11251 return (Vertex
*) 0;
11255 r_layer(int fd
, int n
)
11261 { xread(fd
, &c
, 1);
11264 { v
= new_vertex();
11265 xread(fd
, (char *) &(v
->key
), sizeof(Vertex
*));
11266 v
= insert_withkey(v
, n
);
11267 } else /* c == 0 */
11268 { e
= new_edge((Vertex
*) 0);
11272 xread(fd
, (char *) &(e
->Dst
), sizeof(Vertex
*));
11278 v_fix(Vertex
*t
, int nr
)
11283 for (i
= 0; i
< 2; i
++)
11285 t
->dst
[i
] = find_withkey(t
->dst
[i
], nr
);
11287 for (e
= t
->Succ
; e
; e
= e
->Nxt
)
11288 e
->Dst
= find_withkey(e
->Dst
, nr
);
11290 v_fix(t
->left
, nr
);
11291 v_fix(t
->right
, nr
);
11295 v_insert(Vertex
*t
, int nr
)
11299 v_insert(t
->left
, nr
);
11300 v_insert(t
->right
, nr
);
11302 /* remove only leafs from temptree */
11303 t
->left
= t
->right
= t
->lnk
= (Vertex
*) 0;
11304 insert_it(t
, nr
); /* into layers */
11305 for (i
= 0; i
< 2; i
++)
11307 t
->dst
[i
]->num
+= (t
->to
[i
] - t
->from
[i
] + 1);
11308 for (e
= t
->Succ
; e
; e
= e
->Nxt
)
11309 e
->Dst
->num
+= (e
->To
- e
->From
+ 1 + e
->s
);
11316 for (i
= 0; i
< dfa_depth
; i
++)
11317 v_fix(temptree
[i
], (i
+1));
11319 for (i
= dfa_depth
; i
>= 0; i
--)
11320 v_insert(temptree
[i
], i
);
11324 x_tail(Vertex
*t
, ulong want
)
11325 { int i
, yes
, no
; Edge
*e
; Vertex
*v
= (Vertex
*) 0;
11330 for (i
= 0; i
< 2; i
++)
11331 if ((ulong
) t
->dst
[i
] == want
)
11332 { /* was t->from[i] <= 0 && t->to[i] >= 0 */
11333 /* but from and to are uchar */
11334 if (t
->from
[i
] == 0)
11337 if (t
->from
[i
] <= 4 && t
->to
[i
] >= 4)
11341 for (e
= t
->Succ
; e
; e
= e
->Nxt
)
11342 if ((ulong
) e
->Dst
== want
)
11343 { /* was INRANGE(e,0) but From and To are uchar */
11344 if ((e
->From
== 0) || (e
->s
==1 && e
->S
==0))
11346 else if (INRANGE(e
, 4))
11349 if (yes
&& !no
) return t
;
11350 v
= x_tail(t
->left
, want
); if (v
) return v
;
11351 v
= x_tail(t
->right
, want
); if (v
) return v
;
11352 return (Vertex
*) 0;
11356 x_anytail(Vertex
*t
, Vertex
*c
, int nr
)
11357 { int i
; Edge
*e
, *f
; Vertex
*v
;
11361 for (i
= 0; i
< 2; i
++)
11362 if ((ulong
) t
->dst
[i
] == c
->key
)
11363 { v
= new_vertex(); v
->key
= t
->key
;
11365 f
->From
= t
->from
[i
];
11370 x_anytail(temptree
[nr
-1], v
, nr
-1);
11373 for (e
= t
->Succ
; e
; e
= e
->Nxt
)
11374 if ((ulong
) e
->Dst
== c
->key
)
11375 { v
= new_vertex(); v
->key
= t
->key
;
11383 x_anytail(temptree
[nr
-1], v
, nr
-1);
11386 x_anytail(t
->left
, c
, nr
);
11387 x_anytail(t
->right
, c
, nr
);
11392 { Vertex
*c
, *v
; /* find 0 and !4 predecessor of F */
11394 v
= x_tail(temptree
[dfa_depth
-1], F
->key
);
11395 if (!v
) return (Vertex
*) 0;
11397 c
= new_vertex(); c
->key
= v
->key
;
11399 /* every node on dfa_depth-2 that has v->key as succ */
11400 /* make copy and let c point to these (reversing ptrs) */
11402 x_anytail(temptree
[dfa_depth
-2], c
, dfa_depth
-2);
11409 { int fd
; char nm
[64]; Vertex
*d
;
11413 sprintf(nm
, "%s.xpt", PanSource
);
11414 if ((fd
= open(nm
, 0)) < 0) /* O_RDONLY */
11415 Uerror("cannot open checkpoint file");
11417 xread(fd
, (char *) &nstates
, sizeof(double));
11418 xread(fd
, (char *) &truncs
, sizeof(double));
11419 xread(fd
, (char *) &truncs2
, sizeof(double));
11420 xread(fd
, (char *) &nlinks
, sizeof(double));
11421 xread(fd
, (char *) &dfa_depth
, sizeof(int));
11423 if (dfa_depth
!= MA
+a_cycles
)
11424 Uerror("bad dfa_depth in checkpoint file");
11426 path
= (Vertex
**) emalloc((dfa_depth
+1)*sizeof(Vertex
*));
11427 layers
= (Vertex
**) emalloc(TWIDTH
*(dfa_depth
+1)*sizeof(Vertex
*));
11428 temptree
= (Vertex
**) emalloc((dfa_depth
+2)*sizeof(Vertex
*));
11429 lastword
= (uchar
*) emalloc((dfa_depth
+1)*sizeof(uchar
));
11430 lastword
[dfa_depth
] = lastword
[0] = 255;
11432 path
[0] = R
= new_vertex();
11433 xread(fd
, (char *) &R
->key
, sizeof(Vertex
*));
11434 R
= insert_withkey(R
, 0);
11437 xread(fd
, (char *) &F
->key
, sizeof(Vertex
*));
11438 F
= insert_withkey(F
, dfa_depth
);
11441 xread(fd
, (char *) &NF
->key
, sizeof(Vertex
*));
11442 NF
= insert_withkey(NF
, dfa_depth
);
11444 for (j
= 0; j
< TWIDTH
; j
++)
11445 for (i
= 0; i
< dfa_depth
+1; i
++)
11448 if (wcnt
!= 0) Uerror("bad count in checkpoint file");
11452 stacker
[dfa_depth
-1] = 0;
11453 x_rm_stack(d
, dfa_depth
-2);
11457 printf("pan: removed %d stackstates\n", stackcnt
);
11458 nstates
-= (double) stackcnt
;
11463 check_claim(int st
)
11465 if (st
== endclaim
)
11466 uerror("claim violated!");
11467 if (stopstate
[VERI
][st
])
11468 uerror("end state in claim reached");
11474 printf("global vars:\n");
11475 printf(" byte write_off: %d\n", now
.write_off
);
11477 for (l_in
= 0; l_in
< 2; l_in
++)
11479 printf(" byte commit_count[%d]: %d\n", l_in
, now
.commit_count
[l_in
]);
11482 printf(" byte _commit_sum: %d\n", now
._commit_sum
);
11483 printf(" byte read_off: %d\n", now
.read_off
);
11484 printf(" byte events_lost: %d\n", now
.events_lost
);
11485 printf(" byte refcount: %d\n", now
.refcount
);
11487 for (l_in
= 0; l_in
< 8; l_in
++)
11489 printf(" bit buffer_use[%d]: %d\n", l_in
, now
.buffer_use
[l_in
]);
11494 c_locals(int pid
, int tp
)
11501 printf("local vars proc %d (:init:):\n", pid
);
11502 printf(" byte i: %d\n", ((P4
*)pptr(pid
))->i
);
11503 printf(" byte j: %d\n", ((P4
*)pptr(pid
))->j
);
11504 printf(" byte sum: %d\n", ((P4
*)pptr(pid
))->sum
);
11505 printf(" byte commit_sum: %d\n", ((P4
*)pptr(pid
))->commit_sum
);
11511 printf("local vars proc %d (reader):\n", pid
);
11512 printf(" byte i: %d\n", ((P2
*)pptr(pid
))->i
);
11513 printf(" byte j: %d\n", ((P2
*)pptr(pid
))->j
);
11516 printf("local vars proc %d (tracer):\n", pid
);
11517 printf(" byte size: %d\n", ((P1
*)pptr(pid
))->size
);
11518 printf(" byte prev_off: %d\n", ((P1
*)pptr(pid
))->prev_off
);
11519 printf(" byte new_off: %d\n", ((P1
*)pptr(pid
))->new_off
);
11520 printf(" byte tmp_commit: %d\n", ((P1
*)pptr(pid
))->tmp_commit
);
11521 printf(" byte i: %d\n", ((P1
*)pptr(pid
))->i
);
11522 printf(" byte j: %d\n", ((P1
*)pptr(pid
))->j
);
11525 printf("local vars proc %d (switcher):\n", pid
);
11526 printf(" byte prev_off: %d\n", ((P0
*)pptr(pid
))->prev_off
);
11527 printf(" byte new_off: %d\n", ((P0
*)pptr(pid
))->new_off
);
11528 printf(" byte tmp_commit: %d\n", ((P0
*)pptr(pid
))->tmp_commit
);
11529 printf(" byte size: %d\n", ((P0
*)pptr(pid
))->size
);
11537 default: Printf("%d", x
);
11541 c_chandump(int unused
) { unused
++; /* avoid complaints */ }