test_ID | test_file | ground_truth | hints_removed |
---|---|---|---|
700 | ironsync-osdi2023_tmp_tmpx80antoe_lib_Math_div_def.dfy | //- Specs/implements mathematical div and mod, not the C version.
//- This may produce "surprising" results for negative values
//- For example, -3 div 5 is -1 and -3 mod 5 is 2.
//- Note this is consistent: 5 * -1 + 2 == -3
module Math__div_def_i {
/*
function mod(x:int, m:int) : int
requires m > 0;
decreases if x < 0 then (m - x) else x;
{
if x < 0 then
mod(m + x, m)
else if x < m then
x
else
mod(x - m, m)
}
*/
function div(x:int, d:int) : int
requires d != 0;
{
x/d
}
function mod(x:int, d:int) : int
requires d != 0;
{
x%d
}
function div_recursive(x:int, d:int) : int
requires d != 0;
{ INTERNAL_div_recursive(x,d) }
function mod_recursive(x:int, d:int) : int
requires d > 0;
{ INTERNAL_mod_recursive(x,d) }
function mod_boogie(x:int, y:int) : int
requires y != 0;
{ x % y } //- INTERNAL_mod_boogie(x,y) }
function div_boogie(x:int, y:int) : int
requires y != 0;
{ x / y } //-{ INTERNAL_div_boogie(x,y) }
function my_div_recursive(x:int, d:int) : int
requires d != 0;
{
if d > 0 then
my_div_pos(x, d)
else
-1 * my_div_pos(x, -1*d)
}
function my_div_pos(x:int, d:int) : int
requires d > 0;
decreases if x < 0 then (d - x) else x;
{
if x < 0 then
-1 + my_div_pos(x+d, d)
else if x < d then
0
else
1 + my_div_pos(x-d, d)
}
function my_mod_recursive(x:int, m:int) : int
requires m > 0;
decreases if x < 0 then (m - x) else x;
{
if x < 0 then
my_mod_recursive(m + x, m)
else if x < m then
x
else
my_mod_recursive(x - m, m)
}
//- Kept for legacy reasons:
//-static function INTERNAL_mod_boogie(x:int, m:int) : int { x % y }
function INTERNAL_mod_recursive(x:int, m:int) : int
requires m > 0;
{ my_mod_recursive(x, m) }
//-static function INTERNAL_div_boogie(x:int, m:int) : int { x / m }
function INTERNAL_div_recursive(x:int, d:int) : int
requires d != 0;
{ my_div_recursive(x, d) }
/*
ghost method mod_test()
{
assert -3 % 5 == 2;
assert 10 % -5 == 0;
assert 1 % -5 == 1;
assert -3 / 5 == -1;
}
*/
}
| //- Specs/implements mathematical div and mod, not the C version.
//- This may produce "surprising" results for negative values
//- For example, -3 div 5 is -1 and -3 mod 5 is 2.
//- Note this is consistent: 5 * -1 + 2 == -3
module Math__div_def_i {
/*
function mod(x:int, m:int) : int
requires m > 0;
{
if x < 0 then
mod(m + x, m)
else if x < m then
x
else
mod(x - m, m)
}
*/
function div(x:int, d:int) : int
requires d != 0;
{
x/d
}
function mod(x:int, d:int) : int
requires d != 0;
{
x%d
}
function div_recursive(x:int, d:int) : int
requires d != 0;
{ INTERNAL_div_recursive(x,d) }
function mod_recursive(x:int, d:int) : int
requires d > 0;
{ INTERNAL_mod_recursive(x,d) }
function mod_boogie(x:int, y:int) : int
requires y != 0;
{ x % y } //- INTERNAL_mod_boogie(x,y) }
function div_boogie(x:int, y:int) : int
requires y != 0;
{ x / y } //-{ INTERNAL_div_boogie(x,y) }
function my_div_recursive(x:int, d:int) : int
requires d != 0;
{
if d > 0 then
my_div_pos(x, d)
else
-1 * my_div_pos(x, -1*d)
}
function my_div_pos(x:int, d:int) : int
requires d > 0;
{
if x < 0 then
-1 + my_div_pos(x+d, d)
else if x < d then
0
else
1 + my_div_pos(x-d, d)
}
function my_mod_recursive(x:int, m:int) : int
requires m > 0;
{
if x < 0 then
my_mod_recursive(m + x, m)
else if x < m then
x
else
my_mod_recursive(x - m, m)
}
//- Kept for legacy reasons:
//-static function INTERNAL_mod_boogie(x:int, m:int) : int { x % y }
function INTERNAL_mod_recursive(x:int, m:int) : int
requires m > 0;
{ my_mod_recursive(x, m) }
//-static function INTERNAL_div_boogie(x:int, m:int) : int { x / m }
function INTERNAL_div_recursive(x:int, d:int) : int
requires d != 0;
{ my_div_recursive(x, d) }
/*
ghost method mod_test()
{
}
*/
}
|
701 | ironsync-osdi2023_tmp_tmpx80antoe_linear-dafny_Test_c++_arrays.dfy | // RUN: %dafny /compile:3 /spillTargetCode:2 /compileTarget:cpp "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
newtype uint32 = i:int | 0 <= i < 0x100000000
method returnANullArray() returns (a: array?<uint32>)
ensures a == null
{
a := null;
}
method returnANonNullArray() returns (a: array?<uint32>)
ensures a != null
ensures a.Length == 5
{
a := new uint32[5];
a[0] := 1;
a[1] := 2;
a[2] := 3;
a[3] := 4;
a[4] := 5;
}
method LinearSearch(a: array<uint32>, len:uint32, key: uint32) returns (n: uint32)
requires a.Length == len as int
ensures 0 <= n <= len
ensures n == len || a[n] == key
{
n := 0;
while n < len
invariant n <= len
{
if a[n] == key {
return;
}
n := n + 1;
}
}
method PrintArray<A>(a:array?<A>, len:uint32)
requires a != null ==> len as int == a.Length
{
if (a == null) {
print "It's null\n";
} else {
var i:uint32 := 0;
while i < len {
print a[i], " ";
i := i + 1;
}
print "\n";
}
}
datatype ArrayDatatype = AD(ar: array<uint32>)
method Main() {
var a := new uint32[23];
var i := 0;
while i < 23 {
a[i] := i;
i := i + 1;
}
PrintArray(a, 23);
var n := LinearSearch(a, 23, 17);
print n, "\n";
var s : seq<uint32> := a[..];
print s, "\n";
s := a[2..16];
print s, "\n";
s := a[20..];
print s, "\n";
s := a[..8];
print s, "\n";
// Conversion to sequence should copy elements (sequences are immutable!)
a[0] := 42;
print s, "\n";
PrintArray<uint32>(null, 0);
print "Null array:\n";
var a1 := returnANullArray();
PrintArray<uint32>(a1, 5);
print "Non-Null array:\n";
var a2 := returnANonNullArray();
PrintArray<uint32>(a2, 5);
print "Array in datatype:\n";
var someAr := new uint32[3];
someAr[0] := 1;
someAr[1] := 3;
someAr[2] := 9;
var ad := AD(someAr);
PrintArray<uint32>(ad.ar, 3);
}
| // RUN: %dafny /compile:3 /spillTargetCode:2 /compileTarget:cpp "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
newtype uint32 = i:int | 0 <= i < 0x100000000
method returnANullArray() returns (a: array?<uint32>)
ensures a == null
{
a := null;
}
method returnANonNullArray() returns (a: array?<uint32>)
ensures a != null
ensures a.Length == 5
{
a := new uint32[5];
a[0] := 1;
a[1] := 2;
a[2] := 3;
a[3] := 4;
a[4] := 5;
}
method LinearSearch(a: array<uint32>, len:uint32, key: uint32) returns (n: uint32)
requires a.Length == len as int
ensures 0 <= n <= len
ensures n == len || a[n] == key
{
n := 0;
while n < len
{
if a[n] == key {
return;
}
n := n + 1;
}
}
method PrintArray<A>(a:array?<A>, len:uint32)
requires a != null ==> len as int == a.Length
{
if (a == null) {
print "It's null\n";
} else {
var i:uint32 := 0;
while i < len {
print a[i], " ";
i := i + 1;
}
print "\n";
}
}
datatype ArrayDatatype = AD(ar: array<uint32>)
method Main() {
var a := new uint32[23];
var i := 0;
while i < 23 {
a[i] := i;
i := i + 1;
}
PrintArray(a, 23);
var n := LinearSearch(a, 23, 17);
print n, "\n";
var s : seq<uint32> := a[..];
print s, "\n";
s := a[2..16];
print s, "\n";
s := a[20..];
print s, "\n";
s := a[..8];
print s, "\n";
// Conversion to sequence should copy elements (sequences are immutable!)
a[0] := 42;
print s, "\n";
PrintArray<uint32>(null, 0);
print "Null array:\n";
var a1 := returnANullArray();
PrintArray<uint32>(a1, 5);
print "Non-Null array:\n";
var a2 := returnANonNullArray();
PrintArray<uint32>(a2, 5);
print "Array in datatype:\n";
var someAr := new uint32[3];
someAr[0] := 1;
someAr[1] := 3;
someAr[2] := 9;
var ad := AD(someAr);
PrintArray<uint32>(ad.ar, 3);
}
|
702 | ironsync-osdi2023_tmp_tmpx80antoe_linear-dafny_Test_c++_maps.dfy | // RUN: %dafny /compile:3 /spillTargetCode:2 /compileTarget:cpp "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
newtype uint32 = i:int | 0 <= i < 0x100000000
method Test(name:string, b:bool)
requires b
{
if b {
print name, ": This is expected\n";
} else {
print name, ": This is *** UNEXPECTED *** !!!!\n";
}
}
datatype map_holder = map_holder(m:map<bool, bool>)
method Basic() {
var f:map_holder;
var s:map<uint32,uint32> := map[1 := 0, 2 := 1, 3 := 2, 4 := 3];
var t:map<uint32,uint32> := map[1 := 0, 2 := 1, 3 := 2, 4 := 3];
Test("Identity", s == s);
Test("ValuesIdentity", s == t);
Test("KeyMembership", 1 in s);
Test("Value1", s[1] == 0);
Test("Value2", t[2] == 1);
var u := s[1 := 42];
Test("Update Inequality", s != u);
Test("Update Immutable 1", s == s);
Test("Update Immutable 2", s[1] == 0);
Test("Update Result", u[1] == 42);
Test("Update Others", u[2] == 1);
var s_keys := s.Keys;
var t_keys := t.Keys;
Test("Keys equal", s_keys == t_keys);
Test("Keys membership 1", 1 in s_keys);
Test("Keys membership 2", 2 in s_keys);
Test("Keys membership 3", 3 in s_keys);
Test("Keys membership 4", 4 in s_keys);
}
method Main() {
Basic();
TestMapMergeSubtraction();
}
method TestMapMergeSubtraction() {
TestMapMerge();
TestMapSubtraction();
TestNullsAmongKeys();
TestNullsAmongValues();
}
method TestMapMerge() {
var a := map["ronald" := 66 as uint32, "jack" := 70, "bk" := 8];
var b := map["wendy" := 52, "bk" := 67];
var ages := a + b;
assert ages["jack"] == 70;
assert ages["bk"] == 67;
assert "sanders" !in ages;
print |a|, " ", |b|, " ", |ages|, "\n"; // 3 2 4
print ages["jack"], " ", ages["wendy"], " ", ages["ronald"], "\n"; // 70 52 66
print a["bk"], " ", b["bk"], " ", ages["bk"], "\n"; // 8 67 67
}
method TestMapSubtraction() {
var ages := map["ronald" := 66 as uint32, "jack" := 70, "bk" := 67, "wendy" := 52];
var d := ages - {};
var e := ages - {"jack", "sanders"};
print |ages|, " ", |d|, " ", |e|, "\n"; // 4 4 3
print "ronald" in d, " ", "sanders" in d, " ", "jack" in d, " ", "sibylla" in d, "\n"; // true false true false
print "ronald" in e, " ", "sanders" in e, " ", "jack" in e, " ", "sibylla" in e, "\n"; // true false false false
}
class MyClass {
const name: string
constructor (name: string) {
this.name := name;
}
}
method TestNullsAmongKeys() {
var a := new MyClass("ronald");
var b := new MyClass("wendy");
var c: MyClass? := null;
var d := new MyClass("jack");
var e := new MyClass("sibylla");
var m := map[a := 0 as uint32, b := 1, c := 2, d := 3];
var n := map[a := 0, b := 10, c := 20, e := 4];
var o := map[b := 199, a := 198];
var o' := map[b := 199, c := 55, a := 198];
var o'' := map[b := 199, c := 56, a := 198];
var o3 := map[c := 3, d := 16];
var x0, x1, x2 := o == o', o' == o'', o' == o';
print x0, " " , x1, " ", x2, "\n"; // false false true
var p := m + n;
var q := n + o;
var r := o + m;
var s := o3 + o;
var y0, y1, y2, y3 := p == n + m, q == o + n, r == m + o, s == o + o3;
print y0, " " , y1, " ", y2, " ", y3, "\n"; // false false false true
print p[a], " ", p[c], " ", p[e], "\n"; // 0 20 4
print q[a], " ", q[c], " ", q[e], "\n"; // 198 20 4
print r[a], " ", r[c], " ", e in r, "\n"; // 0 2 false
p, q, r := GenericMap(m, n, o, a, e);
print p[a], " ", p[c], " ", p[e], "\n"; // 0 20 4
print q[a], " ", q[c], " ", q[e], "\n"; // 198 20 4
print r[a], " ", r[c], " ", e in r, "\n"; // 0 2 false
}
method GenericMap<K, V>(m: map<K, V>, n: map<K, V>, o: map<K, V>, a: K, b: K)
returns (p: map<K, V>, q: map<K, V>, r: map<K, V>)
requires a in m.Keys && a in n.Keys
requires b !in m.Keys && b !in o.Keys
ensures p == m + n && q == n + o && r == o + m
{
p := m + n;
q := n + o;
r := o + m;
print a in m.Keys, " ", a in n.Keys, " ", a in p, " ", b in r, "\n"; // true true true false
assert p.Keys == m.Keys + n.Keys;
assert q.Keys == o.Keys + n.Keys;
assert r.Keys == m.Keys + o.Keys;
}
method TestNullsAmongValues() {
var a := new MyClass("ronald");
var b := new MyClass("wendy");
var d := new MyClass("jack");
var e := new MyClass("sibylla");
var m: map<uint32, MyClass?> := map[0 := a, 1 := b, 2 := null, 3 := null];
var n: map<uint32, MyClass?> := map[0 := d, 10 := b, 20 := null, 4 := e];
var o: map<uint32, MyClass?> := map[199 := null, 198 := a];
var o': map<uint32, MyClass?> := map[199 := b, 55 := null, 198 := a];
var o'': map<uint32, MyClass?> := map[199 := b, 56 := null, 198 := a];
var o3: map<uint32, MyClass?> := map[3 := null, 16 := d];
var x0, x1, x2 := o == o', o' == o'', o' == o';
print x0, " " , x1, " ", x2, "\n"; // false false true
var p := m + n;
var q := n + o;
var r := o + m;
var s := o3 + o;
var y0, y1, y2, y3 := p == n + m, q == o + n, r == m + o, s == o + o3;
print y0, " " , y1, " ", y2, " ", y3, "\n"; // false true true true
print p[0].name, " ", p[1].name, " ", p[20], "\n"; // jack wendy null
print q[0].name, " ", q[199], " ", q[20], "\n"; // jack null null
print r[0].name, " ", r[198].name, " ", 20 in r, "\n"; // ronald ronald false
p, q, r := GenericMap(m, n, o, 0, 321);
print p[0].name, " ", p[1].name, " ", p[20], "\n"; // jack wendy null
print q[0].name, " ", q[199], " ", q[20], "\n"; // jack null null
print r[0].name, " ", r[198].name, " ", 20 in r, "\n"; // ronald ronald false
}
| // RUN: %dafny /compile:3 /spillTargetCode:2 /compileTarget:cpp "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
newtype uint32 = i:int | 0 <= i < 0x100000000
method Test(name:string, b:bool)
requires b
{
if b {
print name, ": This is expected\n";
} else {
print name, ": This is *** UNEXPECTED *** !!!!\n";
}
}
datatype map_holder = map_holder(m:map<bool, bool>)
method Basic() {
var f:map_holder;
var s:map<uint32,uint32> := map[1 := 0, 2 := 1, 3 := 2, 4 := 3];
var t:map<uint32,uint32> := map[1 := 0, 2 := 1, 3 := 2, 4 := 3];
Test("Identity", s == s);
Test("ValuesIdentity", s == t);
Test("KeyMembership", 1 in s);
Test("Value1", s[1] == 0);
Test("Value2", t[2] == 1);
var u := s[1 := 42];
Test("Update Inequality", s != u);
Test("Update Immutable 1", s == s);
Test("Update Immutable 2", s[1] == 0);
Test("Update Result", u[1] == 42);
Test("Update Others", u[2] == 1);
var s_keys := s.Keys;
var t_keys := t.Keys;
Test("Keys equal", s_keys == t_keys);
Test("Keys membership 1", 1 in s_keys);
Test("Keys membership 2", 2 in s_keys);
Test("Keys membership 3", 3 in s_keys);
Test("Keys membership 4", 4 in s_keys);
}
method Main() {
Basic();
TestMapMergeSubtraction();
}
method TestMapMergeSubtraction() {
TestMapMerge();
TestMapSubtraction();
TestNullsAmongKeys();
TestNullsAmongValues();
}
method TestMapMerge() {
var a := map["ronald" := 66 as uint32, "jack" := 70, "bk" := 8];
var b := map["wendy" := 52, "bk" := 67];
var ages := a + b;
print |a|, " ", |b|, " ", |ages|, "\n"; // 3 2 4
print ages["jack"], " ", ages["wendy"], " ", ages["ronald"], "\n"; // 70 52 66
print a["bk"], " ", b["bk"], " ", ages["bk"], "\n"; // 8 67 67
}
method TestMapSubtraction() {
var ages := map["ronald" := 66 as uint32, "jack" := 70, "bk" := 67, "wendy" := 52];
var d := ages - {};
var e := ages - {"jack", "sanders"};
print |ages|, " ", |d|, " ", |e|, "\n"; // 4 4 3
print "ronald" in d, " ", "sanders" in d, " ", "jack" in d, " ", "sibylla" in d, "\n"; // true false true false
print "ronald" in e, " ", "sanders" in e, " ", "jack" in e, " ", "sibylla" in e, "\n"; // true false false false
}
class MyClass {
const name: string
constructor (name: string) {
this.name := name;
}
}
method TestNullsAmongKeys() {
var a := new MyClass("ronald");
var b := new MyClass("wendy");
var c: MyClass? := null;
var d := new MyClass("jack");
var e := new MyClass("sibylla");
var m := map[a := 0 as uint32, b := 1, c := 2, d := 3];
var n := map[a := 0, b := 10, c := 20, e := 4];
var o := map[b := 199, a := 198];
var o' := map[b := 199, c := 55, a := 198];
var o'' := map[b := 199, c := 56, a := 198];
var o3 := map[c := 3, d := 16];
var x0, x1, x2 := o == o', o' == o'', o' == o';
print x0, " " , x1, " ", x2, "\n"; // false false true
var p := m + n;
var q := n + o;
var r := o + m;
var s := o3 + o;
var y0, y1, y2, y3 := p == n + m, q == o + n, r == m + o, s == o + o3;
print y0, " " , y1, " ", y2, " ", y3, "\n"; // false false false true
print p[a], " ", p[c], " ", p[e], "\n"; // 0 20 4
print q[a], " ", q[c], " ", q[e], "\n"; // 198 20 4
print r[a], " ", r[c], " ", e in r, "\n"; // 0 2 false
p, q, r := GenericMap(m, n, o, a, e);
print p[a], " ", p[c], " ", p[e], "\n"; // 0 20 4
print q[a], " ", q[c], " ", q[e], "\n"; // 198 20 4
print r[a], " ", r[c], " ", e in r, "\n"; // 0 2 false
}
method GenericMap<K, V>(m: map<K, V>, n: map<K, V>, o: map<K, V>, a: K, b: K)
returns (p: map<K, V>, q: map<K, V>, r: map<K, V>)
requires a in m.Keys && a in n.Keys
requires b !in m.Keys && b !in o.Keys
ensures p == m + n && q == n + o && r == o + m
{
p := m + n;
q := n + o;
r := o + m;
print a in m.Keys, " ", a in n.Keys, " ", a in p, " ", b in r, "\n"; // true true true false
}
method TestNullsAmongValues() {
var a := new MyClass("ronald");
var b := new MyClass("wendy");
var d := new MyClass("jack");
var e := new MyClass("sibylla");
var m: map<uint32, MyClass?> := map[0 := a, 1 := b, 2 := null, 3 := null];
var n: map<uint32, MyClass?> := map[0 := d, 10 := b, 20 := null, 4 := e];
var o: map<uint32, MyClass?> := map[199 := null, 198 := a];
var o': map<uint32, MyClass?> := map[199 := b, 55 := null, 198 := a];
var o'': map<uint32, MyClass?> := map[199 := b, 56 := null, 198 := a];
var o3: map<uint32, MyClass?> := map[3 := null, 16 := d];
var x0, x1, x2 := o == o', o' == o'', o' == o';
print x0, " " , x1, " ", x2, "\n"; // false false true
var p := m + n;
var q := n + o;
var r := o + m;
var s := o3 + o;
var y0, y1, y2, y3 := p == n + m, q == o + n, r == m + o, s == o + o3;
print y0, " " , y1, " ", y2, " ", y3, "\n"; // false true true true
print p[0].name, " ", p[1].name, " ", p[20], "\n"; // jack wendy null
print q[0].name, " ", q[199], " ", q[20], "\n"; // jack null null
print r[0].name, " ", r[198].name, " ", 20 in r, "\n"; // ronald ronald false
p, q, r := GenericMap(m, n, o, 0, 321);
print p[0].name, " ", p[1].name, " ", p[20], "\n"; // jack wendy null
print q[0].name, " ", q[199], " ", q[20], "\n"; // jack null null
print r[0].name, " ", r[198].name, " ", 20 in r, "\n"; // ronald ronald false
}
|
703 | ironsync-osdi2023_tmp_tmpx80antoe_linear-dafny_Test_c++_sets.dfy | // RUN: %dafny /compile:3 /spillTargetCode:2 /compileTarget:cpp "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
newtype uint32 = i:int | 0 <= i < 0x100000000
datatype Example0 = Example0(u:uint32, b:bool)
method Test0(e0:Example0)
{
var s := { e0 };
}
datatype Example1 = Ex1a(u:uint32) | Ex1b(b:bool)
method Test1(t0:Example1)
{
var t := { t0 };
}
method Test(name:string, b:bool)
requires b
{
if b {
print name, ": This is expected\n";
} else {
print name, ": This is *** UNEXPECTED *** !!!!\n";
}
}
method Basic() {
var s:set<uint32> := {1, 2, 3, 4};
var t:set<uint32> := {1, 2, 3, 4};
Test("Identity", s == s);
Test("ValuesIdentity", s == t);
Test("DiffIdentity", s - {1} == t - {1});
Test("DiffIdentitySelf", s - {2} != s - {1});
Test("ProperSubsetIdentity", !(s < s));
Test("ProperSubset", !(s < t));
Test("SelfSubset", s <= s);
Test("OtherSubset", t <= s && s <= t);
Test("UnionIdentity", s + s == s);
Test("Membership", 1 in s);
Test("NonMembership1", !(5 in s));
Test("NonMembership2", !(1 in (s - {1})));
}
method SetSeq() {
var m1:seq<uint32> := [1];
var m2:seq<uint32> := [1, 2];
var m3:seq<uint32> := [1, 2, 3];
var m4:seq<uint32> := [1, 2, 3, 4];
var n1:seq<uint32> := [1];
var n2:seq<uint32> := [1, 2];
var n3:seq<uint32> := [1, 2, 3];
var s1:set<seq<uint32>> := { m1, m2, m3 };
var s2:set<seq<uint32>> := s1 - { m1 };
Test("SeqMembership1", m1 in s1);
Test("SeqMembership2", m2 in s1);
Test("SeqMembership3", m3 in s1);
Test("SeqNonMembership1", !(m1 in s2));
Test("SeqNonMembership2", !(m4 in s1));
Test("SeqNonMembership3", !(m4 in s2));
Test("SeqMembershipValue1", n1 in s1);
Test("SeqMembershipValue2", n2 in s1);
Test("SeqMembershipValue3", n3 in s1);
}
method SetComprehension(s:set<uint32>)
requires forall i :: 0 <= i < 10 ==> i in s
requires |s| == 10
{
var t:set<uint32> := set y:uint32 | y in s;
Test("SetComprehensionInEquality", t == s);
Test("SetComprehensionInMembership", 0 in t);
}
method LetSuchThat() {
var s:set<uint32> := { 0, 1, 2, 3 };
var e:uint32 :| e in s;
//print e, "\n";
Test("LetSuchThatMembership", e in s);
Test("LetSuchThatValue", e == 0 || e == 1 || e == 2 || e == 3);
}
method Contains() {
var m1:seq<uint32> := [1];
var m2:seq<uint32> := [1, 2];
var m3:seq<uint32> := [1, 2, 3];
var m3identical:seq<uint32> := [1, 2, 3];
var mm := [m1, m3, m1];
if m1 in mm {
print "Membership 1: This is expected\n";
} else {
print "Membership 1: This is unexpected\n";
assert false;
}
if m2 in mm {
print "Membership 2: This is unexpected\n";
assert false;
} else {
print "Membership 2: This is expected\n";
}
if m3 in mm {
print "Membership 3: This is expected\n";
} else {
print "Membership 3: This is unexpected\n";
assert false;
}
if m3identical in mm {
print "Membership 3 value equality: This is expected\n";
} else {
print "Membership 3 value equality: This is unexpected\n";
assert false;
}
}
method Main() {
Basic();
SetSeq();
var s := { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
SetComprehension(s);
LetSuchThat();
}
| // RUN: %dafny /compile:3 /spillTargetCode:2 /compileTarget:cpp "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
newtype uint32 = i:int | 0 <= i < 0x100000000
datatype Example0 = Example0(u:uint32, b:bool)
method Test0(e0:Example0)
{
var s := { e0 };
}
datatype Example1 = Ex1a(u:uint32) | Ex1b(b:bool)
method Test1(t0:Example1)
{
var t := { t0 };
}
method Test(name:string, b:bool)
requires b
{
if b {
print name, ": This is expected\n";
} else {
print name, ": This is *** UNEXPECTED *** !!!!\n";
}
}
method Basic() {
var s:set<uint32> := {1, 2, 3, 4};
var t:set<uint32> := {1, 2, 3, 4};
Test("Identity", s == s);
Test("ValuesIdentity", s == t);
Test("DiffIdentity", s - {1} == t - {1});
Test("DiffIdentitySelf", s - {2} != s - {1});
Test("ProperSubsetIdentity", !(s < s));
Test("ProperSubset", !(s < t));
Test("SelfSubset", s <= s);
Test("OtherSubset", t <= s && s <= t);
Test("UnionIdentity", s + s == s);
Test("Membership", 1 in s);
Test("NonMembership1", !(5 in s));
Test("NonMembership2", !(1 in (s - {1})));
}
method SetSeq() {
var m1:seq<uint32> := [1];
var m2:seq<uint32> := [1, 2];
var m3:seq<uint32> := [1, 2, 3];
var m4:seq<uint32> := [1, 2, 3, 4];
var n1:seq<uint32> := [1];
var n2:seq<uint32> := [1, 2];
var n3:seq<uint32> := [1, 2, 3];
var s1:set<seq<uint32>> := { m1, m2, m3 };
var s2:set<seq<uint32>> := s1 - { m1 };
Test("SeqMembership1", m1 in s1);
Test("SeqMembership2", m2 in s1);
Test("SeqMembership3", m3 in s1);
Test("SeqNonMembership1", !(m1 in s2));
Test("SeqNonMembership2", !(m4 in s1));
Test("SeqNonMembership3", !(m4 in s2));
Test("SeqMembershipValue1", n1 in s1);
Test("SeqMembershipValue2", n2 in s1);
Test("SeqMembershipValue3", n3 in s1);
}
method SetComprehension(s:set<uint32>)
requires forall i :: 0 <= i < 10 ==> i in s
requires |s| == 10
{
var t:set<uint32> := set y:uint32 | y in s;
Test("SetComprehensionInEquality", t == s);
Test("SetComprehensionInMembership", 0 in t);
}
method LetSuchThat() {
var s:set<uint32> := { 0, 1, 2, 3 };
var e:uint32 :| e in s;
//print e, "\n";
Test("LetSuchThatMembership", e in s);
Test("LetSuchThatValue", e == 0 || e == 1 || e == 2 || e == 3);
}
method Contains() {
var m1:seq<uint32> := [1];
var m2:seq<uint32> := [1, 2];
var m3:seq<uint32> := [1, 2, 3];
var m3identical:seq<uint32> := [1, 2, 3];
var mm := [m1, m3, m1];
if m1 in mm {
print "Membership 1: This is expected\n";
} else {
print "Membership 1: This is unexpected\n";
}
if m2 in mm {
print "Membership 2: This is unexpected\n";
} else {
print "Membership 2: This is expected\n";
}
if m3 in mm {
print "Membership 3: This is expected\n";
} else {
print "Membership 3: This is unexpected\n";
}
if m3identical in mm {
print "Membership 3 value equality: This is expected\n";
} else {
print "Membership 3 value equality: This is unexpected\n";
}
}
method Main() {
Basic();
SetSeq();
var s := { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
SetComprehension(s);
LetSuchThat();
}
|
704 | ironsync-osdi2023_tmp_tmpx80antoe_linear-dafny_Test_git-issues_git-issue-1158.dfy | // RUN: %dafny /compile:0 "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
type Id(==)
function F(s: set<Id>): int
lemma Test(x: Id)
{
var X := {x};
var a := map i | i <= X :: F(i);
var b := map[{} := F({}), X := F(X)];
assert a.Keys == b.Keys by { // spurious error reported here
forall i
ensures i in a.Keys <==> i in b.Keys
{
calc {
i in a.Keys;
==
i <= X;
== { assert i <= X <==> i == {} || i == X; }
i in b.Keys;
}
}
}
}
| // RUN: %dafny /compile:0 "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
type Id(==)
function F(s: set<Id>): int
lemma Test(x: Id)
{
var X := {x};
var a := map i | i <= X :: F(i);
var b := map[{} := F({}), X := F(X)];
forall i
ensures i in a.Keys <==> i in b.Keys
{
calc {
i in a.Keys;
==
i <= X;
== { assert i <= X <==> i == {} || i == X; }
i in b.Keys;
}
}
}
|
705 | ironsync-osdi2023_tmp_tmpx80antoe_linear-dafny_Test_git-issues_git-issue-283.dfy | // RUN: %dafny /compile:0 "%s" > "%t"
// RUN: %dafny /noVerify /compile:4 /compileTarget:cs "%s" >> "%t"
// RUN: %dafny /noVerify /compile:4 /compileTarget:js "%s" >> "%t"
// RUN: %dafny /noVerify /compile:4 /compileTarget:go "%s" >> "%t"
// RUN: %dafny /noVerify /compile:4 /compileTarget:java "%s" >> "%t"
// RUN: %diff "%s.expect" "%t"
datatype Result<T> =
| Success(value: T)
| Failure(error: string)
datatype C = C1 | C2(x: int)
trait Foo
{
method FooMethod1(r: Result<()>)
ensures
match r {
case Success(()) => true // OK
case Failure(e) => true
}
{
var x: int := 0;
match r {
case Success(()) => x := 1;
case Failure(e) => x := 2;
}
assert x > 0;
expect x == 1;
}
method FooMethod2(r: Result<C>)
ensures
match r {
case Success(C1()) => true // OK
case Success(C2(x)) => true // OK
case Failure(e) => true
}
{
var x: int := 0;
match r {
case Success(C1()) => x := 1;
case Success(C2(_)) => x := 2;
case Failure(e) => x := 3;
}
assert x > 0;
expect x == 1;
}
method FooMethod2q(r: Result<C>)
ensures
match r {
case Success(C1()) => true // OK
case Success(C2(x)) => true // OK
case Failure(e) => true
}
{
var x: int := 0;
match r {
case Success(C1()) => x := 1;
case Success(C2(x)) => x := 2; // x is local variable
case Failure(e) => x := 3;
}
assert x == 0 || x == 1 || x == 3;
expect x == 0 || x == 1 || x == 3;
}
method FooMethod2r(r: Result<C>)
ensures
match r {
case Success(C1()) => true // OK
case Success(C2(x)) => true // OK
case Failure(e) => true
}
{
var x: real := 0.0;
match r {
case Success(C1()) => x := 1.0;
case Success(C2(x)) => x := 2; // x is local variable
case Failure(e) => x := 3.0;
}
assert x == 0.0 || x == 1.0 || x == 3.0;
expect x == 0.0 || x == 1.0 || x == 3.0;
}
method FooMethod3(r: Result<C>)
ensures
match r {
case Success(C1) => true // OK
case Success(C2(x)) => true // OK
case Failure(e) => true
}
{
var x: int := 0;
match r {
case Success(C1) => x := 1;
case Success(C2(_)) => x := 2; // BUG - problem if _ is x
case Failure(e) => x := 3;
}
assert x > 0;
expect x == 1;
}
method FooMethod4(r: Result<C>)
ensures
match r {
case Success(C2) => true // OK -- C2 is a variable
case Failure(e) => true
}
{
var x: int := 0;
match r {
case Success(C2) => x := 1;
case Failure(e) => x := 2;
}
assert x > 0;
expect x == 1;
}
method FooMethod5(r: Result<string>)
ensures
match r {
case Success(C1) => true // OK -- C1 is a variable
case Failure(e) => true
}
{
var x: int := 0;
match r {
case Success(C1) => x := 1;
case Failure(e) => x := 2;
}
assert x > 0;
expect x == 1;
}
}
class CL extends Foo {}
method Main() {
var t := new CL;
m(t);
}
method m(t: Foo) {
t.FooMethod1(Result.Success(()));
t.FooMethod2(Result<C>.Success(C1));
t.FooMethod2q(Result<C>.Success(C1));
t.FooMethod2r(Result<C>.Success(C1));
t.FooMethod3(Result<C>.Success(C1));
t.FooMethod4(Result<C>.Success(C1));
t.FooMethod5(Result<string>.Success(""));
print "Done\n";
}
| // RUN: %dafny /compile:0 "%s" > "%t"
// RUN: %dafny /noVerify /compile:4 /compileTarget:cs "%s" >> "%t"
// RUN: %dafny /noVerify /compile:4 /compileTarget:js "%s" >> "%t"
// RUN: %dafny /noVerify /compile:4 /compileTarget:go "%s" >> "%t"
// RUN: %dafny /noVerify /compile:4 /compileTarget:java "%s" >> "%t"
// RUN: %diff "%s.expect" "%t"
datatype Result<T> =
| Success(value: T)
| Failure(error: string)
datatype C = C1 | C2(x: int)
trait Foo
{
method FooMethod1(r: Result<()>)
ensures
match r {
case Success(()) => true // OK
case Failure(e) => true
}
{
var x: int := 0;
match r {
case Success(()) => x := 1;
case Failure(e) => x := 2;
}
expect x == 1;
}
method FooMethod2(r: Result<C>)
ensures
match r {
case Success(C1()) => true // OK
case Success(C2(x)) => true // OK
case Failure(e) => true
}
{
var x: int := 0;
match r {
case Success(C1()) => x := 1;
case Success(C2(_)) => x := 2;
case Failure(e) => x := 3;
}
expect x == 1;
}
method FooMethod2q(r: Result<C>)
ensures
match r {
case Success(C1()) => true // OK
case Success(C2(x)) => true // OK
case Failure(e) => true
}
{
var x: int := 0;
match r {
case Success(C1()) => x := 1;
case Success(C2(x)) => x := 2; // x is local variable
case Failure(e) => x := 3;
}
expect x == 0 || x == 1 || x == 3;
}
method FooMethod2r(r: Result<C>)
ensures
match r {
case Success(C1()) => true // OK
case Success(C2(x)) => true // OK
case Failure(e) => true
}
{
var x: real := 0.0;
match r {
case Success(C1()) => x := 1.0;
case Success(C2(x)) => x := 2; // x is local variable
case Failure(e) => x := 3.0;
}
expect x == 0.0 || x == 1.0 || x == 3.0;
}
method FooMethod3(r: Result<C>)
ensures
match r {
case Success(C1) => true // OK
case Success(C2(x)) => true // OK
case Failure(e) => true
}
{
var x: int := 0;
match r {
case Success(C1) => x := 1;
case Success(C2(_)) => x := 2; // BUG - problem if _ is x
case Failure(e) => x := 3;
}
expect x == 1;
}
method FooMethod4(r: Result<C>)
ensures
match r {
case Success(C2) => true // OK -- C2 is a variable
case Failure(e) => true
}
{
var x: int := 0;
match r {
case Success(C2) => x := 1;
case Failure(e) => x := 2;
}
expect x == 1;
}
method FooMethod5(r: Result<string>)
ensures
match r {
case Success(C1) => true // OK -- C1 is a variable
case Failure(e) => true
}
{
var x: int := 0;
match r {
case Success(C1) => x := 1;
case Failure(e) => x := 2;
}
expect x == 1;
}
}
class CL extends Foo {}
method Main() {
var t := new CL;
m(t);
}
method m(t: Foo) {
t.FooMethod1(Result.Success(()));
t.FooMethod2(Result<C>.Success(C1));
t.FooMethod2q(Result<C>.Success(C1));
t.FooMethod2r(Result<C>.Success(C1));
t.FooMethod3(Result<C>.Success(C1));
t.FooMethod4(Result<C>.Success(C1));
t.FooMethod5(Result<string>.Success(""));
print "Done\n";
}
|
706 | ironsync-osdi2023_tmp_tmpx80antoe_linear-dafny_Test_git-issues_git-issue-506.dfy | // RUN: %dafny /compile:4 /compileTarget:cs "%s" > "%t"
// RUN: %dafny /compile:4 /compileTarget:js "%s" >> "%t"
// RUN: %dafny /compile:4 /compileTarget:go "%s" >> "%t"
// RUN: %dafny /compile:4 /compileTarget:java "%s" >> "%t"
// RUN: %diff "%s.expect" "%t"
method Main() {
var a := new int[10];
var index := 6;
a[8] := 1;
a[index], index := 3, index+1;
assert a[6] == 3;
assert index == 7;
print index, " ", a[6], a[7], a[8], "\n"; // Should be: "7 301"
index, a[index] := index+1, 9;
assert index == 8;
assert a[7] == 9;
assert a[8] == 1; // Assertion is OK
expect a[8] == 1; // This failed before the bug fix
print index, " ", a[6], a[7], a[8], "\n"; // Should be "8 391" not "8 309"
a[index+1], index := 7, 6;
assert a[9] == 7 && index == 6;
expect a[9] == 7 && index == 6;
var o := new F(2);
var oo := o;
print o.f, " ", oo.f, "\n";
assert o.f == 2;
assert oo.f == 2;
var ooo := new F(4);
o.f, o := 5, ooo;
print o.f, " ", oo.f, "\n";
assert o.f == 4;
assert oo.f == 5;
var oooo := new F(6);
o, o.f := oooo, 7;
assert o.f == 6;
assert ooo.f == 7;
expect ooo.f == 7; // This failed before the bug fix
print o.f, " ", ooo.f, "\n";
var aa := new int[9,9];
var j := 4;
var k := 5;
aa[j,k] := 8;
j, k, aa[j,k] := 2, 3, 7;
print j, " ", k, " ", aa[4,5], " ", aa[2,3], "\n"; // Should be 2 3 7 0
assert aa[4,5] == 7;
expect aa[4,5] == 7; // This failed before the bug fix
j, aa[j,k], k := 5, 6, 1;
assert j == 5 && aa[2,3] == 6 && k == 1;
expect j == 5 && aa[2,3] == 6 && k == 1; // This failed before the bug fix
aa[j,k], k, j := 5, 6, 1;
assert j == 1 && aa[5,1] == 5 && k == 6;
expect j == 1 && aa[5,1] == 5 && k == 6;
}
class F {
var f: int;
constructor (f: int) ensures this.f == f { this.f := f; }
}
| // RUN: %dafny /compile:4 /compileTarget:cs "%s" > "%t"
// RUN: %dafny /compile:4 /compileTarget:js "%s" >> "%t"
// RUN: %dafny /compile:4 /compileTarget:go "%s" >> "%t"
// RUN: %dafny /compile:4 /compileTarget:java "%s" >> "%t"
// RUN: %diff "%s.expect" "%t"
method Main() {
var a := new int[10];
var index := 6;
a[8] := 1;
a[index], index := 3, index+1;
print index, " ", a[6], a[7], a[8], "\n"; // Should be: "7 301"
index, a[index] := index+1, 9;
expect a[8] == 1; // This failed before the bug fix
print index, " ", a[6], a[7], a[8], "\n"; // Should be "8 391" not "8 309"
a[index+1], index := 7, 6;
expect a[9] == 7 && index == 6;
var o := new F(2);
var oo := o;
print o.f, " ", oo.f, "\n";
var ooo := new F(4);
o.f, o := 5, ooo;
print o.f, " ", oo.f, "\n";
var oooo := new F(6);
o, o.f := oooo, 7;
expect ooo.f == 7; // This failed before the bug fix
print o.f, " ", ooo.f, "\n";
var aa := new int[9,9];
var j := 4;
var k := 5;
aa[j,k] := 8;
j, k, aa[j,k] := 2, 3, 7;
print j, " ", k, " ", aa[4,5], " ", aa[2,3], "\n"; // Should be 2 3 7 0
expect aa[4,5] == 7; // This failed before the bug fix
j, aa[j,k], k := 5, 6, 1;
expect j == 5 && aa[2,3] == 6 && k == 1; // This failed before the bug fix
aa[j,k], k, j := 5, 6, 1;
expect j == 1 && aa[5,1] == 5 && k == 6;
}
class F {
var f: int;
constructor (f: int) ensures this.f == f { this.f := f; }
}
|
707 | ironsync-osdi2023_tmp_tmpx80antoe_linear-dafny_Test_git-issues_git-issue-975.dfy | // RUN: %dafny /compile:0 "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
function f():nat
ensures f() == 0
{ // no problem for methods
var x := 0; // no problem without this
assert true by {}
0
}
| // RUN: %dafny /compile:0 "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
function f():nat
ensures f() == 0
{ // no problem for methods
var x := 0; // no problem without this
0
}
|
708 | ironsync-osdi2023_tmp_tmpx80antoe_linear-dafny_docs_DafnyRef_examples_Example-Old.dfy | class A {
var value: int
method m(i: int)
requires i == 6
requires value == 42
modifies this
{
var j: int := 17;
value := 43;
label L:
j := 18;
value := 44;
label M:
assert old(i) == 6; // i is local, but can't be changed anyway
assert old(j) == 18; // j is local and not affected by old
assert old@L(j) == 18; // j is local and not affected by old
assert old(value) == 42;
assert old@L(value) == 43;
assert old@M(value) == 44 && this.value == 44;
// value is this.value; 'this' is the same
// same reference in current and pre state but the
// values stored in the heap as its fields are different;
// '.value' evaluates to 42 in the pre-state, 43 at L,
// and 44 in the current state
}
}
| class A {
var value: int
method m(i: int)
requires i == 6
requires value == 42
modifies this
{
var j: int := 17;
value := 43;
label L:
j := 18;
value := 44;
label M:
// value is this.value; 'this' is the same
// same reference in current and pre state but the
// values stored in the heap as its fields are different;
// '.value' evaluates to 42 in the pre-state, 43 at L,
// and 44 in the current state
}
}
|
709 | ironsync-osdi2023_tmp_tmpx80antoe_linear-dafny_docs_DafnyRef_examples_Example-Old2.dfy | class A {
var value: int
constructor ()
ensures value == 10
{
value := 10;
}
}
class B {
var a: A
constructor () { a := new A(); }
method m()
requires a.value == 11
modifies this, this.a
{
label L:
a.value := 12;
label M:
a := new A(); // Line X
label N:
a.value := 20;
label P:
assert old(a.value) == 11;
assert old(a).value == 12; // this.a is from pre-state,
// but .value in current state
assert old@L(a.value) == 11;
assert old@L(a).value == 12; // same as above
assert old@M(a.value) == 12; // .value in M state is 12
assert old@M(a).value == 12;
assert old@N(a.value) == 10; // this.a in N is the heap
// reference at Line X
assert old@N(a).value == 20; // .value in current state is 20
assert old@P(a.value) == 20;
assert old@P(a).value == 20;
}
}
| class A {
var value: int
constructor ()
ensures value == 10
{
value := 10;
}
}
class B {
var a: A
constructor () { a := new A(); }
method m()
requires a.value == 11
modifies this, this.a
{
label L:
a.value := 12;
label M:
a := new A(); // Line X
label N:
a.value := 20;
label P:
// but .value in current state
// reference at Line X
}
}
|
710 | ironsync-osdi2023_tmp_tmpx80antoe_linear-dafny_docs_DafnyRef_examples_Example-Old3.dfy | class A {
var z1: array<nat>
var z2: array<nat>
method mm()
requires z1.Length > 10 && z1[0] == 7
requires z2.Length > 10 && z2[0] == 17
modifies z2
{
var a: array<nat> := z1;
assert a[0] == 7;
a := z2;
assert a[0] == 17;
assert old(a[0]) == 17; // a is local with value z2
z2[0] := 27;
assert old(a[0]) == 17; // a is local, with current value of
// z2; in pre-state z2[0] == 17
assert old(a)[0] == 27; // a is local, with current value of
// z2; z2[0] is currently 27
}
}
| class A {
var z1: array<nat>
var z2: array<nat>
method mm()
requires z1.Length > 10 && z1[0] == 7
requires z2.Length > 10 && z2[0] == 17
modifies z2
{
var a: array<nat> := z1;
a := z2;
z2[0] := 27;
// z2; in pre-state z2[0] == 17
// z2; z2[0] is currently 27
}
}
|
711 | laboratory_tmp_tmps8ws6mu2_dafny-tutorial_exercise12.dfy | method FindMax(a: array<int>) returns (i: int)
// Annotate this method with pre- and postconditions
// that ensure it behaves as described.
requires 0 < a.Length
ensures 0 <= i < a.Length
ensures forall k: int :: 0 <= k < a.Length ==> a[k] <= a[i]
{
// Fill in the body that calculates the INDEX of the maximum.
var j := 0;
var max := a[0];
i := 1;
while i < a.Length
invariant 1 <= i <= a.Length
invariant forall k: int :: 0 <= k < i ==> max >= a[k]
invariant 0 <= j < a.Length
invariant a[j] == max
decreases a.Length - i
{
if max < a[i] {
max := a[i];
j := i;
}
i := i + 1;
}
i := j;
}
| method FindMax(a: array<int>) returns (i: int)
// Annotate this method with pre- and postconditions
// that ensure it behaves as described.
requires 0 < a.Length
ensures 0 <= i < a.Length
ensures forall k: int :: 0 <= k < a.Length ==> a[k] <= a[i]
{
// Fill in the body that calculates the INDEX of the maximum.
var j := 0;
var max := a[0];
i := 1;
while i < a.Length
{
if max < a[i] {
max := a[i];
j := i;
}
i := i + 1;
}
i := j;
}
|
712 | laboratory_tmp_tmps8ws6mu2_dafny-tutorial_exercise9.dfy | function fib(n: nat): nat
{
if n == 0 then 0 else
if n == 1 then 1 else
fib(n - 1) + fib(n - 2)
}
method ComputeFib(n: nat) returns (b: nat)
ensures b == fib(n) // Do not change this postcondition
{
// Change the method body to instead use c as described.
// You will need to change both the initialization and the loop.
var i: int := 0;
b := 0;
var c := 1;
while i < n
invariant 0 <= i <= n
invariant b == fib(i)
invariant c == fib(i + 1)
{
b, c := c, c + b;
i := i + 1;
}
}
| function fib(n: nat): nat
{
if n == 0 then 0 else
if n == 1 then 1 else
fib(n - 1) + fib(n - 2)
}
method ComputeFib(n: nat) returns (b: nat)
ensures b == fib(n) // Do not change this postcondition
{
// Change the method body to instead use c as described.
// You will need to change both the initialization and the loop.
var i: int := 0;
b := 0;
var c := 1;
while i < n
{
b, c := c, c + b;
i := i + 1;
}
}
|
713 | lets-prove-blocking-queue_tmp_tmptd_aws1k_dafny_prod-cons.dfy | /**
* A proof in Dafny of the non blocking property of a queue.
* @author Franck Cassez.
*
* @note: based off Modelling Concurrency in Dafny, K.R.M. Leino
* @link{http://leino.science/papers/krml260.pdf}
*/
module ProdCons {
// A type for process id that supports equality (i.e. p == q is defined).
type Process(==)
// A type for the elements in the buffer.
type T
/**
* The producer/consumer problem.
* The set of processes is actually irrelevant (included here because part of the
* original problem statement ...)
*/
class ProdCons {
/**
* Set of processes in the system.
*/
const P: set<Process>
/**
* The maximal size of the buffer.
*/
var maxBufferSize : nat
/**
* The buffer.
*/
var buffer : seq<T>
/**
* Invariant.
*
* Buffer should always contain at most maxBufferSize elements,
* Set of processes is not empty
*
*/
predicate valid()
reads this
{
maxBufferSize > 0 && P != {} &&
0 <= |buffer| <= maxBufferSize
}
/**
* Initialise set of processes and buffer and maxBufferSize
*/
constructor (processes: set<Process>, m: nat )
requires processes != {} // Non empty set of processes.
requires m >= 1 // Buffer has at least one cell.
ensures valid() // After initialisation the invariant is true
{
P := processes;
buffer := [];
maxBufferSize := m;
}
/**
* Enabledness of a put operation.
* If enabled any process can perform a put.
*/
predicate putEnabled(p : Process)
reads this
{
|buffer| < maxBufferSize
}
/** Event: a process puts an element in the queue. */
method put(p: Process, t : T)
requires valid()
requires putEnabled(p) // |buffer| < maxBufferSize
modifies this
{
buffer := buffer + [t] ;
}
/**
* Enabledness of a get operation.
* If enabled, any process can perform a get.
*/
predicate getEnabled(p : Process)
reads this
{
|buffer| >= 1
}
/** Event: a process gets an element from the queue. */
method get(p: Process)
requires getEnabled(p)
requires valid() // Invariant is inductive
ensures |buffer| == |old(buffer)| - 1 // this invariant is not needed and can be omitted
modifies this
{
// remove the first element of buffer.
// note: Dafny implicitly proves that the tail operation can be performed
// as a consequence of |buffer| >= 1 (getEnabled()).
// To see this, comment out the
// requires and an error shows up.
buffer := buffer[1..];
}
/** Correctness theorem: no deadlock.
* From any valid state, at least one process is enabled.
*/
lemma noDeadlock()
requires valid()
ensures exists p :: p in P && (getEnabled(p) || putEnabled(p))
// as processes are irrelevant, this could be simplified
// into isBufferNotFull() or isBufferNotEmpty()
{
// Dafny automatically proves this, so we can leave the
// body of this lemma empty.
// But for the sake of clarity, here is the proof.
// P is not empty so there is a process p in P
// Reads as: select a p of type Process such that p in P
var p: Process :| p in P ;
// Now we have a p.
// We are going to use the fact that valid() must hold as it is a pre-condition
if ( |buffer| > 0 ) {
assert (getEnabled(p));
}
else {
// You may comment out the following asserts and Dafny
// can figure out the proof from the constraints that are
// true in this case.
// Because |buffer| == 0 and maxBufferSize >= 1, we can do a put
assert(|buffer| == 0);
assert (|buffer| < maxBufferSize);
assert(putEnabled(p));
}
}
}
}
| /**
* A proof in Dafny of the non blocking property of a queue.
* @author Franck Cassez.
*
* @note: based off Modelling Concurrency in Dafny, K.R.M. Leino
* @link{http://leino.science/papers/krml260.pdf}
*/
module ProdCons {
// A type for process id that supports equality (i.e. p == q is defined).
type Process(==)
// A type for the elements in the buffer.
type T
/**
* The producer/consumer problem.
* The set of processes is actually irrelevant (included here because part of the
* original problem statement ...)
*/
class ProdCons {
/**
* Set of processes in the system.
*/
const P: set<Process>
/**
* The maximal size of the buffer.
*/
var maxBufferSize : nat
/**
* The buffer.
*/
var buffer : seq<T>
/**
* Invariant.
*
* Buffer should always contain at most maxBufferSize elements,
* Set of processes is not empty
*
*/
predicate valid()
reads this
{
maxBufferSize > 0 && P != {} &&
0 <= |buffer| <= maxBufferSize
}
/**
* Initialise set of processes and buffer and maxBufferSize
*/
constructor (processes: set<Process>, m: nat )
requires processes != {} // Non empty set of processes.
requires m >= 1 // Buffer has at least one cell.
ensures valid() // After initialisation the invariant is true
{
P := processes;
buffer := [];
maxBufferSize := m;
}
/**
* Enabledness of a put operation.
* If enabled any process can perform a put.
*/
predicate putEnabled(p : Process)
reads this
{
|buffer| < maxBufferSize
}
/** Event: a process puts an element in the queue. */
method put(p: Process, t : T)
requires valid()
requires putEnabled(p) // |buffer| < maxBufferSize
modifies this
{
buffer := buffer + [t] ;
}
/**
* Enabledness of a get operation.
* If enabled, any process can perform a get.
*/
predicate getEnabled(p : Process)
reads this
{
|buffer| >= 1
}
/** Event: a process gets an element from the queue. */
method get(p: Process)
requires getEnabled(p)
requires valid() // Invariant is inductive
ensures |buffer| == |old(buffer)| - 1 // this invariant is not needed and can be omitted
modifies this
{
// remove the first element of buffer.
// note: Dafny implicitly proves that the tail operation can be performed
// as a consequence of |buffer| >= 1 (getEnabled()).
// To see this, comment out the
// requires and an error shows up.
buffer := buffer[1..];
}
/** Correctness theorem: no deadlock.
* From any valid state, at least one process is enabled.
*/
lemma noDeadlock()
requires valid()
ensures exists p :: p in P && (getEnabled(p) || putEnabled(p))
// as processes are irrelevant, this could be simplified
// into isBufferNotFull() or isBufferNotEmpty()
{
// Dafny automatically proves this, so we can leave the
// body of this lemma empty.
// But for the sake of clarity, here is the proof.
// P is not empty so there is a process p in P
// Reads as: select a p of type Process such that p in P
var p: Process :| p in P ;
// Now we have a p.
// We are going to use the fact that valid() must hold as it is a pre-condition
if ( |buffer| > 0 ) {
}
else {
// You may comment out the following asserts and Dafny
// can figure out the proof from the constraints that are
// true in this case.
// Because |buffer| == 0 and maxBufferSize >= 1, we can do a put
}
}
}
}
|
714 | libraries_tmp_tmp9gegwhqj_examples_MutableMap_MutableMapDafny.dfy | /*******************************************************************************
* Copyright by the contributors to the Dafny Project
* SPDX-License-Identifier: MIT
*******************************************************************************/
// RUN: %verify "%s"
/**
* Implements mutable maps in Dafny to guard against inconsistent specifications.
 * Only exists to verify feasibility; not meant for actual usage.
*/
module {:options "-functionSyntax:4"} MutableMapDafny {
/**
* NOTE: Only here because of #2500; once resolved import "MutableMapTrait.dfy".
*/
trait {:termination false} MutableMapTrait<K(==),V(==)> {
function content(): map<K, V>
reads this
method Put(k: K, v: V)
modifies this
ensures this.content() == old(this.content())[k := v]
ensures k in old(this.content()).Keys ==> this.content().Values + {old(this.content())[k]} == old(this.content()).Values + {v}
ensures k !in old(this.content()).Keys ==> this.content().Values == old(this.content()).Values + {v}
function Keys(): (keys: set<K>)
reads this
ensures keys == this.content().Keys
predicate HasKey(k: K)
reads this
ensures HasKey(k) <==> k in this.content().Keys
function Values(): (values: set<V>)
reads this
ensures values == this.content().Values
function Items(): (items: set<(K,V)>)
reads this
ensures items == this.content().Items
ensures items == set k | k in this.content().Keys :: (k, this.content()[k])
function Select(k: K): (v: V)
reads this
requires this.HasKey(k)
ensures v in this.content().Values
ensures this.content()[k] == v
method Remove(k: K)
modifies this
ensures this.content() == old(this.content()) - {k}
ensures k in old(this.content()).Keys ==> this.content().Values + {old(this.content())[k]} == old(this.content()).Values
function Size(): (size: int)
reads this
ensures size == |this.content().Items|
}
class MutableMapDafny<K(==),V(==)> extends MutableMapTrait<K,V> {
var m: map<K,V>
function content(): map<K, V>
reads this
{
m
}
constructor ()
ensures this.content() == map[]
{
m := map[];
}
method Put(k: K, v: V)
modifies this
ensures this.content() == old(this.content())[k := v]
ensures k in old(this.content()).Keys ==> this.content().Values + {old(this.content())[k]} == old(this.content()).Values + {v}
ensures k !in old(this.content()).Keys ==> this.content().Values == old(this.content()).Values + {v}
{
m := m[k := v];
if k in old(m).Keys {
forall v' | v' in old(m).Values + {v} ensures v' in m.Values + {old(m)[k]} {
if v' == v || v' == old(m)[k] {
assert m[k] == v;
} else {
assert m.Keys == old(m).Keys + {k};
}
}
}
if k !in old(m).Keys {
forall v' | v' in old(m).Values + {v} ensures v' in m.Values {
if v' == v {
assert m[k] == v;
assert m[k] == v';
assert v' in m.Values;
} else {
assert m.Keys == old(m).Keys + {k};
}
}
}
}
function Keys(): (keys: set<K>)
reads this
ensures keys == this.content().Keys
{
m.Keys
}
predicate HasKey(k: K)
reads this
ensures HasKey(k) <==> k in this.content().Keys
{
k in m.Keys
}
function Values(): (values: set<V>)
reads this
ensures values == this.content().Values
{
m.Values
}
function Items(): (items: set<(K,V)>)
reads this
ensures items == this.content().Items
ensures items == set k | k in this.content().Keys :: (k, this.content()[k])
{
var items := set k | k in m.Keys :: (k, m[k]);
assert items == m.Items by {
forall k | k in m.Keys ensures (k, m[k]) in m.Items {
assert (k, m[k]) in m.Items;
}
assert items <= m.Items;
forall x | x in m.Items ensures x in items {
assert (x.0, m[x.0]) in items;
}
assert m.Items <= items;
}
items
}
function Select(k: K): (v: V)
reads this
requires this.HasKey(k)
ensures v in this.content().Values
ensures this.content()[k] == v
{
m[k]
}
method Remove(k: K)
modifies this
ensures this.content() == old(this.content()) - {k}
ensures k in old(this.content()).Keys ==> this.content().Values + {old(this.content())[k]} == old(this.content()).Values
{
m := map k' | k' in m.Keys && k' != k :: m[k'];
if k in old(m).Keys {
var v := old(m)[k];
forall v' | v' in old(m).Values ensures v' in m.Values + {v} {
if v' == v {
} else {
assert exists k' | k' in m.Keys :: old(m)[k'] == v';
}
}
}
}
function Size(): (size: int)
reads this
ensures size == |this.content().Items|
{
|m|
}
}
}
| /*******************************************************************************
* Copyright by the contributors to the Dafny Project
* SPDX-License-Identifier: MIT
*******************************************************************************/
// RUN: %verify "%s"
/**
* Implements mutable maps in Dafny to guard against inconsistent specifications.
 * Only exists to verify feasibility; not meant for actual usage.
*/
module {:options "-functionSyntax:4"} MutableMapDafny {
/**
* NOTE: Only here because of #2500; once resolved import "MutableMapTrait.dfy".
*/
trait {:termination false} MutableMapTrait<K(==),V(==)> {
function content(): map<K, V>
reads this
method Put(k: K, v: V)
modifies this
ensures this.content() == old(this.content())[k := v]
ensures k in old(this.content()).Keys ==> this.content().Values + {old(this.content())[k]} == old(this.content()).Values + {v}
ensures k !in old(this.content()).Keys ==> this.content().Values == old(this.content()).Values + {v}
function Keys(): (keys: set<K>)
reads this
ensures keys == this.content().Keys
predicate HasKey(k: K)
reads this
ensures HasKey(k) <==> k in this.content().Keys
function Values(): (values: set<V>)
reads this
ensures values == this.content().Values
function Items(): (items: set<(K,V)>)
reads this
ensures items == this.content().Items
ensures items == set k | k in this.content().Keys :: (k, this.content()[k])
function Select(k: K): (v: V)
reads this
requires this.HasKey(k)
ensures v in this.content().Values
ensures this.content()[k] == v
method Remove(k: K)
modifies this
ensures this.content() == old(this.content()) - {k}
ensures k in old(this.content()).Keys ==> this.content().Values + {old(this.content())[k]} == old(this.content()).Values
function Size(): (size: int)
reads this
ensures size == |this.content().Items|
}
class MutableMapDafny<K(==),V(==)> extends MutableMapTrait<K,V> {
var m: map<K,V>
function content(): map<K, V>
reads this
{
m
}
constructor ()
ensures this.content() == map[]
{
m := map[];
}
method Put(k: K, v: V)
modifies this
ensures this.content() == old(this.content())[k := v]
ensures k in old(this.content()).Keys ==> this.content().Values + {old(this.content())[k]} == old(this.content()).Values + {v}
ensures k !in old(this.content()).Keys ==> this.content().Values == old(this.content()).Values + {v}
{
m := m[k := v];
if k in old(m).Keys {
forall v' | v' in old(m).Values + {v} ensures v' in m.Values + {old(m)[k]} {
if v' == v || v' == old(m)[k] {
} else {
}
}
}
if k !in old(m).Keys {
forall v' | v' in old(m).Values + {v} ensures v' in m.Values {
if v' == v {
} else {
}
}
}
}
function Keys(): (keys: set<K>)
reads this
ensures keys == this.content().Keys
{
m.Keys
}
predicate HasKey(k: K)
reads this
ensures HasKey(k) <==> k in this.content().Keys
{
k in m.Keys
}
function Values(): (values: set<V>)
reads this
ensures values == this.content().Values
{
m.Values
}
function Items(): (items: set<(K,V)>)
reads this
ensures items == this.content().Items
ensures items == set k | k in this.content().Keys :: (k, this.content()[k])
{
var items := set k | k in m.Keys :: (k, m[k]);
items
}
function Select(k: K): (v: V)
reads this
requires this.HasKey(k)
ensures v in this.content().Values
ensures this.content()[k] == v
{
m[k]
}
method Remove(k: K)
modifies this
ensures this.content() == old(this.content()) - {k}
ensures k in old(this.content()).Keys ==> this.content().Values + {old(this.content())[k]} == old(this.content()).Values
{
m := map k' | k' in m.Keys && k' != k :: m[k'];
if k in old(m).Keys {
var v := old(m)[k];
forall v' | v' in old(m).Values ensures v' in m.Values + {v} {
if v' == v {
} else {
}
}
}
}
function Size(): (size: int)
reads this
ensures size == |this.content().Items|
{
|m|
}
}
}
|
715 | llm-verified-eval_tmp_tmpd2deqn_i_dafny_0.dfy | function abs(x: real): real
{
if x < 0.0 then -x else x
}
method has_close_elements(numbers: seq<real>, threshold: real) returns (result: bool)
ensures result <==> exists i, j ::
0 <= i < |numbers| &&
0 <= j < |numbers| &&
i != j &&
abs(numbers[i] - numbers[j]) < threshold
ensures result ==> |numbers| > 1
{
result := false;
assert (forall i0 :: (0 <= i0 < 0 ==>
forall j0 :: (0 <= j0 < |numbers| ==>
abs(numbers[i0] - numbers[j0]) >= threshold)));
for i := 0 to |numbers|
invariant (forall i0 :: (0 <= i0 < i ==>
forall j0 :: (0 <= j0 < |numbers| ==>
(i0 != j0 ==>
abs(numbers[i0] - numbers[j0]) >= threshold))))
{
for j := 0 to |numbers|
invariant (forall i0 :: (0 <= i0 <= i ==>
forall j0 :: (0 <= j0 < j ==>
(i0 != j0 ==>
abs(numbers[i0] - numbers[j0]) >= threshold))))
{
if i != j && abs(numbers[i] - numbers[j]) < threshold {
assert abs(numbers[i] - numbers[j]) < threshold;
result := true;
return;
}
}
}
}
| function abs(x: real): real
{
if x < 0.0 then -x else x
}
method has_close_elements(numbers: seq<real>, threshold: real) returns (result: bool)
ensures result <==> exists i, j ::
0 <= i < |numbers| &&
0 <= j < |numbers| &&
i != j &&
abs(numbers[i] - numbers[j]) < threshold
ensures result ==> |numbers| > 1
{
result := false;
for i := 0 to |numbers|
{
for j := 0 to |numbers|
{
if i != j && abs(numbers[i] - numbers[j]) < threshold {
result := true;
return;
}
}
}
}
|
716 | llm-verified-eval_tmp_tmpd2deqn_i_dafny_160.dfy | function pow(base: int, exponent: int): int
requires exponent >= 0
decreases exponent
{
if exponent == 0 then 1
else if exponent % 2 == 0 then pow(base * base, exponent / 2)
else base * pow(base, exponent - 1)
}
method do_algebra(operators: seq<char>, operands: seq<int>) returns (result: int)
requires operators != [] && operands != [] && |operators| + 1 == |operands|
requires forall i :: 0 <= i < |operands| ==> operands[i] >= 0
{
result := operands[0];
var i := 0;
while i < |operators|
invariant 0 <= i <= |operators|
decreases |operators| - i
{
var op := operators[i];
i := i + 1;
match op
{
case '+' =>
result := result + operands[i];
case '-' =>
result := result - operands[i];
case '*' =>
result := result * operands[i];
case '/' =>
if operands[i] != 0 {
result := result / operands[i];
}
case '^' =>
result := pow(result, operands[i]);
case _ =>
}
}
}
| function pow(base: int, exponent: int): int
requires exponent >= 0
{
if exponent == 0 then 1
else if exponent % 2 == 0 then pow(base * base, exponent / 2)
else base * pow(base, exponent - 1)
}
method do_algebra(operators: seq<char>, operands: seq<int>) returns (result: int)
requires operators != [] && operands != [] && |operators| + 1 == |operands|
requires forall i :: 0 <= i < |operands| ==> operands[i] >= 0
{
result := operands[0];
var i := 0;
while i < |operators|
{
var op := operators[i];
i := i + 1;
match op
{
case '+' =>
result := result + operands[i];
case '-' =>
result := result - operands[i];
case '*' =>
result := result * operands[i];
case '/' =>
if operands[i] != 0 {
result := result / operands[i];
}
case '^' =>
result := pow(result, operands[i]);
case _ =>
}
}
}
|
717 | llm-verified-eval_tmp_tmpd2deqn_i_dafny_161.dfy | function IsLetter(c: char): bool
{
(c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}
function NoLetters(s: string, n: nat): bool
requires n <= |s|
{
forall c :: 0 <= c < n ==> !IsLetter(s[c])
}
function ToggleCase(c: char): char
{
if c >= 'a' && c <= 'z'
then
(c - 'a' + 'A')
else if c >= 'A' && c <= 'Z'
then
(c - 'A' + 'a')
else
c
}
function isReverse(s: string, s_prime: string): bool{
(|s| == |s_prime|) &&
(forall si :: 0 <= si < |s|/2 ==> s_prime[|s| - si - 1] == s[si])
}
method Reverse(original: seq<char>) returns (reversed: seq<char>)
ensures |reversed| == |original|
ensures forall i :: 0 <= i < |original| ==> reversed[i] == original[|original| - 1 - i]
{
reversed := [];
var i := |original|;
while i > 0
decreases i
invariant 0 <= i <= |original|
invariant |reversed| == |original| - i
invariant forall j :: 0 <= j < |original|-i ==>
reversed[j] == original[|original| - 1 - j]
{
i := i - 1;
reversed := reversed + [original[i]];
}
}
method solve(s: string) returns (result: string)
ensures |result| == |s|
ensures !NoLetters(s, |s|) ==> forall i :: 0 <= i < |s| && IsLetter(s[i]) ==> result[i] == ToggleCase(s[i])
ensures !NoLetters(s, |s|) ==> forall i :: 0 <= i < |s| && !IsLetter(s[i]) ==> result[i] == s[i]
ensures NoLetters(s, |s|) ==> isReverse(result, s)
{
var flg : bool := false;
result := "";
for i := 0 to |s|
invariant |result| == i
invariant flg <==> !NoLetters(s, i)
invariant forall j :: 0 <= j < i ==> result[j] == ToggleCase(s[j])
{
if IsLetter(s[i])
{
result := result + [ToggleCase(s[i])];
flg := true;
} else {
result := result + [s[i]];
}
}
if !flg
{
result := Reverse(s);
}
}
| function IsLetter(c: char): bool
{
(c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}
function NoLetters(s: string, n: nat): bool
requires n <= |s|
{
forall c :: 0 <= c < n ==> !IsLetter(s[c])
}
function ToggleCase(c: char): char
{
if c >= 'a' && c <= 'z'
then
(c - 'a' + 'A')
else if c >= 'A' && c <= 'Z'
then
(c - 'A' + 'a')
else
c
}
function isReverse(s: string, s_prime: string): bool{
(|s| == |s_prime|) &&
(forall si :: 0 <= si < |s|/2 ==> s_prime[|s| - si - 1] == s[si])
}
method Reverse(original: seq<char>) returns (reversed: seq<char>)
ensures |reversed| == |original|
ensures forall i :: 0 <= i < |original| ==> reversed[i] == original[|original| - 1 - i]
{
reversed := [];
var i := |original|;
while i > 0
{
i := i - 1;
reversed := reversed + [original[i]];
}
}
method solve(s: string) returns (result: string)
ensures |result| == |s|
ensures !NoLetters(s, |s|) ==> forall i :: 0 <= i < |s| && IsLetter(s[i]) ==> result[i] == ToggleCase(s[i])
ensures !NoLetters(s, |s|) ==> forall i :: 0 <= i < |s| && !IsLetter(s[i]) ==> result[i] == s[i]
ensures NoLetters(s, |s|) ==> isReverse(result, s)
{
var flg : bool := false;
result := "";
for i := 0 to |s|
{
if IsLetter(s[i])
{
result := result + [ToggleCase(s[i])];
flg := true;
} else {
result := result + [s[i]];
}
}
if !flg
{
result := Reverse(s);
}
}
|
718 | llm-verified-eval_tmp_tmpd2deqn_i_dafny_3.dfy | function sum(s: seq<int>, n: nat): int
requires n <= |s|
{
if |s| == 0 || n == 0 then
0
else
s[0] + sum(s[1..], n-1)
}
lemma sum_plus(s: seq<int>, i: nat)
requires i < |s|
ensures sum(s, i) + s[i] == sum(s, i+1)
{
}
method below_zero(ops: seq<int>) returns (result: bool)
ensures result <==> exists n: nat :: n <= |ops| && sum(ops, n) < 0
{
result := false;
var t := 0;
for i := 0 to |ops|
invariant t == sum(ops, i)
invariant forall n: nat :: n <= i ==> sum(ops, n) >= 0
{
t := t + ops[i];
sum_plus(ops, i);
if t < 0 {
result := true;
return;
}
}
}
| function sum(s: seq<int>, n: nat): int
requires n <= |s|
{
if |s| == 0 || n == 0 then
0
else
s[0] + sum(s[1..], n-1)
}
lemma sum_plus(s: seq<int>, i: nat)
requires i < |s|
ensures sum(s, i) + s[i] == sum(s, i+1)
{
}
method below_zero(ops: seq<int>) returns (result: bool)
ensures result <==> exists n: nat :: n <= |ops| && sum(ops, n) < 0
{
result := false;
var t := 0;
for i := 0 to |ops|
{
t := t + ops[i];
sum_plus(ops, i);
if t < 0 {
result := true;
return;
}
}
}
|
719 | llm-verified-eval_tmp_tmpd2deqn_i_dafny_5.dfy | method intersperse(numbers: seq<int>, delimiter: int) returns (interspersed: seq<int>)
ensures |interspersed| == if |numbers| > 0 then 2 * |numbers| - 1 else 0
ensures forall i :: 0 <= i < |interspersed| ==> i % 2 == 0 ==>
interspersed[i] == numbers[i / 2]
ensures forall i :: 0 <= i < |interspersed| ==> i % 2 == 1 ==>
interspersed[i] == delimiter
{
interspersed := [];
for i := 0 to |numbers|
invariant |interspersed| == if i > 0 then 2 * i - 1 else 0
invariant forall i0 :: 0 <= i0 < |interspersed| ==> i0 % 2 == 0 ==>
interspersed[i0] == numbers[i0 / 2]
invariant forall i0 :: 0 <= i0 < |interspersed| ==> i0 % 2 == 1 ==>
interspersed[i0] == delimiter
{
if i > 0 {
interspersed := interspersed + [delimiter];
}
interspersed := interspersed + [numbers[i]];
}
}
| method intersperse(numbers: seq<int>, delimiter: int) returns (interspersed: seq<int>)
ensures |interspersed| == if |numbers| > 0 then 2 * |numbers| - 1 else 0
ensures forall i :: 0 <= i < |interspersed| ==> i % 2 == 0 ==>
interspersed[i] == numbers[i / 2]
ensures forall i :: 0 <= i < |interspersed| ==> i % 2 == 1 ==>
interspersed[i] == delimiter
{
interspersed := [];
for i := 0 to |numbers|
{
if i > 0 {
interspersed := interspersed + [delimiter];
}
interspersed := interspersed + [numbers[i]];
}
}
|
720 | llm-verified-eval_tmp_tmpd2deqn_i_dafny_9.dfy | function isMax(m: int, numbers: seq<int>): bool
{
m in numbers &&
forall i :: 0 <= i < |numbers| ==> numbers[i] <= m
}
method max(numbers: seq<int>) returns (result: int)
requires numbers != []
ensures isMax(result, numbers)
{
result := numbers[0];
for i := 1 to |numbers|
invariant isMax(result, numbers[0..i])
{
if numbers[i] > result {
result := numbers[i];
}
}
}
method rolling_max(numbers: seq<int>) returns (result: seq<int>)
requires numbers != []
ensures |result| == |numbers|
ensures forall i :: 0 < i < |result| ==> isMax(result[i], numbers[0..(i+1)])
{
var m := numbers[0];
result := [m];
for i := 1 to |numbers|
invariant |result| == i
invariant m == result[i-1]
invariant forall j :: 0 <= j < i ==> isMax(result[j], numbers[0..(j+1)])
{
if numbers[i] > m {
m := numbers[i];
}
result := result + [m];
}
}
| function isMax(m: int, numbers: seq<int>): bool
{
m in numbers &&
forall i :: 0 <= i < |numbers| ==> numbers[i] <= m
}
method max(numbers: seq<int>) returns (result: int)
requires numbers != []
ensures isMax(result, numbers)
{
result := numbers[0];
for i := 1 to |numbers|
{
if numbers[i] > result {
result := numbers[i];
}
}
}
method rolling_max(numbers: seq<int>) returns (result: seq<int>)
requires numbers != []
ensures |result| == |numbers|
ensures forall i :: 0 < i < |result| ==> isMax(result[i], numbers[0..(i+1)])
{
var m := numbers[0];
result := [m];
for i := 1 to |numbers|
{
if numbers[i] > m {
m := numbers[i];
}
result := result + [m];
}
}
|
721 | metodosFormais_tmp_tmp4q2kmya4_T1-MetodosFormais_examples_ex1.dfy | /*
Search (buscar)
r = 0
while (r < |a|) {
    if (a[r] == x) return r
    r = r + 1
}
return -1
*/
method buscar(a:array<int>, x:int) returns (r:int)
ensures r < 0 ==> forall i :: 0 <= i <a.Length ==> a[i] != x
ensures 0 <= r < a.Length ==> a[r] == x
{
r := 0;
while r < a.Length
decreases a.Length - r
invariant 0 <= r <= a.Length
invariant forall i :: 0 <= i < r ==> a[i] != x
{
if a[r] == x
{
return r;
}
r := r + 1;
}
return -1;
}
| /*
Search (buscar)
r = 0
while (r < |a|) {
    if (a[r] == x) return r
    r = r + 1
}
return -1
*/
method buscar(a:array<int>, x:int) returns (r:int)
ensures r < 0 ==> forall i :: 0 <= i <a.Length ==> a[i] != x
ensures 0 <= r < a.Length ==> a[r] == x
{
r := 0;
while r < a.Length
{
if a[r] == x
{
return r;
}
r := r + 1;
}
return -1;
}
|
722 | metodosFormais_tmp_tmp4q2kmya4_T1-MetodosFormais_examples_somatoriov2.dfy | function somaAteAberto(a:array<nat>, i:nat):nat
requires i <= a.Length
reads a
{
if i ==0
then 0
else a[i-1] + somaAteAberto(a,i-1)
}
method somatorio(a:array<nat>) returns (s:nat)
ensures s == somaAteAberto(a, a.Length)
{
s := 0;
for i:= 0 to a.Length
invariant s == somaAteAberto(a,i)
{
s := s + a[i];
}
}
| function somaAteAberto(a:array<nat>, i:nat):nat
requires i <= a.Length
reads a
{
if i ==0
then 0
else a[i-1] + somaAteAberto(a,i-1)
}
method somatorio(a:array<nat>) returns (s:nat)
ensures s == somaAteAberto(a, a.Length)
{
s := 0;
for i:= 0 to a.Length
{
s := s + a[i];
}
}
|
723 | nitwit_tmp_tmplm098gxz_nit.dfy | // Liam Wynn, 3/13/2021, CS 510p
/*
In this program, I'm hoping to define
N's complement: a generalized form of 2's complement.
I ran across this idea back in ECE 341, when I asked
the professor about a crackpot theoretical "ternary machine".
Looking into it, I came across a general form of 2's complement.
Suppose I had the following 4 nit word in base 3:
1 2 0 1 (3)
Now, in two's complement, you "flip" the bits and add 1. In
n's complement, you flip the bits by subtracting the current
nit value from the largest possible nit value. Since our base
is 3, our highest possible nit value is 2:
1 0 2 1 (3)
Note how the 1's don't change (2 - 1 = 1), but the "flipping"
is demonstrated in the 2 and 0. flip(2) in base 3 = 0, and flip(0)
in base 3 = 2.
Now let's increment our flipped word:
1 0 2 2 (3)
Now, if this is truly the n's complement of 1 2 0 1 (3), their
sum should be 0:
1 1 1
1 2 0 1
+ 1 0 2 2
---------
1 0 0 0 0 (3)
Now, since our word size is 4 nits, the last nit gets dropped
giving us 0 0 0 0!
So basically I want to write a Dafny program that does the above
but verified. I don't know how far I will get, but I essentially
want to write an increment, addition, and flip procedures such
that:
sum(v, increment(flip(v))) = 0, where v is a 4 nit value in
a given base n.
*/
/*
In this program, we deal with bases that are explicitly greater
than or equal to 2. Without this fact, virtually all of our
postconditions will not be provable. We will run into issues
of dividing by 0 and what not.
*/
predicate valid_base(b : nat) {
b >= 2
}
/*
Now we are in a position to define a nit formally. We say
a natural number n is a "nit" of some base b if 0 <= n < b.
0 and 1 are 2-nits ("bits") since 0 <= 0 < 2 and 0 <= 1 < 2.
*/
predicate nitness(b : nat, n : nat)
requires (valid_base(b))
{
0 <= n < b
}
/*
We define incrementing a nit (given its base). When you add two digits
together, you "carry the one" if the sum is >= 10.
1
7
+ 3
---
10
Addition simply takes two collections of things and merges them together.
Expressing the resulting collection in base 10 requires this strange
notion of "carrying the one". What it means is: the sum of 7 and 3
is one set of ten items, and nothing left over". Or if I did 6 + 7,
that is "one set of 10, and a set of 3".
The same notion applies in other bases. 1 + 1 in base 2 is "one set
of 2 and 0 sets of ones".
We can express "carrying" by using division. Division by a base
tells us how many sets of that base we have. So 19 in base 10 is
"1 set of 10, and 9 left over". So modding tells us what's left
over and division tells us how much to carry (how many sets of the
base we have).
*/
method nit_increment(b : nat, n : nat) returns (sum : nat, carry : nat)
// Note: apparently, you need to explicitly put this here
// even though we've got it in the nitness predicate
requires (valid_base(b))
requires (nitness(b, n))
ensures (nitness(b, sum))
ensures (nitness(b, carry))
{
sum := (n + 1) % b;
carry := (n + 1) / b;
}
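/* Editor's sketch (not in the original file): the base-10 carrying examples
   from the comment above, checked directly on literals. 7 + 3 is exactly one
   set of 10 with nothing left over, and 19 is one set of 10 with 9 left over. */
lemma carry_example()
{
  assert (7 + 3) % 10 == 0 && (7 + 3) / 10 == 1;
  assert 19 % 10 == 9 && 19 / 10 == 1;
}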
/*
Okay next we are going to define the flip operation. In binary,
flip(0) = 1 and flip(1) = 0. We can generalize it to any base
by defining it as so:
let q be the max possible value of a given base. This
is b - 1. Given some nit n of b, the flip(n) is q - n.
For base 2, q = b - 1 = 2 - 1 = 1. flip(0) = 1 - 0 = 1,
and flip(1) = 1 - 1 = 0.
For base 3, q = 3 - 1 = 2. flip(0) = 2 - 0 = 2,
flip(1) = 2 - 1 = 1, and flip(2) = 2 - 2 = 0.
To begin with, we define a predicate is_max_nit which
is true if some natural q == b - 1.
*/
predicate is_max_nit(b : nat, q : nat) {
q == b - 1
}
/*
Next we define a meta-operator (on a base b) that
returns the max nit. To make Dafny (and our inner
mathmatician) happy, we need to require that b is
a valid base, and explicitly say max_nit(b) is
a nit of b, and that max_nit(b) is_max_nit(b).
I found these made the actual flip operation provable.
*/
method max_nit(b: nat) returns (nmax : nat)
requires (valid_base(b))
ensures (nitness(b, nmax))
ensures (is_max_nit(b, nmax))
{
nmax := b - 1;
}
/*
Now we define the flip operation proper. For this to work,
we need is_max_nit and a kind of silly proof to make Dafny
happy.
*/
method nit_flip(b: nat, n : nat) returns (nf : nat)
requires (valid_base(b))
requires (nitness(b, n))
ensures (nitness (b, nf))
{
var mn : nat := max_nit(b);
// I found I could not just assert that
// 0 <= n <= mn. I had to do this long
// series of asserts to prove it.
assert 0 < n < b ==> n <= b - 1;
assert 0 == n ==> n <= b - 1;
assert n <= b - 1;
assert mn == b - 1;
assert 0 <= n <= mn;
// But from all the above, Dafny can figure
// out that nitness(b, mn - n) holds.
nf := mn - n;
}
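/* Editor's note (illustrative sketch, not in the original file): a caller of
   nit_flip can rely only on the stated postcondition nitness(b, nf); the
   concrete flipped value (e.g. that flipping 0 in base 3 gives 2) is not
   exposed by the spec. */
method nit_flip_example()
{
  var f := nit_flip(3, 0);
  assert nitness(3, f);
}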
/*
We will now take a detour back to addition. We want to define
a general version of nit_increment that allows you to add any two nits
*/
method nit_add(b : nat, x : nat, y : nat) returns (z : nat, carry : nat)
requires (valid_base(b))
requires (nitness(b, x))
requires (nitness(b, y))
ensures (nitness(b, z))
ensures (nitness(b, carry))
// This is a useful fact for doing general form addition.
ensures (carry == 0 || carry == 1)
{
z := (x + y) % b;
carry := (x + y) / b;
// The last postcondition is a little too bold,
// so here is a proof of its correctness
assert x + y < b + b;
assert (x + y) / b < (b + b) / b;
assert (x + y) / b < 2;
assert carry < 2;
assert carry == 0 || carry == 1;
}
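/* Editor's sketch (not in the original file): the carry bound in a concrete
   case. Adding the two largest base-3 nits, 2 + 2 == 4, leaves digit 1 and
   carries exactly one set of 3 -- consistent with carry == 0 || carry == 1. */
lemma nit_add_example()
{
  assert (2 + 2) % 3 == 1;
  assert (2 + 2) / 3 == 1;
}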
/*
It will come in handy to define a version of nit_add that takes
an additional argument c. Suppose I wanted to do base 2 addition
as follows:
1 1
0 1
+----
Doing one step would give us:
1
1 1
0 1
+----
0
This will allow us to do the above step nicely.
*/
method nit_add_three(b : nat, c : nat, x : nat, y : nat) returns (z : nat, carry : nat)
requires (valid_base(b))
requires (c == 0 || c == 1)
requires (nitness(b, x))
requires (nitness(b, y))
ensures (nitness(b, z))
ensures (nitness(b, carry))
ensures (carry == 0 || carry == 1)
{
if(c == 0) {
z, carry := nit_add(b, x, y);
} else {
z := (x + y + 1) % b;
carry := (x + y + 1) / b;
// Gigantic proof to show that (x + y + 1) / b will either == 1
// (meaning we need 1 set of b to contain x + y + 1)
// or (x + y + 1) / b == 0 (meaning we don't need a set of b to contain x + y + 1).
assert 0 <= b - 1;
assert 0 <= x < b;
assert 0 == x || 0 < x;
assert 0 < x ==> x <= b - 1;
assert 0 <= x <= b - 1;
assert 0 <= y < b;
assert 0 == y || 0 < y;
assert 0 <= b - 1;
assert 0 < y ==> y <= b - 1;
assert 0 <= y <= b - 1;
assert x + y <= (b - 1) + (b - 1);
assert x + y <= 2 * b - 2;
assert x + y + 1 <= 2 * b - 2 + 1;
assert x + y + 1 <= 2 * b - 1;
assert 2 * b - 1 < 2 * b;
assert x + y + 1 < 2 * b;
assert (x + y + 1) / b < 2;
assert (x + y + 1) / b == 0 || (x + y + 1) / b == 1;
}
}
/*
Whereas binary computers have the byte,
we will define a general version called a "nyte". A "nyte"
would be a collection of eight nits. However, for
simplicity's sake, we deal in half-nytes. A nibble is a
half-byte, so in our program we will call it a bibble.
So, a bibble given some valid_base b is a collection
of four nits.
*/
predicate bibble(b : nat, a : seq<nat>)
{
valid_base(b) &&
|a| == 4 &&
forall n :: n in a ==> nitness(b, n)
}
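// Editor's note: for example, [2, 1, 0, 2] -- the word used in Main at the
// bottom of this file -- is a bibble in base 3: it has exactly four entries,
// each of which is a nit of 3.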
/*
As with nits, we will define addition, increment, and flip operations.
*/
method bibble_add(b : nat, p : seq<nat>, q : seq<nat>) returns (r : seq<nat>)
requires (valid_base(b))
requires (bibble(b, p))
requires (bibble(b, q))
ensures (bibble(b, r))
{
var z3, c3 := nit_add(b, p[3], q[3]);
var z2, c2 := nit_add_three(b, c3, p[2], q[2]);
var z1, c1 := nit_add_three(b, c2, p[1], q[1]);
var z0, c0 := nit_add_three(b, c1, p[0], q[0]);
r := [z0, z1, z2, z3];
}
method bibble_increment(b : nat, p : seq<nat>) returns (r : seq<nat>)
requires (valid_base(b))
requires (bibble(b, p))
ensures (bibble(b, r))
{
var q : seq<nat> := [0, 0, 0, 1];
assert bibble(b, q);
r := bibble_add(b, p, q);
}
method bibble_flip(b : nat, p : seq<nat>) returns (fp : seq<nat>)
requires (valid_base(b))
requires (bibble(b, p))
ensures (bibble(b, fp))
{
var n0 := nit_flip(b, p[0]);
var n1 := nit_flip(b, p[1]);
var n2 := nit_flip(b, p[2]);
var n3 := nit_flip(b, p[3]);
fp := [n0, n1, n2, n3];
}
/*
The last part of the program: n's complement! It's the same as two's complement:
we flip all the nits and add 1.
*/
method n_complement(b : nat, p : seq<nat>) returns (com : seq<nat>)
requires (valid_base(b))
requires (bibble(b, p))
ensures (bibble(b, com))
{
var fp := bibble_flip(b, p);
var fpi := bibble_increment(b, fp);
com := fpi;
}
method Main() {
var b := 3;
var bibble1 := [2, 1, 0, 2];
var complement := n_complement(b, bibble1);
var bibble_sum := bibble_add(b, bibble1, complement);
print bibble1, " + ", complement, " = ", bibble_sum, " (should be [0, 0, 0, 0])\n";
}
| // Liam Wynn, 3/13/2021, CS 510p
/*
In this program, I'm hoping to define
N's complement: a generalized form of 2's complement.
I ran across this idea back in ECE 341, when I asked
the professor about a crackpot theoretical "ternary machine".
Looking into it, I came across a general form of 2's complement.
Suppose I had the following 4 nit word in base 3:
1 2 0 1 (3)
Now, in two's complement, you "flip" the bits and add 1. In
n's complement, you flip the bits by subtracting the current
nit value from the largest possible nit value. Since our base
is 3, our highest possible nit value is 2:
1 0 2 1 (3)
Note how the 1's don't change (2 - 1 = 1), but the "flipping"
is demonstrated in the 2 and 0. flip(2) in base 3 = 0, and flip(0)
in base 3 = 2.
Now let's increment our flipped word:
1 0 2 2 (3)
Now, if this is truly the n's complement of 1 2 0 1 (3), their
sum should be 0:
1 1 1
1 2 0 1
+ 1 0 2 2
---------
1 0 0 0 0 (3)
Now, since our word size is 4 nits, the last nit gets dropped
giving us 0 0 0 0!
So basically I want to write a Dafny program that does the above
but verified. I don't know how far I will get, but I essentially
want to write an increment, addition, and flip procedures such
that:
sum(v, increment(flip(v))) = 0, where v is a 4 nit value in
a given base n.
*/
/*
In this program, we deal with bases that are explicitly greater
than or equal to 2. Without this fact, virtually all of our
postconditions will not be provable. We will run into issues
of dividing by 0 and what not.
*/
predicate valid_base(b : nat) {
b >= 2
}
/*
Now we are in a position to define a nit formally. We say
a natural number n is a "nit" of some base b if 0 <= n < b.
0 and 1 are 2-nits ("bits") since 0 <= 0 < 2 and 0 <= 1 < 2.
*/
predicate nitness(b : nat, n : nat)
requires (valid_base(b))
{
0 <= n < b
}
/*
We define incrementing a nit (given its base). When you add two digits
together, you "carry the one" if the sum is >= 10.
1
7
+ 3
---
10
Addition simply takes two collections of things and merges them together.
Expressing the resulting collection in base 10 requires this strange
notion of "carrying the one". What it means is: the sum of 7 and 3
is one set of ten items, and nothing left over". Or if I did 6 + 7,
that is "one set of 10, and a set of 3".
The same notion applies in other bases. 1 + 1 in base 2 is "one set
of 2 and 0 sets of ones".
We can express "carrying" by using division. Division by a base
tells us how many sets of that base we have. So 19 in base 10 is
"1 set of 10, and 9 left over". So modding tells us what's left
over and division tells us how much to carry (how many sets of the
base we have).
*/
method nit_increment(b : nat, n : nat) returns (sum : nat, carry : nat)
// Note: apparently, you need to explicitly put this here
// even though we've got it in the nitness predicate
requires (valid_base(b))
requires (nitness(b, n))
ensures (nitness(b, sum))
ensures (nitness(b, carry))
{
sum := (n + 1) % b;
carry := (n + 1) / b;
}
/*
Okay next we are going to define the flip operation. In binary,
flip(0) = 1 and flip(1) = 0. We can generalize it to any base
by defining it as so:
let q be the max possible value of a given base. This
is b - 1. Given some nit n of b, the flip(n) is q - n.
For base 2, q = b - 1 = 2 - 1 = 1. flip(0) = 1 - 0 = 1,
and flip(1) = 1 - 1 = 0.
For base 3, q = 3 - 1 = 2. flip(0) = 2 - 0 = 2,
flip(1) = 2 - 1 = 1, and flip(2) = 2 - 2 = 0.
To begin with, we define a predicate is_max_nit which
is true if some natural q == b - 1.
*/
predicate is_max_nit(b : nat, q : nat) {
q == b - 1
}
/*
Next we define a meta-operator (on a base b) that
returns the max nit. To make Dafny (and our inner
mathematician) happy, we need to require that b is
a valid base, and explicitly say max_nit(b) is
a nit of b, and that max_nit(b) is_max_nit(b).
I found these made the actual flip operation provable.
*/
method max_nit(b: nat) returns (nmax : nat)
requires (valid_base(b))
ensures (nitness(b, nmax))
ensures (is_max_nit(b, nmax))
{
nmax := b - 1;
}
/*
Now we define the flip operation proper. For this to work,
we need is_max_nit and a kind of silly proof to make Dafny
happy.
*/
method nit_flip(b: nat, n : nat) returns (nf : nat)
requires (valid_base(b))
requires (nitness(b, n))
ensures (nitness (b, nf))
{
var mn : nat := max_nit(b);
// I found I could not just assert that
// 0 <= n <= mn. I had to do this long
// series of asserts to prove it.
// But from all the above, Dafny can figure
// out that nitness(b, mn - n) holds.
nf := mn - n;
}
/*
We will now take a detour back to addition. We want to define
a general version of nit_increment that allows you to add any two nits
*/
method nit_add(b : nat, x : nat, y : nat) returns (z : nat, carry : nat)
requires (valid_base(b))
requires (nitness(b, x))
requires (nitness(b, y))
ensures (nitness(b, z))
ensures (nitness(b, carry))
// This is a useful fact for doing general form addition.
ensures (carry == 0 || carry == 1)
{
z := (x + y) % b;
carry := (x + y) / b;
// The last postcondition is a little too bold,
// so here is a proof of its correctness
}
/*
It will come in handy to define a version of nit_add that takes
an additional argument c. Suppose I wanted to do base 2 addition
as follows:
1 1
0 1
+----
Doing one step would give us:
1
1 1
0 1
+----
0
This will allow us to do the above step nicely.
*/
method nit_add_three(b : nat, c : nat, x : nat, y : nat) returns (z : nat, carry : nat)
requires (valid_base(b))
requires (c == 0 || c == 1)
requires (nitness(b, x))
requires (nitness(b, y))
ensures (nitness(b, z))
ensures (nitness(b, carry))
ensures (carry == 0 || carry == 1)
{
if(c == 0) {
z, carry := nit_add(b, x, y);
} else {
z := (x + y + 1) % b;
carry := (x + y + 1) / b;
// Gigantic proof to show that (x + y + 1) / b will either == 1
// (meaning we need 1 set of b to contain x + y + 1)
// or (x + y + 1) / b == 0 (meaning we don't need a set of b to contain x + y + 1).
}
}
/*
Whereas binary computers have the byte,
we will define a general version called a "nyte". A "nyte"
would be a collection of eight nits. However, for
simplicity's sake, we deal in half-nytes. A nibble is a
half-byte, so in our program we will call it a bibble.
So, a bibble given some valid_base b is a collection
of four nits.
*/
predicate bibble(b : nat, a : seq<nat>)
{
valid_base(b) &&
|a| == 4 &&
forall n :: n in a ==> nitness(b, n)
}
/*
As with nits, we will define addition, increment, and flip operations.
*/
method bibble_add(b : nat, p : seq<nat>, q : seq<nat>) returns (r : seq<nat>)
requires (valid_base(b))
requires (bibble(b, p))
requires (bibble(b, q))
ensures (bibble(b, r))
{
var z3, c3 := nit_add(b, p[3], q[3]);
var z2, c2 := nit_add_three(b, c3, p[2], q[2]);
var z1, c1 := nit_add_three(b, c2, p[1], q[1]);
var z0, c0 := nit_add_three(b, c1, p[0], q[0]);
r := [z0, z1, z2, z3];
}
method bibble_increment(b : nat, p : seq<nat>) returns (r : seq<nat>)
requires (valid_base(b))
requires (bibble(b, p))
ensures (bibble(b, r))
{
var q : seq<nat> := [0, 0, 0, 1];
r := bibble_add(b, p, q);
}
method bibble_flip(b : nat, p : seq<nat>) returns (fp : seq<nat>)
requires (valid_base(b))
requires (bibble(b, p))
ensures (bibble(b, fp))
{
var n0 := nit_flip(b, p[0]);
var n1 := nit_flip(b, p[1]);
var n2 := nit_flip(b, p[2]);
var n3 := nit_flip(b, p[3]);
fp := [n0, n1, n2, n3];
}
/*
The last part of the program: n's complement! It's the same as two's complement:
we flip all the nits and add 1.
*/
method n_complement(b : nat, p : seq<nat>) returns (com : seq<nat>)
requires (valid_base(b))
requires (bibble(b, p))
ensures (bibble(b, com))
{
var fp := bibble_flip(b, p);
var fpi := bibble_increment(b, fp);
com := fpi;
}
method Main() {
var b := 3;
var bibble1 := [2, 1, 0, 2];
var complement := n_complement(b, bibble1);
var bibble_sum := bibble_add(b, bibble1, complement);
print bibble1, " + ", complement, " = ", bibble_sum, " (should be [0, 0, 0, 0])\n";
}
|
724 | paxos_proof_tmp_tmpxpmiksmt_triggers.dfy | // predicate P(x:int)
// predicate Q(x:int)
lemma M(a: seq<int>, m: map<bool,int>)
requires 2 <= |a|
requires false in m && true in m
{
assume forall i {:trigger a[i]} :: 0 <= i < |a|-1 ==> a[i] <= a[i+1];
var x :| 0 <= x <= |a|-2;
assert a[x] <= a[x+1];
}
| // predicate P(x:int)
// predicate Q(x:int)
lemma M(a: seq<int>, m: map<bool,int>)
requires 2 <= |a|
requires false in m && true in m
{
assume forall i {:trigger a[i]} :: 0 <= i < |a|-1 ==> a[i] <= a[i+1];
var x :| 0 <= x <= |a|-2;
}
|
725 | protocol-verification-fa2023_tmp_tmpw6hy3mjp_demos_ch01_fast_exp.dfy | function exp(b: nat, n: nat): nat {
if n == 0 then 1
else b * exp(b, n-1)
}
lemma exp_sum(b: nat, n1: nat, n2: nat)
ensures exp(b, n1 + n2) == exp(b, n1) * exp(b, n2)
{
if n1 == 0 {
return;
}
exp_sum(b, n1-1, n2);
}
// this "auto" version of exp_sum is convenient when we want to let Z3 figure
// out how to use exp_sum rather than providing all the arguments ourselves
lemma exp_sum_auto(b: nat)
ensures forall n1: nat, n2: nat :: exp(b, n1 + n2) == exp(b, n1) * exp(b, n2)
{
forall n1: nat, n2: nat
ensures exp(b, n1 + n2) == exp(b, n1) * exp(b, n2) {
exp_sum(b, n1, n2);
}
}
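// Editor's note: a typical use of this lemma appears in fast_exp below, where
// equalities of the form exp(b, x + y) == exp(b, x) * exp(b, y) are discharged
// with `assert ... by { exp_sum_auto(b); }` without naming x and y explicitly.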
/* A key aspect of this proof is that each iteration handles one bit of the
* input. The best way I found to express its loop invariants is to compute and
* refer to this sequence of bits, even if the code never materializes it. */
function bits(n: nat): seq<bool>
decreases n
{
if n == 0 then []
else [if (n % 2 == 0) then false else true] + bits(n/2)
}
function from_bits(s: seq<bool>): nat {
if s == [] then 0
else (if s[0] then 1 else 0) + 2 * from_bits(s[1..])
}
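// Editor's note (worked example, not in the original): bits is the
// little-endian binary expansion, so bits(6) == [false, true, true]
// (6 = 0*1 + 1*2 + 1*4), and from_bits maps that sequence back to 6 --
// the round trip that bits_from_bits states in general below.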
lemma bits_from_bits(n: nat)
ensures from_bits(bits(n)) == n
{
}
lemma from_bits_append(s: seq<bool>, b: bool)
ensures from_bits(s + [b]) == from_bits(s) + exp(2, |s|) * (if b then 1 else 0)
{
if s == [] {
return;
}
assert s == [s[0]] + s[1..];
from_bits_append(s[1..], b);
// from recursive call
assert from_bits(s[1..] + [b]) == from_bits(s[1..]) + exp(2, |s|-1) * (if b then 1 else 0);
exp_sum(2, |s|-1, 1);
assert (s + [b])[1..] == s[1..] + [b]; // observe
assert from_bits(s + [b]) == (if s[0] then 1 else 0) + 2 * from_bits(s[1..] + [b]);
}
method fast_exp(b: nat, n: nat) returns (r: nat)
ensures r == exp(b, n)
{
// a is the exponent so far (see the invariant for the details)
var a := 1;
// c is b^(2^i) where i is the iteration number (see the invariant)
var c := b;
// we shadow n with a mutable variable since the loop modifies it at each
// iteration (it essentially tracks the remaining work, as expressed more
// precisely in the invariants)
var n := n;
// we will need to refer to the original value of n, which is shadowed, so to
// do that we store it in a ghost variable
ghost var n0 := n;
// to state the invariants we track the iteration count (but it's not used for
// the implementation, which only relies on n)
ghost var i: nat := 0;
bits_from_bits(n);
while n > 0
decreases n
invariant n <= n0
invariant i <= |bits(n0)|
// c is used to accumulate the exponent for the current bit
invariant c == exp(b, exp(2, i))
invariant bits(n) == bits(n0)[i..]
// n is the remaining work
invariant n == from_bits(bits(n0)[i..])
// a has the exponent using the bits of n0 through i
invariant a == exp(b, from_bits(bits(n0)[..i]))
{
ghost var n_loop_top := n;
if n % 2 == 1 {
assert bits(n)[0] == true;
// a accumulates bits(n0)[i..]. In this branch we drop a 1 bit from n and
// need to multiply in 2^i multiplications for that bit, which we get from
// c
a := a * c;
exp_sum(b, n0-n, i);
n := n / 2;
assert a == exp(b, from_bits(bits(n0)[..i]) + exp(2, i)) by {
exp_sum_auto(b);
}
assert bits(n0)[..i+1] == bits(n0)[..i] + [bits(n0)[i]];
from_bits_append(bits(n0)[..i], bits(n0)[i]);
assert a == exp(b, from_bits(bits(n0)[..i+1]));
} else {
assert bits(n)[0] == false;
n := n / 2;
assert bits(n0)[..i+1] == bits(n0)[..i] + [bits(n0)[i]];
from_bits_append(bits(n0)[..i], bits(n0)[i]);
// the new bit is a 0 so we don't need to change a to restore the
// invariant, we can just advance i
assert a == exp(b, from_bits(bits(n0)[..i+1]));
}
assert n == n_loop_top/2;
c := c * c;
// the invariant for c is relatively easy to maintain
assert c == exp(b, exp(2, i+1)) by {
exp_sum_auto(b);
}
i := i + 1;
}
// we need to prove that i covers all of bits(n0)
assert bits(n0)[..i] == bits(n0);
return a;
}
| function exp(b: nat, n: nat): nat {
if n == 0 then 1
else b * exp(b, n-1)
}
lemma exp_sum(b: nat, n1: nat, n2: nat)
ensures exp(b, n1 + n2) == exp(b, n1) * exp(b, n2)
{
if n1 == 0 {
return;
}
exp_sum(b, n1-1, n2);
}
// this "auto" version of exp_sum is convenient when we want to let Z3 figure
// out how to use exp_sum rather than providing all the arguments ourselves
lemma exp_sum_auto(b: nat)
ensures forall n1: nat, n2: nat :: exp(b, n1 + n2) == exp(b, n1) * exp(b, n2)
{
forall n1: nat, n2: nat
ensures exp(b, n1 + n2) == exp(b, n1) * exp(b, n2) {
exp_sum(b, n1, n2);
}
}
/* A key aspect of this proof is that each iteration handles one bit of the
* input. The best way I found to express its loop invariants is to compute and
* refer to this sequence of bits, even if the code never materializes it. */
function bits(n: nat): seq<bool>
{
if n == 0 then []
else [if (n % 2 == 0) then false else true] + bits(n/2)
}
function from_bits(s: seq<bool>): nat {
if s == [] then 0
else (if s[0] then 1 else 0) + 2 * from_bits(s[1..])
}
lemma bits_from_bits(n: nat)
ensures from_bits(bits(n)) == n
{
}
lemma from_bits_append(s: seq<bool>, b: bool)
ensures from_bits(s + [b]) == from_bits(s) + exp(2, |s|) * (if b then 1 else 0)
{
if s == [] {
return;
}
from_bits_append(s[1..], b);
// from recursive call
exp_sum(2, |s|-1, 1);
}
method fast_exp(b: nat, n: nat) returns (r: nat)
ensures r == exp(b, n)
{
// a is the exponent so far (see the invariant for the details)
var a := 1;
// c is b^(2^i) where i is the iteration number (see the invariant)
var c := b;
// we shadow n with a mutable variable since the loop modifies it at each
// iteration (it essentially tracks the remaining work, as expressed more
// precisely in the invariants)
var n := n;
// we will need to refer to the original value of n, which is shadowed, so to
// do that we store it in a ghost variable
ghost var n0 := n;
// to state the invariants we track the iteration count (but it's not used for
// the implementation, which only relies on n)
ghost var i: nat := 0;
bits_from_bits(n);
while n > 0
// c is used to accumulate the exponent for the current bit
// n is the remaining work
// a has the exponent using the bits of n0 through i
{
ghost var n_loop_top := n;
if n % 2 == 1 {
// a accumulates bits(n0)[i..]. In this branch we drop a 1 bit from n and
// need to multiply in 2^i multiplications for that bit, which we get from
// c
a := a * c;
exp_sum(b, n0-n, i);
n := n / 2;
exp_sum_auto(b);
from_bits_append(bits(n0)[..i], bits(n0)[i]);
} else {
n := n / 2;
from_bits_append(bits(n0)[..i], bits(n0)[i]);
// the new bit is a 0 so we don't need to change a to restore the
// invariant, we can just advance i
}
c := c * c;
// the invariant for c is relatively easy to maintain
exp_sum_auto(b);
i := i + 1;
}
// we need to prove that i covers all of bits(n0)
return a;
}
|
726 | protocol-verification-fa2023_tmp_tmpw6hy3mjp_demos_ch03_nim_v3.dfy | // Nim version 3: fix the bug and demonstrate a behavior.
//
// In this version, we've fixed the bug by actually flipping whose turn it is in
// each transition.
datatype Player = P1 | P2
{
function Other(): Player {
if this == P1 then P2 else P1
}
}
datatype Variables = Variables(piles: seq<nat>, turn: Player)
ghost predicate Init(v: Variables) {
&& |v.piles| == 3
&& v.turn.P1? // syntax
}
datatype Step =
| TurnStep(take: nat, p: nat)
| NoOpStep()
ghost predicate Turn(v: Variables, v': Variables, step: Step)
requires step.TurnStep?
{
var p := step.p;
var take := step.take;
&& p < |v.piles|
&& take <= v.piles[p]
&& v' == v.(piles := v.piles[p := v.piles[p] - take]).(turn := v.turn.Other())
}
// nearly boilerplate (just gather up all transitions)
ghost predicate NextStep(v: Variables, v': Variables, step: Step) {
match step {
case TurnStep(_, _) => Turn(v, v', step)
case NoOpStep() => v' == v // we don't really need to define predicate NoOp
}
}
// boilerplate
lemma NextStepDeterministicGivenStep(v: Variables, v': Variables, v'': Variables, step: Step)
requires NextStep(v, v', step)
requires NextStep(v, v'', step)
ensures v' == v''
{
}
// boilerplate
ghost predicate Next(v: Variables, v': Variables) {
exists step :: NextStep(v, v', step)
}
// We'll frequently prove a lemma of this form to show some example of the state
// machine transitioning. You'll prove determinism to avoid accidentally having
// transitions do things they shouldn't. Proofs will show that your state
// machine doesn't do anything bad (note this would also catch unintentional
// non-determinism, but it can be more painful to debug such issues at this
// stage). These example behaviors will prevent bugs where your state machine
// just doesn't do anything, especially because of overly restrictive
// preconditions.
lemma ExampleBehavior() returns (b: seq<Variables>)
ensures |b| >= 3 // for this example, we just demonstrate there is some execution with three states
ensures Init(b[0])
ensures forall i:nat | i + 1 < |b| :: Next(b[i], b[i+1])
{
// the syntax here constructs a Variables with named fields.
var state0 := Variables(piles := [3, 5, 7], turn := P1);
b := [
state0,
Variables(piles := [3, 1, 7], turn := P2),
Variables(piles := [3, 1, 0], turn := P1)
];
// note that we need these assertions because we need to prove Next, which is
// defined with `exists step :: ...` - Dafny needs help to see which value of
// `step` will prove this.
assert NextStep(b[0], b[1], TurnStep(take := 4, p := 1));
assert NextStep(b[1], b[2], TurnStep(take := 7, p := 2));
}
| // Nim version 3: fix the bug and demonstrate a behavior.
//
// In this version, we've fixed the bug by actually flipping whose turn it is in
// each transition.
datatype Player = P1 | P2
{
function Other(): Player {
if this == P1 then P2 else P1
}
}
datatype Variables = Variables(piles: seq<nat>, turn: Player)
ghost predicate Init(v: Variables) {
&& |v.piles| == 3
&& v.turn.P1? // syntax
}
datatype Step =
| TurnStep(take: nat, p: nat)
| NoOpStep()
ghost predicate Turn(v: Variables, v': Variables, step: Step)
requires step.TurnStep?
{
var p := step.p;
var take := step.take;
&& p < |v.piles|
&& take <= v.piles[p]
&& v' == v.(piles := v.piles[p := v.piles[p] - take]).(turn := v.turn.Other())
}
// nearly boilerplate (just gather up all transitions)
ghost predicate NextStep(v: Variables, v': Variables, step: Step) {
match step {
case TurnStep(_, _) => Turn(v, v', step)
case NoOpStep() => v' == v // we don't really need to define predicate NoOp
}
}
// boilerplate
lemma NextStepDeterministicGivenStep(v: Variables, v': Variables, v'': Variables, step: Step)
requires NextStep(v, v', step)
requires NextStep(v, v'', step)
ensures v' == v''
{
}
// boilerplate
ghost predicate Next(v: Variables, v': Variables) {
exists step :: NextStep(v, v', step)
}
// We'll frequently prove a lemma of this form to show some example of the state
// machine transitioning. You'll prove determinism to avoid accidentally having
// transitions do things they shouldn't. Proofs will show that your state
// machine doesn't do anything bad (note this would also catch unintentional
// non-determinism, but it can be more painful to debug such issues at this
// stage). These example behaviors will prevent bugs where your state machine
// just doesn't do anything, especially because of overly restrictive
// preconditions.
lemma ExampleBehavior() returns (b: seq<Variables>)
ensures |b| >= 3 // for this example, we just demonstrate there is some execution with three states
ensures Init(b[0])
ensures forall i:nat | i + 1 < |b| :: Next(b[i], b[i+1])
{
// the syntax here constructs a Variables with named fields.
var state0 := Variables(piles := [3, 5, 7], turn := P1);
b := [
state0,
Variables(piles := [3, 1, 7], turn := P2),
Variables(piles := [3, 1, 0], turn := P1)
];
// note that we need these assertions because we need to prove Next, which is
// defined with `exists step :: ...` - Dafny needs help to see which value of
// `step` will prove this.
}
|
727 | protocol-verification-fa2023_tmp_tmpw6hy3mjp_demos_ch04_inductive_chain.dfy | module Ex {
// This simple example illustrates what the process of looking for an
// inductive invariant might look like.
datatype Variables = Variables(p1: bool, p2: bool, p3: bool, p4: bool)
ghost predicate Init(v: Variables) {
&& !v.p1
&& !v.p2
&& !v.p3
&& !v.p4
}
// The state machine starts out with all four booleans false, and it "turns
// on" p1, p2, p3, and p4 in order. The safety property says p4 ==> p1;
// proving this requires a stronger inductive invariant.
datatype Step =
| Step1
| Step2
| Step3
| Step4
| Noop
ghost predicate NextStep(v: Variables, v': Variables, step: Step)
{
match step {
// ordinarily we'd have a predicate for each step, but in this simple
// example it's easier to see everything written in one place
case Step1 =>
!v.p1 && v' == v.(p1 := true)
case Step2 =>
v.p1 && v' == v.(p2 := true)
case Step3 =>
v.p2 && v' == v.(p3 := true)
case Step4 =>
v.p3 && v' == v.(p4 := true)
case Noop => v' == v
}
}
ghost predicate Next(v: Variables, v': Variables)
{
exists step: Step :: NextStep(v, v', step)
}
ghost predicate Safety(v: Variables)
{
v.p4 ==> v.p1
}
ghost predicate Inv(v: Variables)
{
// SOLUTION
// This is one approach: prove implications that go all the way back to the
// beginning, trying to slowly work our way up to something inductive.
&& Safety(v)
&& (v.p3 ==> v.p1)
&& (v.p2 ==> v.p1)
// END
}
lemma InvInductive(v: Variables, v': Variables)
requires Inv(v) && Next(v, v')
ensures Inv(v')
{
// SOLUTION
// This :| syntax is called "assign-such-that". Think of it as telling Dafny
// to assign step a value such that NextStep(v, v', step) (the predicate on
// the RHS) holds. What Dafny will do is first prove there exists such a
// step, then bind an arbitrary value to step where NextStep(v, v', step)
// holds for the remainder of the proof.
var step :| NextStep(v, v', step);
assert NextStep(v, v', step); // by definition of :|
// END
match step {
case Step1 => { return; }
case Step2 => { return; }
case Step3 => { return; }
case Step4 => {
// SOLUTION
return;
// END
}
case Noop => { return; }
}
}
lemma InvSafe(v: Variables)
ensures Inv(v) ==> Safety(v)
{
return;
}
// This is the main inductive proof of Safety, but we moved all the difficult
// reasoning to the lemmas above.
lemma SafetyHolds(v: Variables, v': Variables)
ensures Init(v) ==> Inv(v)
ensures Inv(v) && Next(v, v') ==> Inv(v')
ensures Inv(v) ==> Safety(v)
{
if Inv(v) && Next(v, v') {
InvInductive(v, v');
}
InvSafe(v);
}
// SOLUTION
// Instead of worrying about Safety, we can approach invariants by starting
// with properties that should hold in all reachable states. The advantage of
// this approach is that we can "checkpoint" our work of writing an invariant
// that characterizes reachable states. The disadvantage is that we might
// prove properties that don't help with safety and waste time.
//
// Recall that an invariant may have a counterexample to induction (CTI): a
// way to start in a state satisfying the invariant and transition out of it.
// If the invariant really holds, then a CTI simply reflects an unreachable
// state, one that we should try to eliminate by strengthening the invariant.
// If we find a "self-inductive" property Inv that satisfies Init(v) ==>
// Inv(v) and Inv(v) && Next(v, v') ==> Inv(v'), then we can extend it without
// fear of breaking inductiveness: in proving Inv(v) && Inv2(v) && Next(v, v')
// ==> Inv(v') && Inv2(v'), notice that we can immediately prove Inv(v').
// However, we've also made progress: in proving Inv2(v'), we get to know
// Inv(v). This may rule out some CTIs, and eventually will be enough to prove
// Inv2 is inductive.
//
// Notice that the above discussion involved identifying a self-inductive
// invariant without trying to prove a safety property. This is one way to go
// about proving safety: start by proving "easy" properties that hold in
// reachable states. This will reduce the burden of getting CTIs (or failed
// proofs). However, don't spend all your time proving properties about
// reachable states: there will likely be properties that really are
// invariants, but (a) the proof is complicated and (b) they don't help you
// prove safety.
predicate Inv2(v: Variables) {
// each of these conjuncts is individually "self-inductive", but all of them
// are needed together to actually prove safety
&& (v.p2 ==> v.p1)
&& (v.p3 ==> v.p2)
&& (v.p4 ==> v.p3)
}
lemma Inv2Holds(v: Variables, v': Variables)
ensures Init(v) ==> Inv2(v)
ensures Inv2(v) && Next(v, v') ==> Inv2(v')
{
assert Init(v) ==> Inv2(v);
if Inv2(v) && Next(v, v') {
var step :| NextStep(v, v', step);
match step {
case Step1 => { return; }
case Step2 => { return; }
case Step3 => { return; }
case Step4 => { return; }
case Noop => { return; }
}
}
}
// END
}
| module Ex {
// This simple example illustrates what the process of looking for an
// inductive invariant might look like.
datatype Variables = Variables(p1: bool, p2: bool, p3: bool, p4: bool)
ghost predicate Init(v: Variables) {
&& !v.p1
&& !v.p2
&& !v.p3
&& !v.p4
}
// The state machine starts out with all four booleans false, and it "turns
// on" p1, p2, p3, and p4 in order. The safety property says p4 ==> p1;
// proving this requires a stronger inductive invariant.
datatype Step =
| Step1
| Step2
| Step3
| Step4
| Noop
ghost predicate NextStep(v: Variables, v': Variables, step: Step)
{
match step {
// ordinarily we'd have a predicate for each step, but in this simple
// example it's easier to see everything written in one place
case Step1 =>
!v.p1 && v' == v.(p1 := true)
case Step2 =>
v.p1 && v' == v.(p2 := true)
case Step3 =>
v.p2 && v' == v.(p3 := true)
case Step4 =>
v.p3 && v' == v.(p4 := true)
case Noop => v' == v
}
}
ghost predicate Next(v: Variables, v': Variables)
{
exists step: Step :: NextStep(v, v', step)
}
ghost predicate Safety(v: Variables)
{
v.p4 ==> v.p1
}
ghost predicate Inv(v: Variables)
{
// SOLUTION
// This is one approach: prove implications that go all the way back to the
// beginning, trying to slowly work our way up to something inductive.
&& Safety(v)
&& (v.p3 ==> v.p1)
&& (v.p2 ==> v.p1)
// END
}
lemma InvInductive(v: Variables, v': Variables)
requires Inv(v) && Next(v, v')
ensures Inv(v')
{
// SOLUTION
// This :| syntax is called "assign-such-that". Think of it as telling Dafny
// to assign step a value such that NextStep(v, v', step) (the predicate on
// the RHS) holds. What Dafny will do is first prove there exists such a
// step, then bind an arbitrary value to step where NextStep(v, v', step)
// holds for the remainder of the proof.
var step :| NextStep(v, v', step);
// END
match step {
case Step1 => { return; }
case Step2 => { return; }
case Step3 => { return; }
case Step4 => {
// SOLUTION
return;
// END
}
case Noop => { return; }
}
}
lemma InvSafe(v: Variables)
ensures Inv(v) ==> Safety(v)
{
return;
}
// This is the main inductive proof of Safety, but we moved all the difficult
// reasoning to the lemmas above.
lemma SafetyHolds(v: Variables, v': Variables)
ensures Init(v) ==> Inv(v)
ensures Inv(v) && Next(v, v') ==> Inv(v')
ensures Inv(v) ==> Safety(v)
{
if Inv(v) && Next(v, v') {
InvInductive(v, v');
}
InvSafe(v);
}
// SOLUTION
// Instead of worrying about Safety, we can approach invariants by starting
// with properties that should hold in all reachable states. The advantage of
// this approach is that we can "checkpoint" our work of writing an invariant
// that characterizes reachable states. The disadvantage is that we might
// prove properties that don't help with safety and waste time.
//
// Recall that an invariant may have a counterexample to induction (CTI): a
// way to start in a state satisfying the invariant and transition out of it.
// If the invariant really holds, then a CTI simply reflects an unreachable
// state, one that we should try to eliminate by strengthening the invariant.
// If we find a "self-inductive" property Inv that satisfies Init(v) ==>
// Inv(v) and Inv(v) && Next(v, v') ==> Inv(v'), then we can extend it without
// fear of breaking inductiveness: in proving Inv(v) && Inv2(v) && Next(v, v')
// ==> Inv(v') && Inv2(v'), notice that we can immediately prove Inv(v').
// However, we've also made progress: in proving Inv2(v'), we get to know
// Inv(v). This may rule out some CTIs, and eventually will be enough to prove
// Inv2 is inductive.
//
// Notice that the above discussion involved identifying a self-inductive
// invariant without trying to prove a safety property. This is one way to go
// about proving safety: start by proving "easy" properties that hold in
// reachable states. This will reduce the burden of getting CTIs (or failed
// proofs). However, don't spend all your time proving properties about
// reachable states: there will likely be properties that really are
// invariants, but (a) the proof is complicated and (b) they don't help you
// prove safety.
predicate Inv2(v: Variables) {
// each of these conjuncts is individually "self-inductive", but all of them
// are needed together to actually prove safety
&& (v.p2 ==> v.p1)
&& (v.p3 ==> v.p2)
&& (v.p4 ==> v.p3)
}
lemma Inv2Holds(v: Variables, v': Variables)
ensures Init(v) ==> Inv2(v)
ensures Inv2(v) && Next(v, v') ==> Inv2(v')
{
if Inv2(v) && Next(v, v') {
var step :| NextStep(v, v', step);
match step {
case Step1 => { return; }
case Step2 => { return; }
case Step3 => { return; }
case Step4 => { return; }
case Noop => { return; }
}
}
}
// END
}
|
728 | protocol-verification-fa2023_tmp_tmpw6hy3mjp_demos_ch04_invariant_proof.dfy | /* These three declarations are _abstract_ - we declare a state machine, but
* don't actually give a definition. Dafny will assume nothing about them, so our
* proofs below will be true for an arbitrary state machine. */
type Variables
predicate Init(v: Variables)
predicate Next(v: Variables, v': Variables)
/* We'll also consider an abstract Safety predicate over states and a
* user-supplied invariant to help prove the safety property. */
predicate Safety(v: Variables)
predicate Inv(v: Variables)
// We're going to reason about infinite executions, called behaviors here.
type Behavior = nat -> Variables
/* Now we want to prove the lemma below called SafetyAlwaysHolds. Take a look at
* its theorem statement. To prove this lemma, we need a helper lemma for two
* reasons: first, (because of Dafny) we need to have access to a variable for i
* to perform induction on it, and second, (more fundamentally) we need to
* _strengthen the induction hypothesis_ and prove `Inv(e(i))` rather than just
* `Safety(e(i))`. */
// This is the key induction.
lemma InvHoldsTo(e: nat -> Variables, i: nat)
requires Inv(e(0))
requires forall i:nat :: Next(e(i), e(i+1))
requires forall v, v' :: Inv(v) && Next(v, v') ==> Inv(v')
ensures Inv(e(i))
{
if i == 0 {
return;
}
InvHoldsTo(e, i-1);
// this is the inductive hypothesis
assert Inv(e(i-1));
// the requirements let us take the invariant from one step to the next (so in
// particular from e(i-1) to e(i)).
assert forall i:nat :: Inv(e(i)) ==> Inv(e(i+1));
}
ghost predicate IsBehavior(e: Behavior) {
&& Init(e(0))
&& forall i:nat :: Next(e(i), e(i+1))
}
lemma SafetyAlwaysHolds(e: Behavior)
// In the labs, we'll prove these three conditions. Note that these properties
// only require one or two states, not reasoning about sequences of states.
requires forall v :: Init(v) ==> Inv(v)
requires forall v, v' :: Inv(v) && Next(v, v') ==> Inv(v')
requires forall v :: Inv(v) ==> Safety(v)
// What we get generically from those three conditions is that the safety
// property holds for all reachable states - every state of every behavior of
// the state machine.
ensures IsBehavior(e) ==> forall i :: Safety(e(i))
{
if IsBehavior(e) {
assert Inv(e(0));
forall i:nat
ensures Safety(e(i)) {
InvHoldsTo(e, i);
}
}
}
| /* These three declarations are _abstract_ - we declare a state machine, but
* don't actually give a definition. Dafny will assume nothing about them, so our
* proofs below will be true for an arbitrary state machine. */
type Variables
predicate Init(v: Variables)
predicate Next(v: Variables, v': Variables)
/* We'll also consider an abstract Safety predicate over states and a
* user-supplied invariant to help prove the safety property. */
predicate Safety(v: Variables)
predicate Inv(v: Variables)
// We're going to reason about infinite executions, called behaviors here.
type Behavior = nat -> Variables
/* Now we want to prove the lemma below called SafetyAlwaysHolds. Take a look at
* its theorem statement. To prove this lemma, we need a helper lemma for two
* reasons: first, (because of Dafny) we need to have access to a variable for i
* to perform induction on it, and second, (more fundamentally) we need to
* _strengthen the induction hypothesis_ and prove `Inv(e(i))` rather than just
* `Safety(e(i))`. */
// This is the key induction.
lemma InvHoldsTo(e: nat -> Variables, i: nat)
requires Inv(e(0))
requires forall i:nat :: Next(e(i), e(i+1))
requires forall v, v' :: Inv(v) && Next(v, v') ==> Inv(v')
ensures Inv(e(i))
{
if i == 0 {
return;
}
InvHoldsTo(e, i-1);
// this is the inductive hypothesis
// the requirements let us take the invariant from one step to the next (so in
// particular from e(i-1) to e(i)).
}
ghost predicate IsBehavior(e: Behavior) {
&& Init(e(0))
&& forall i:nat :: Next(e(i), e(i+1))
}
lemma SafetyAlwaysHolds(e: Behavior)
// In the labs, we'll prove these three conditions. Note that these properties
// only require one or two states, not reasoning about sequences of states.
requires forall v :: Init(v) ==> Inv(v)
requires forall v, v' :: Inv(v) && Next(v, v') ==> Inv(v')
requires forall v :: Inv(v) ==> Safety(v)
// What we get generically from those three conditions is that the safety
// property holds for all reachable states - every state of every behavior of
// the state machine.
ensures IsBehavior(e) ==> forall i :: Safety(e(i))
{
if IsBehavior(e) {
forall i:nat
ensures Safety(e(i)) {
InvHoldsTo(e, i);
}
}
}
|
729 | protocol-verification-fa2023_tmp_tmpw6hy3mjp_demos_ch04_leader_election.dfy | // We'll define "Between" to capture how the ring wraps around.
// SOLUTION
ghost predicate Between(start: nat, i: nat, end: nat)
{
if start < end then start < i < end
else i < end || start < i
}
lemma BetweenTests()
{
assert Between(3, 4, 5);
assert !Between(3, 2, 4);
// when start >= end, behavior is a bit trickier
// before end
assert Between(5, 2, 3);
// after start
assert Between(5, 6, 3);
// not in this range
assert !Between(5, 4, 3);
assert forall i, k | Between(i, k, i) :: i != k;
}
// END
// ids gives each node's (unique) identifier (address)
//
// highest_heard[i] is the highest other identifier the node at index i has
// heard about (or -1 if it has heard about nobody - note that -1 is not a valid identifier).
datatype Variables = Variables(ids: seq<nat>, highest_heard: seq<int>) {
ghost predicate ValidIdx(i: int) {
0<=i<|ids|
}
ghost predicate UniqueIds() {
forall i, j | ValidIdx(i) && ValidIdx(j) ::
ids[i]==ids[j] ==> i == j
}
ghost predicate WF()
{
&& 0 < |ids|
&& |ids| == |highest_heard|
}
// We'll define an important predicate for the inductive invariant.
// SOLUTION
// `end` thinks `start` is the highest
ghost predicate IsChord(start: nat, end: nat)
{
&& ValidIdx(start) && ValidIdx(end)
&& WF()
&& highest_heard[end] == ids[start]
}
// END
}
ghost predicate Init(v: Variables)
{
&& v.UniqueIds()
&& v.WF()
// Everyone begins having heard about nobody, not even themselves.
&& (forall i | v.ValidIdx(i) :: v.highest_heard[i] == -1)
}
ghost function max(a: int, b: int) : int {
if a > b then a else b
}
ghost function NextIdx(v: Variables, idx: nat) : nat
requires v.WF()
requires v.ValidIdx(idx)
{
// for demo we started with a definition using modulo (%), but this non-linear
// arithmetic is less friendly to Dafny's automation
// SOLUTION
if idx == |v.ids| - 1 then 0 else idx + 1
// END
}
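// Editor's note: the modulo formulation alluded to above would be
// (idx + 1) % |v.ids|; for valid indices the conditional used here computes
// the same successor while staying within linear arithmetic.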
// The destination of a transmission is determined by the ring topology
datatype Step = TransmissionStep(src: nat)
// This is an atomic step where src tells its neighbor (dst, computed here) the
// highest src has seen _and_ dst updates its local state to reflect receiving
// this message.
ghost predicate Transmission(v: Variables, v': Variables, step: Step)
requires step.TransmissionStep?
{
var src := step.src;
&& v.WF()
&& v.ValidIdx(src)
&& v'.ids == v.ids
// Neighbor address in ring.
&& var dst := NextIdx(v, src);
// src sends the max of its highest_heard value and its own id.
&& var message := max(v.highest_heard[src], v.ids[src]);
// dst only overwrites its highest_heard if the message is higher.
&& var dst_new_max := max(v.highest_heard[dst], message);
// demo has a bug here
// SOLUTION
&& v'.highest_heard == v.highest_heard[dst := dst_new_max]
// END
}
ghost predicate NextStep(v: Variables, v': Variables, step: Step)
{
match step {
case TransmissionStep(_) => Transmission(v, v', step)
}
}
lemma NextStepDeterministicGivenStep(v: Variables, step: Step, v'1: Variables, v'2: Variables)
requires NextStep(v, v'1, step)
requires NextStep(v, v'2, step)
ensures v'1 == v'2
{}
ghost predicate Next(v: Variables, v': Variables)
{
exists step :: NextStep(v, v', step)
}
//////////////////////////////////////////////////////////////////////////////
// Spec (proof goal)
//////////////////////////////////////////////////////////////////////////////
ghost predicate IsLeader(v: Variables, i: int)
requires v.WF()
{
&& v.ValidIdx(i)
&& v.highest_heard[i] == v.ids[i]
}
ghost predicate Safety(v: Variables)
requires v.WF()
{
forall i, j | IsLeader(v, i) && IsLeader(v, j) :: i == j
}
//////////////////////////////////////////////////////////////////////////////
// Proof
//////////////////////////////////////////////////////////////////////////////
// SOLUTION
ghost predicate ChordHeardDominated(v: Variables, start: nat, end: nat)
requires v.IsChord(start, end)
requires v.WF()
{
forall i | v.ValidIdx(i) && Between(start, i, end) ::
v.highest_heard[i] > v.ids[i]
}
// We make this opaque so Dafny does not use it automatically; instead we'll use
// the lemma UseChordDominated when needed. In many proofs opaqueness is a way
// to improve performance, since it prevents the automation from doing too much
// work; in this proof it's only so we can make clear in the proof when this
// invariant is being used.
ghost predicate {:opaque} OnChordHeardDominatesId(v: Variables)
requires v.WF()
{
forall start: nat, end: nat | v.IsChord(start, end) ::
ChordHeardDominated(v, start, end)
}
lemma UseChordDominated(v: Variables, start: nat, end: nat)
requires v.WF()
requires OnChordHeardDominatesId(v)
requires v.IsChord(start, end )
ensures ChordHeardDominated(v, start, end)
{
reveal OnChordHeardDominatesId();
}
// END
ghost predicate Inv(v: Variables)
{
&& v.WF()
// The solution will need more conjuncts
// SOLUTION
&& v.UniqueIds()
&& OnChordHeardDominatesId(v)
// Safety is not needed - we can prove it holds from the other invariants
// END
}
lemma InitImpliesInv(v: Variables)
requires Init(v)
ensures Inv(v)
{
// SOLUTION
forall start: nat, end: nat | v.IsChord(start, end)
ensures false {
}
assert OnChordHeardDominatesId(v) by {
reveal OnChordHeardDominatesId();
}
// END
}
lemma NextPreservesInv(v: Variables, v': Variables)
requires Inv(v)
requires Next(v, v')
ensures Inv(v')
{
var step :| NextStep(v, v', step);
// SOLUTION
var src := step.src;
var dst := NextIdx(v, src);
var message := max(v.highest_heard[src], v.ids[src]);
var dst_new_max := max(v.highest_heard[dst], message);
assert v'.UniqueIds();
forall start: nat, end: nat | v'.IsChord(start, end)
ensures ChordHeardDominated(v', start, end)
{
if dst == end {
// the destination ignored the message anyway (because it already knew of a high enough node)
if dst_new_max == v.highest_heard[dst] {
assert v' == v;
UseChordDominated(v, start, end);
assert ChordHeardDominated(v', start, end);
} else if v'.highest_heard[dst] == v.ids[src] {
// the new chord is empty, irrespective of the old state
assert start == src;
assert forall k | v.ValidIdx(k) :: !Between(start, k, end);
assert ChordHeardDominated(v', start, end);
} else if v'.highest_heard[end] == v.highest_heard[src] {
// extended a chord
assert v.IsChord(start, src); // trigger
UseChordDominated(v, start, src);
assert ChordHeardDominated(v', start, end);
}
assert ChordHeardDominated(v', start, end);
} else {
assert v.IsChord(start, end);
UseChordDominated(v, start, end);
assert ChordHeardDominated(v', start, end);
}
}
assert OnChordHeardDominatesId(v') by {
reveal OnChordHeardDominatesId();
}
// END
}
lemma InvImpliesSafety(v: Variables)
requires Inv(v)
ensures Safety(v)
{
// the solution gives a long proof here to try to explain what's going on, but
// only a little proof is strictly needed for Dafny
// SOLUTION
forall i: nat, j: nat | IsLeader(v, i) && IsLeader(v, j)
ensures i == j
{
assert forall k | v.ValidIdx(k) && Between(i, k, i) :: i != k;
assert v.highest_heard[j] == v.ids[j]; // it's a leader
// do this proof by contradiction
if i != j {
assert v.IsChord(i, i); // observe
assert Between(i, j, i);
UseChordDominated(v, i, i);
// here we have the contradiction already, because i and j can't dominate
// each others ids
assert false;
}
}
// END
}
| // We'll define "Between" to capture how the ring wraps around.
// SOLUTION
ghost predicate Between(start: nat, i: nat, end: nat)
{
if start < end then start < i < end
else i < end || start < i
}
lemma BetweenTests()
{
// when start >= end, the behavior is a bit trickier
// before end
// after start
// not in this range
}
// END
// ids gives each node's (unique) identifier (address)
//
// highest_heard[i] is the highest other identifier the node at index i has
// heard about (or -1 if it has heard about nobody - note that -1 is not a valid identifier).
datatype Variables = Variables(ids: seq<nat>, highest_heard: seq<int>) {
ghost predicate ValidIdx(i: int) {
0<=i<|ids|
}
ghost predicate UniqueIds() {
forall i, j | ValidIdx(i) && ValidIdx(j) ::
ids[i]==ids[j] ==> i == j
}
ghost predicate WF()
{
&& 0 < |ids|
&& |ids| == |highest_heard|
}
// We'll define an important predicate for the inductive invariant.
// SOLUTION
// `end` thinks `start` is the highest
ghost predicate IsChord(start: nat, end: nat)
{
&& ValidIdx(start) && ValidIdx(end)
&& WF()
&& highest_heard[end] == ids[start]
}
// END
}
ghost predicate Init(v: Variables)
{
&& v.UniqueIds()
&& v.WF()
// Everyone begins having heard about nobody, not even themselves.
&& (forall i | v.ValidIdx(i) :: v.highest_heard[i] == -1)
}
ghost function max(a: int, b: int) : int {
if a > b then a else b
}
ghost function NextIdx(v: Variables, idx: nat) : nat
requires v.WF()
requires v.ValidIdx(idx)
{
// for demo we started with a definition using modulo (%), but this non-linear
// arithmetic is less friendly to Dafny's automation
// SOLUTION
if idx == |v.ids| - 1 then 0 else idx + 1
// END
}
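// (For comparison, the modulo-based definition would be (idx + 1) % |v.ids|;
// it computes the same successor, but facts about % typically need extra
// arithmetic lemmas, while the explicit branch above verifies directly.)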
// The destination of a transmission is determined by the ring topology
datatype Step = TransmissionStep(src: nat)
// This is an atomic step where src tells its neighbor (dst, computed here) the
// highest src has seen _and_ dst updates its local state to reflect receiving
// this message.
ghost predicate Transmission(v: Variables, v': Variables, step: Step)
requires step.TransmissionStep?
{
var src := step.src;
&& v.WF()
&& v.ValidIdx(src)
&& v'.ids == v.ids
// Neighbor address in ring.
&& var dst := NextIdx(v, src);
// src sends the max of its highest_heard value and its own id.
&& var message := max(v.highest_heard[src], v.ids[src]);
// dst only overwrites its highest_heard if the message is higher.
&& var dst_new_max := max(v.highest_heard[dst], message);
// demo has a bug here
// SOLUTION
&& v'.highest_heard == v.highest_heard[dst := dst_new_max]
// END
}
ghost predicate NextStep(v: Variables, v': Variables, step: Step)
{
match step {
case TransmissionStep(_) => Transmission(v, v', step)
}
}
lemma NextStepDeterministicGivenStep(v: Variables, step: Step, v'1: Variables, v'2: Variables)
requires NextStep(v, v'1, step)
requires NextStep(v, v'2, step)
ensures v'1 == v'2
{}
ghost predicate Next(v: Variables, v': Variables)
{
exists step :: NextStep(v, v', step)
}
//////////////////////////////////////////////////////////////////////////////
// Spec (proof goal)
//////////////////////////////////////////////////////////////////////////////
ghost predicate IsLeader(v: Variables, i: int)
requires v.WF()
{
&& v.ValidIdx(i)
&& v.highest_heard[i] == v.ids[i]
}
ghost predicate Safety(v: Variables)
requires v.WF()
{
forall i, j | IsLeader(v, i) && IsLeader(v, j) :: i == j
}
//////////////////////////////////////////////////////////////////////////////
// Proof
//////////////////////////////////////////////////////////////////////////////
// SOLUTION
ghost predicate ChordHeardDominated(v: Variables, start: nat, end: nat)
requires v.IsChord(start, end)
requires v.WF()
{
forall i | v.ValidIdx(i) && Between(start, i, end) ::
v.highest_heard[i] > v.ids[i]
}
// We make this opaque so Dafny does not use it automatically; instead we'll use
// the lemma UseChordDominated when needed. In many proofs opaqueness is a way
// to improve performance, since it prevents the automation from doing too much
// work; in this proof it's only so we can make clear in the proof when this
// invariant is being used.
ghost predicate {:opaque} OnChordHeardDominatesId(v: Variables)
requires v.WF()
{
forall start: nat, end: nat | v.IsChord(start, end) ::
ChordHeardDominated(v, start, end)
}
lemma UseChordDominated(v: Variables, start: nat, end: nat)
requires v.WF()
requires OnChordHeardDominatesId(v)
requires v.IsChord(start, end )
ensures ChordHeardDominated(v, start, end)
{
reveal OnChordHeardDominatesId();
}
// END
ghost predicate Inv(v: Variables)
{
&& v.WF()
// The solution will need more conjuncts
// SOLUTION
&& v.UniqueIds()
&& OnChordHeardDominatesId(v)
// Safety is not needed - we can prove it holds from the other invariants
// END
}
lemma InitImpliesInv(v: Variables)
requires Init(v)
ensures Inv(v)
{
// SOLUTION
forall start: nat, end: nat | v.IsChord(start, end)
ensures false {
}
reveal OnChordHeardDominatesId();
  // END
}
lemma NextPreservesInv(v: Variables, v': Variables)
requires Inv(v)
requires Next(v, v')
ensures Inv(v')
{
var step :| NextStep(v, v', step);
// SOLUTION
var src := step.src;
var dst := NextIdx(v, src);
var message := max(v.highest_heard[src], v.ids[src]);
var dst_new_max := max(v.highest_heard[dst], message);
forall start: nat, end: nat | v'.IsChord(start, end)
ensures ChordHeardDominated(v', start, end)
{
if dst == end {
// the destination ignored the message anyway (because it already knew of a high enough node)
if dst_new_max == v.highest_heard[dst] {
UseChordDominated(v, start, end);
} else if v'.highest_heard[dst] == v.ids[src] {
// the new chord is empty, irrespective of the old state
} else if v'.highest_heard[end] == v.highest_heard[src] {
// extended a chord
UseChordDominated(v, start, src);
}
} else {
UseChordDominated(v, start, end);
}
}
reveal OnChordHeardDominatesId();
  // END
}
lemma InvImpliesSafety(v: Variables)
requires Inv(v)
ensures Safety(v)
{
// the solution gives a long proof here to try to explain what's going on, but
// only a little proof is strictly needed for Dafny
// SOLUTION
forall i: nat, j: nat | IsLeader(v, i) && IsLeader(v, j)
ensures i == j
{
// do this proof by contradiction
if i != j {
UseChordDominated(v, i, i);
// here we have the contradiction already, because i and j can't dominate
// each others ids
}
}
// END
}
|
730 | protocol-verification-fa2023_tmp_tmpw6hy3mjp_demos_ch04_toy_consensus.dfy | // Ported from ivy/examples/ivy/toy_consensus.ivy.
// Ivy only supports first-order logic, which is limited (perhaps in surprising
// ways). In this model of consensus, we use some tricks to model quorums in
// first-order logic without getting into the arithmetic of why sets of n/2+1
// nodes intersect.
type Node(==)
type Quorum(==)
type Choice(==)
ghost predicate Member(n: Node, q: Quorum)
// axiom: any two quorums intersect in at least one node
// SOLUTION
// note we give this without proof: this is in general dangerous! However, here
// we believe it is possible to have Node and Quorum types with this property.
//
// The way we might realize that is to have Node be a finite type (one value for
// each node in the system) and Quorum to capture any subset with strictly more
// than half the nodes. Such a setup guarantees that any two quorums intersect.
// END
lemma {:axiom} QuorumIntersect(q1: Quorum, q2: Quorum) returns (n: Node)
ensures Member(n, q1) && Member(n, q2)
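// A concrete sanity check of that reading (illustrative only; the sets and the
// lemma below are not part of the model): with five nodes 0..4, any two
// "majorities" of three nodes share a member.
lemma ExampleMajoritiesOverlap()
{
  var q1: set<int> := {0, 1, 2};
  var q2: set<int> := {2, 3, 4};
  assert 2 in q1 && 2 in q2;
  assert q1 * q2 != {};
}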
datatype Variables = Variables(
votes: map<Node, set<Choice>>,
// this is one reason why this is "toy" consensus: the decision is a global
// variable rather than being decided at each node individually
decision: set<Choice>
)
{
ghost predicate WF()
{
&& (forall n:Node :: n in votes)
}
}
datatype Step =
| CastVoteStep(n: Node, c: Choice)
| DecideStep(c: Choice, q: Quorum)
ghost predicate CastVote(v: Variables, v': Variables, step: Step)
requires v.WF()
requires step.CastVoteStep?
{
var n := step.n;
&& (v.votes[n] == {})
// learn to read these "functional updates" of maps/sequences:
// this is like v.votes[n] += {step.c} if that was supported
&& (v' == v.(votes := v.votes[n := v.votes[n] + {step.c}]))
}
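// A concrete instance of such an update (illustrative only; the lemma and the
// literal values below are not part of the protocol): updating one key leaves
// every other key of the map unchanged.
lemma ExampleMapUpdate()
{
  var m: map<int, set<int>> := map[0 := {}, 1 := {5}];
  var m' := m[0 := m[0] + {7}];
  assert m'[0] == {7};
  assert m'[1] == {5};
}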
ghost predicate Decide(v: Variables, v': Variables, step: Step)
requires v.WF()
requires step.DecideStep?
{
// if all nodes of a quorum have voted for a value, then that value can be a
// decision
&& (forall n: Node | Member(n, step.q) :: step.c in v.votes[n])
&& v' == v.(decision := v.decision + {step.c})
}
ghost predicate NextStep(v: Variables, v': Variables, step: Step)
{
&& v.WF()
&& match step {
case CastVoteStep(_, _) => CastVote(v, v', step)
case DecideStep(_, _) => Decide(v, v', step)
}
}
lemma NextStepDeterministicGivenStep(v: Variables, step: Step, v'1: Variables, v'2: Variables)
requires NextStep(v, v'1, step)
requires NextStep(v, v'2, step)
ensures v'1 == v'2
{
}
ghost predicate Next(v: Variables, v': Variables)
{
exists step :: NextStep(v, v', step)
}
ghost predicate Init(v: Variables) {
&& v.WF()
&& (forall n :: v.votes[n] == {})
&& v.decision == {}
}
ghost predicate Safety(v: Variables) {
forall c1, c2 :: c1 in v.decision && c2 in v.decision ==> c1 == c2
}
// SOLUTION
ghost predicate ChoiceQuorum(v: Variables, q: Quorum, c: Choice)
requires v.WF()
{
forall n :: Member(n, q) ==> c in v.votes[n]
}
ghost predicate Inv(v: Variables) {
&& v.WF()
&& Safety(v)
&& (forall n, v1, v2 :: v1 in v.votes[n] && v2 in v.votes[n] ==> v1 == v2)
&& (forall c :: c in v.decision ==> exists q:Quorum :: ChoiceQuorum(v, q, c))
}
// END
lemma InitImpliesInv(v: Variables)
requires Init(v)
ensures Inv(v)
{}
lemma InvInductive(v: Variables, v': Variables)
requires Inv(v)
requires Next(v, v')
ensures Inv(v')
{
var step :| NextStep(v, v', step);
// SOLUTION
match step {
case CastVoteStep(n, c) => {
forall c | c in v'.decision
ensures exists q:Quorum :: ChoiceQuorum(v', q, c)
{
var q :| ChoiceQuorum(v, q, c);
assert ChoiceQuorum(v', q, c);
}
return;
}
case DecideStep(c, q) => {
forall c | c in v'.decision
ensures exists q:Quorum :: ChoiceQuorum(v', q, c)
{
var q0 :| ChoiceQuorum(v, q0, c);
assert ChoiceQuorum(v', q0, c);
}
forall c1, c2 | c1 in v'.decision && c2 in v'.decision
ensures c1 == c2
{
var q1 :| ChoiceQuorum(v, q1, c1);
var q2 :| ChoiceQuorum(v, q2, c2);
var n := QuorumIntersect(q1, q2);
}
assert Safety(v');
return;
}
}
// END
}
lemma SafetyHolds(v: Variables, v': Variables)
ensures Init(v) ==> Inv(v)
ensures Inv(v) && Next(v, v') ==> Inv(v')
ensures Inv(v) ==> Safety(v)
{
if Inv(v) && Next(v, v') {
InvInductive(v, v');
}
}
| // Ported from ivy/examples/ivy/toy_consensus.ivy.
// Ivy only supports first-order logic, which is limited (perhaps in surprising
// ways). In this model of consensus, we use some tricks to model quorums in
// first-order logic without getting into the arithmetic of why sets of n/2+1
// nodes intersect.
type Node(==)
type Quorum(==)
type Choice(==)
ghost predicate Member(n: Node, q: Quorum)
// axiom: any two quorums intersect in at least one node
// SOLUTION
// note we give this without proof: this is in general dangerous! However, here
// we believe it is possible to have Node and Quorum types with this property.
//
// The way we might realize that is to have Node be a finite type (one value for
// each node in the system) and Quorum to capture any subset with strictly more
// than half the nodes. Such a setup guarantees that any two quorums intersect.
// END
lemma {:axiom} QuorumIntersect(q1: Quorum, q2: Quorum) returns (n: Node)
ensures Member(n, q1) && Member(n, q2)
datatype Variables = Variables(
votes: map<Node, set<Choice>>,
// this is one reason why this is "toy" consensus: the decision is a global
// variable rather than being decided at each node individually
decision: set<Choice>
)
{
ghost predicate WF()
{
&& (forall n:Node :: n in votes)
}
}
datatype Step =
| CastVoteStep(n: Node, c: Choice)
| DecideStep(c: Choice, q: Quorum)
ghost predicate CastVote(v: Variables, v': Variables, step: Step)
requires v.WF()
requires step.CastVoteStep?
{
var n := step.n;
&& (v.votes[n] == {})
// learn to read these "functional updates" of maps/sequences:
// this is like v.votes[n] += {step.c} if that was supported
&& (v' == v.(votes := v.votes[n := v.votes[n] + {step.c}]))
}
ghost predicate Decide(v: Variables, v': Variables, step: Step)
requires v.WF()
requires step.DecideStep?
{
// if all nodes of a quorum have voted for a value, then that value can be a
// decision
&& (forall n: Node | Member(n, step.q) :: step.c in v.votes[n])
&& v' == v.(decision := v.decision + {step.c})
}
ghost predicate NextStep(v: Variables, v': Variables, step: Step)
{
&& v.WF()
&& match step {
case CastVoteStep(_, _) => CastVote(v, v', step)
case DecideStep(_, _) => Decide(v, v', step)
}
}
lemma NextStepDeterministicGivenStep(v: Variables, step: Step, v'1: Variables, v'2: Variables)
requires NextStep(v, v'1, step)
requires NextStep(v, v'2, step)
ensures v'1 == v'2
{
}
ghost predicate Next(v: Variables, v': Variables)
{
exists step :: NextStep(v, v', step)
}
ghost predicate Init(v: Variables) {
&& v.WF()
&& (forall n :: v.votes[n] == {})
&& v.decision == {}
}
ghost predicate Safety(v: Variables) {
forall c1, c2 :: c1 in v.decision && c2 in v.decision ==> c1 == c2
}
// SOLUTION
ghost predicate ChoiceQuorum(v: Variables, q: Quorum, c: Choice)
requires v.WF()
{
forall n :: Member(n, q) ==> c in v.votes[n]
}
ghost predicate Inv(v: Variables) {
&& v.WF()
&& Safety(v)
&& (forall n, v1, v2 :: v1 in v.votes[n] && v2 in v.votes[n] ==> v1 == v2)
&& (forall c :: c in v.decision ==> exists q:Quorum :: ChoiceQuorum(v, q, c))
}
// END
lemma InitImpliesInv(v: Variables)
requires Init(v)
ensures Inv(v)
{}
lemma InvInductive(v: Variables, v': Variables)
requires Inv(v)
requires Next(v, v')
ensures Inv(v')
{
var step :| NextStep(v, v', step);
// SOLUTION
match step {
case CastVoteStep(n, c) => {
forall c | c in v'.decision
ensures exists q:Quorum :: ChoiceQuorum(v', q, c)
{
var q :| ChoiceQuorum(v, q, c);
}
return;
}
case DecideStep(c, q) => {
forall c | c in v'.decision
ensures exists q:Quorum :: ChoiceQuorum(v', q, c)
{
var q0 :| ChoiceQuorum(v, q0, c);
}
forall c1, c2 | c1 in v'.decision && c2 in v'.decision
ensures c1 == c2
{
var q1 :| ChoiceQuorum(v, q1, c1);
var q2 :| ChoiceQuorum(v, q2, c2);
var n := QuorumIntersect(q1, q2);
}
return;
}
}
// END
}
lemma SafetyHolds(v: Variables, v': Variables)
ensures Init(v) ==> Inv(v)
ensures Inv(v) && Next(v, v') ==> Inv(v')
ensures Inv(v) ==> Safety(v)
{
if Inv(v) && Next(v, v') {
InvInductive(v, v');
}
}
|
731 | protocol-verification-fa2023_tmp_tmpw6hy3mjp_demos_ch06_refinement_proof.dfy | // Analogous to ch04/invariant_proof.dfy, we show what the conditions on a
// refinement (an abstraction function, invariant, an initial condition, and an
// inductive property)
module Types {
type Event(==, 0, !new)
}
import opened Types
module Code {
import opened Types
type Variables(==, 0, !new)
ghost predicate Init(v: Variables)
ghost predicate Next(v: Variables, v': Variables, ev: Event)
ghost predicate IsBehavior(tr: nat -> Event) {
exists ss: nat -> Variables ::
&& Init(ss(0))
&& forall n: nat :: Next(ss(n), ss(n + 1), tr(n))
}
}
module Spec {
import opened Types
type Variables(==, 0, !new)
ghost predicate Init(v: Variables)
ghost predicate Next(v: Variables, v': Variables, ev: Event)
ghost predicate IsBehavior(tr: nat -> Event) {
exists ss: nat -> Variables ::
&& Init(ss(0))
&& forall n: nat :: Next(ss(n), ss(n + 1), tr(n))
}
}
// The proof of refinement is based on supplying these two pieces of data. Note
// that they don't appear in the final statement of Refinement; they're only the
// evidence that shows how to demonstrate refinement one step at a time.
ghost predicate Inv(v: Code.Variables)
ghost function Abstraction(v: Code.Variables): Spec.Variables
// These two properties of the abstraction are sometimes called a "forward
// simulation", to distinguish them from refinement which is the property we're
// trying to achieve. (There is also an analogous "backward simulation" that
// works in the reverse direction of execution and is more complicated - we
// won't need it).
lemma {:axiom} AbstractionInit(v: Code.Variables)
requires Code.Init(v)
ensures Inv(v)
ensures Spec.Init(Abstraction(v))
lemma {:axiom} AbstractionInductive(v: Code.Variables, v': Code.Variables, ev: Event)
requires Inv(v)
requires Code.Next(v, v', ev)
ensures Inv(v')
ensures Spec.Next(Abstraction(v), Abstraction(v'), ev)
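// Read together, these say: from any state satisfying Inv, each code step maps
// under Abstraction to a spec step with the same event. InvAt and RefinementTo
// below chain these per-step facts along an entire behavior.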
// InvAt is a helper lemma to show the invariant always holds using Dafny
// induction.
lemma InvAt(tr: nat -> Event, ss: nat -> Code.Variables, i: nat)
requires Code.Init(ss(0))
requires forall k:nat :: Code.Next(ss(k), ss(k + 1), tr(k))
ensures Inv(ss(i))
{
if i == 0 {
AbstractionInit(ss(0));
} else {
InvAt(tr, ss, i - 1);
AbstractionInductive(ss(i - 1), ss(i), tr(i - 1));
}
}
// RefinementTo is a helper lemma to prove refinement inductively (for a
// specific sequence of states).
lemma RefinementTo(tr: nat -> Event, ss: nat -> Code.Variables, i: nat)
requires forall n: nat :: Code.Next(ss(n), ss(n + 1), tr(n))
requires forall n: nat :: Inv(ss(n))
ensures
var ss' := (j: nat) => Abstraction(ss(j));
&& forall n: nat | n < i :: Spec.Next(ss'(n), ss'(n + 1), tr(n))
{
if i == 0 {
return;
} else {
var ss' := (j: nat) => Abstraction(ss(j));
RefinementTo(tr, ss, i - 1);
AbstractionInductive(ss(i - 1), ss(i), tr(i - 1));
}
}
// Refinement is the key property we use the abstraction and forward simulation
// to prove.
lemma Refinement(tr: nat -> Event)
requires Code.IsBehavior(tr)
ensures Spec.IsBehavior(tr)
{
var ss: nat -> Code.Variables :|
&& Code.Init(ss(0))
&& forall n: nat :: Code.Next(ss(n), ss(n + 1), tr(n));
forall i: nat
ensures Inv(ss(i)) {
InvAt(tr, ss, i);
}
var ss': nat -> Spec.Variables :=
(i: nat) => Abstraction(ss(i));
assert Spec.Init(ss'(0)) by {
AbstractionInit(ss(0));
}
forall n: nat
ensures Spec.Next(ss'(n), ss'(n + 1), tr(n))
{
RefinementTo(tr, ss, n+1);
}
}
| // Analogous to ch04/invariant_proof.dfy, we show what the conditions on a
// refinement (an abstraction function, invariant, an initial condition, and an
// inductive property)
module Types {
type Event(==, 0, !new)
}
import opened Types
module Code {
import opened Types
type Variables(==, 0, !new)
ghost predicate Init(v: Variables)
ghost predicate Next(v: Variables, v': Variables, ev: Event)
ghost predicate IsBehavior(tr: nat -> Event) {
exists ss: nat -> Variables ::
&& Init(ss(0))
&& forall n: nat :: Next(ss(n), ss(n + 1), tr(n))
}
}
module Spec {
import opened Types
type Variables(==, 0, !new)
ghost predicate Init(v: Variables)
ghost predicate Next(v: Variables, v': Variables, ev: Event)
ghost predicate IsBehavior(tr: nat -> Event) {
exists ss: nat -> Variables ::
&& Init(ss(0))
&& forall n: nat :: Next(ss(n), ss(n + 1), tr(n))
}
}
// The proof of refinement is based on supplying these two pieces of data. Note
// that they don't appear in the final statement of Refinement; they're only the
// evidence that shows how to demonstrate refinement one step at a time.
ghost predicate Inv(v: Code.Variables)
ghost function Abstraction(v: Code.Variables): Spec.Variables
// These two properties of the abstraction are sometimes called a "forward
// simulation", to distinguish them from refinement which is the property we're
// trying to achieve. (There is also an analogous "backward simulation" that
// works in the reverse direction of execution and is more complicated - we
// won't need it).
lemma {:axiom} AbstractionInit(v: Code.Variables)
requires Code.Init(v)
ensures Inv(v)
ensures Spec.Init(Abstraction(v))
lemma {:axiom} AbstractionInductive(v: Code.Variables, v': Code.Variables, ev: Event)
requires Inv(v)
requires Code.Next(v, v', ev)
ensures Inv(v')
ensures Spec.Next(Abstraction(v), Abstraction(v'), ev)
// InvAt is a helper lemma to show the invariant always holds using Dafny
// induction.
lemma InvAt(tr: nat -> Event, ss: nat -> Code.Variables, i: nat)
requires Code.Init(ss(0))
requires forall k:nat :: Code.Next(ss(k), ss(k + 1), tr(k))
ensures Inv(ss(i))
{
if i == 0 {
AbstractionInit(ss(0));
} else {
InvAt(tr, ss, i - 1);
AbstractionInductive(ss(i - 1), ss(i), tr(i - 1));
}
}
// RefinementTo is a helper lemma to prove refinement inductively (for a
// specific sequence of states).
lemma RefinementTo(tr: nat -> Event, ss: nat -> Code.Variables, i: nat)
requires forall n: nat :: Code.Next(ss(n), ss(n + 1), tr(n))
requires forall n: nat :: Inv(ss(n))
ensures
var ss' := (j: nat) => Abstraction(ss(j));
&& forall n: nat | n < i :: Spec.Next(ss'(n), ss'(n + 1), tr(n))
{
if i == 0 {
return;
} else {
var ss' := (j: nat) => Abstraction(ss(j));
RefinementTo(tr, ss, i - 1);
AbstractionInductive(ss(i - 1), ss(i), tr(i - 1));
}
}
// Refinement is the key property we use the abstraction and forward simulation
// to prove.
lemma Refinement(tr: nat -> Event)
requires Code.IsBehavior(tr)
ensures Spec.IsBehavior(tr)
{
var ss: nat -> Code.Variables :|
&& Code.Init(ss(0))
&& forall n: nat :: Code.Next(ss(n), ss(n + 1), tr(n));
forall i: nat
ensures Inv(ss(i)) {
InvAt(tr, ss, i);
}
var ss': nat -> Spec.Variables :=
(i: nat) => Abstraction(ss(i));
  AbstractionInit(ss(0));
forall n: nat
ensures Spec.Next(ss'(n), ss'(n + 1), tr(n))
{
RefinementTo(tr, ss, n+1);
}
}
|
732 | protocol-verification-fa2023_tmp_tmpw6hy3mjp_demos_dafny-internals_02-triggers_triggers2.dfy | function f(x: int): int
function ff(x: int): int
lemma {:axiom} ff_eq()
ensures forall x {:trigger ff(x)} :: ff(x) == f(f(x))
lemma {:axiom} ff_eq2()
ensures forall x {:trigger f(f(x))} :: ff(x) == f(f(x))
lemma {:axiom} ff_eq_bad()
// dafny ignores this trigger because it's an obvious loop
ensures forall x {:trigger {f(x)}} :: ff(x) == f(f(x))
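// (Why it loops: with trigger f(x), any term f(t) triggers the instantiation
// x := t, whose conclusion introduces the new term f(f(t)); that term matches
// the trigger again, and so on without bound.)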
lemma use_ff(x: int)
{
ff_eq();
assert f(ff(x)) == ff(f(x));
}
lemma use_ff2(x: int)
{
ff_eq2();
assert f(f(x)) == ff(x);
assert f(ff(x)) == ff(f(x));
}
| function f(x: int): int
function ff(x: int): int
lemma {:axiom} ff_eq()
ensures forall x {:trigger ff(x)} :: ff(x) == f(f(x))
lemma {:axiom} ff_eq2()
ensures forall x {:trigger f(f(x))} :: ff(x) == f(f(x))
lemma {:axiom} ff_eq_bad()
// dafny ignores this trigger because it's an obvious loop
ensures forall x {:trigger {f(x)}} :: ff(x) == f(f(x))
lemma use_ff(x: int)
{
ff_eq();
}
lemma use_ff2(x: int)
{
ff_eq2();
}
|
733 | protocol-verification-fa2023_tmp_tmpw6hy3mjp_demos_dafny-internals_03-encoding_lemma_call.dfy | function f(x: int): int
lemma {:axiom} f_positive(x: int)
requires x >= 0
ensures f(x) >= 0
lemma f_2_pos()
ensures f(2) >= 0
{
f_positive(2);
}
lemma f_1_1_pos()
ensures f(1 + 1) >= 0
{
f_2_pos();
assert 1 + 1 == 2;
}
| function f(x: int): int
lemma {:axiom} f_positive(x: int)
requires x >= 0
ensures f(x) >= 0
lemma f_2_pos()
ensures f(2) >= 0
{
f_positive(2);
}
lemma f_1_1_pos()
ensures f(1 + 1) >= 0
{
f_2_pos();
}
|
734 | protocol-verification-fa2023_tmp_tmpw6hy3mjp_demos_dafny-internals_03-encoding_pair.dfy | // based on https://ethz.ch/content/dam/ethz/special-interest/infk/chair-program-method/pm/documents/Education/Courses/SS2019/Program%20Verification/05-EncodingToSMT.pdf
module DafnyVersion {
datatype Pair = Pair(x: int, y: int)
function pair_x(p: Pair): int {
p.x
}
function pair_y(p: Pair): int {
p.y
}
lemma UsePair() {
assert Pair(1, 2) != Pair(2, 1);
var p := Pair(1, 2);
assert pair_x(p) + pair_y(p) == 3;
assert forall p1, p2 :: pair_x(p1) == pair_x(p2) && pair_y(p1) == pair_y(p2) ==> p1 == p2;
}
}
// Dafny encodes pairs to SMT by emitting the SMT equivalent of the following.
module Encoding {
// We define the new type as a new "sort" in SMT. This will create a new type
// but not give any way to construct or use it.
type Pair(==)
// Then we define _uninterpreted functions_ for all the operations on the
// type. These are all the implicit operations on a DafnyVersion.Pair:
function pair(x: int, y: int): Pair
function pair_x(p: Pair): int
function pair_y(p: Pair): int
// Finally (and this is the interesting bit) we define _axioms_ that assume
// the uninterpreted functions have the expected properties. Getting the
// axioms right is a bit of an art in that we want sound and minimal axioms,
// ones that are efficient for the solver, and we want to fully characterize
// pairs so that proofs go through.
lemma {:axiom} x_defn()
ensures forall x, y :: pair_x(pair(x, y)) == x
lemma {:axiom} y_defn()
ensures forall x, y :: pair_y(pair(x, y)) == y
lemma {:axiom} bijection()
ensures forall p:Pair :: pair(pair_x(p), pair_y(p)) == p
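  // (Note: the injectivity assertion in UseEncoding below relies only on
  // bijection(): once the projections agree,
  // p1 == pair(pair_x(p1), pair_y(p1)) == pair(pair_x(p2), pair_y(p2)) == p2.)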
lemma UseEncoding() {
x_defn();
y_defn();
bijection();
assert pair(1, 2) != pair(2, 1) by {
x_defn();
}
assert pair_y(pair(1, 2)) == 2 by {
y_defn();
}
assert forall p1, p2 |
pair_x(p1) == pair_x(p2) && pair_y(p1) == pair_y(p2)
:: p1 == p2 by {
bijection();
}
}
// Exercises to think about:
// How exactly are the axioms being used in each proof above?
// What happens if we remove the bijection axiom?
  // Can you think of other properties we would expect?
// Are we missing any axioms? How would you know? (hard)
}
| // based on https://ethz.ch/content/dam/ethz/special-interest/infk/chair-program-method/pm/documents/Education/Courses/SS2019/Program%20Verification/05-EncodingToSMT.pdf
module DafnyVersion {
datatype Pair = Pair(x: int, y: int)
function pair_x(p: Pair): int {
p.x
}
function pair_y(p: Pair): int {
p.y
}
lemma UsePair() {
var p := Pair(1, 2);
}
}
// Dafny encodes pairs to SMT by emitting the SMT equivalent of the following.
module Encoding {
// We define the new type as a new "sort" in SMT. This will create a new type
// but not give any way to construct or use it.
type Pair(==)
// Then we define _uninterpreted functions_ for all the operations on the
// type. These are all the implicit operations on a DafnyVersion.Pair:
function pair(x: int, y: int): Pair
function pair_x(p: Pair): int
function pair_y(p: Pair): int
// Finally (and this is the interesting bit) we define _axioms_ that assume
// the uninterpreted functions have the expected properties. Getting the
// axioms right is a bit of an art in that we want sound and minimal axioms,
// ones that are efficient for the solver, and we want to fully characterize
// pairs so that proofs go through.
lemma {:axiom} x_defn()
ensures forall x, y :: pair_x(pair(x, y)) == x
lemma {:axiom} y_defn()
ensures forall x, y :: pair_y(pair(x, y)) == y
lemma {:axiom} bijection()
ensures forall p:Pair :: pair(pair_x(p), pair_y(p)) == p
lemma UseEncoding() {
x_defn();
y_defn();
bijection();
x_defn();
    y_defn();
    bijection();
  }
// Exercises to think about:
// How exactly are the axioms being used in each proof above?
// What happens if we remove the bijection axiom?
  // Can you think of other properties we would expect?
// Are we missing any axioms? How would you know? (hard)
}
|
735 | protocol-verification-fa2023_tmp_tmpw6hy3mjp_exercises_chapter04-invariants_ch03exercise03.dfy |
// Model a lock service that consists of a single server and an
// arbitrary number of clients.
//
// The state of the system includes the server's state (whether the server
// knows that some client holds the lock, and if so which one)
// and the clients' states (for each client, whether that client knows
// it holds the lock).
//
// The system should begin with the server holding the lock.
// An acquire step atomically transfers the lock from the server to some client.
// (Note that we're not modeling the network yet -- the lock disappears from
// the server and appears at a client in a single atomic transition.)
// A release step atomically transfers the lock from the client back to the server.
//
// The safety property is that no two clients ever hold the lock
// simultaneously.
// SOLUTION
datatype ServerGrant = Unlocked | Client(id: nat)
datatype ClientRecord = Released | Acquired
datatype Variables = Variables(
clientCount: nat, /* constant */
server: ServerGrant, clients: seq<ClientRecord>
) {
ghost predicate ValidIdx(idx: int) {
0 <= idx < this.clientCount
}
ghost predicate WellFormed() {
|clients| == this.clientCount
}
}
// END
ghost predicate Init(v:Variables) {
&& v.WellFormed()
// SOLUTION
&& v.server.Unlocked?
&& |v.clients| == v.clientCount
&& forall i | 0 <= i < |v.clients| :: v.clients[i].Released?
// END
}
// SOLUTION
ghost predicate Acquire(v:Variables, v':Variables, id:int) {
&& v.WellFormed()
&& v'.WellFormed()
&& v.ValidIdx(id)
&& v.server.Unlocked?
&& v'.server == Client(id)
&& v'.clients == v.clients[id := Acquired]
&& v'.clientCount == v.clientCount
}
ghost predicate Release(v:Variables, v':Variables, id:int) {
&& v.WellFormed()
&& v'.WellFormed()
&& v.ValidIdx(id)
&& v.clients[id].Acquired?
&& v'.server.Unlocked?
&& v'.clients == v.clients[id := Released]
&& v'.clientCount == v.clientCount
}
// END
// Jay-Normal-Form: pack all the nondeterminism into a single object
// that gets there-exist-ed once.
datatype Step =
// SOLUTION
| AcquireStep(id: int)
| ReleaseStep(id: int)
// END
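// (Concretely: instead of defining Next as a disjunction of separate
// existentials, one per action, the Step datatype records which action fired
// and with which arguments, so Next below needs only a single "exists step".)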
ghost predicate NextStep(v:Variables, v':Variables, step: Step) {
match step
// SOLUTION
case AcquireStep(id) => Acquire(v, v', id)
case ReleaseStep(id) => Release(v, v', id)
// END
}
lemma NextStepDeterministicGivenStep(v:Variables, v':Variables, step: Step)
requires NextStep(v, v', step)
ensures forall v'' | NextStep(v, v'', step) :: v' == v''
{}
ghost predicate Next(v:Variables, v':Variables) {
exists step :: NextStep(v, v', step)
}
// A good definition of safety for the lock server is that no two clients
// may hold the lock simultaneously. This predicate should capture that
// idea in terms of the Variables you have defined.
ghost predicate Safety(v:Variables) {
// SOLUTION
// HAND-GRADE: The examiner must read the definition of Variables and confirm
// that this predicate captures the semantics in the comment at the top of the
// predicate.
forall i,j |
&& 0 <= i < |v.clients|
&& 0 <= j < |v.clients|
&& v.clients[i].Acquired?
&& v.clients[j].Acquired?
:: i == j
// END
}
// This predicate should be true if and only if the client with index `clientIndex`
// has the lock acquired.
// Since you defined the Variables state, you must define this predicate in
// those terms.
ghost predicate ClientHoldsLock(v: Variables, clientIndex: nat)
requires v.WellFormed()
{
// SOLUTION
&& v.server == Client(clientIndex)
// END
}
// Show a behavior that the system can release a lock from clientA and deliver
// it to clientB.
lemma PseudoLiveness(clientA:nat, clientB:nat) returns (behavior:seq<Variables>)
requires clientA == 2
requires clientB == 0
ensures 2 <= |behavior| // precondition for index operators below
ensures Init(behavior[0])
ensures forall i | 0 <= i < |behavior|-1 :: Next(behavior[i], behavior[i+1]) // Behavior satisfies your state machine
ensures forall i | 0 <= i < |behavior| :: Safety(behavior[i]) // Behavior always satisfies the Safety predicate
ensures behavior[|behavior|-1].WellFormed() // precondition for calling ClientHoldsLock
ensures ClientHoldsLock(behavior[1], clientA) // first clientA acquires lock
ensures ClientHoldsLock(behavior[|behavior|-1], clientB) // eventually clientB acquires lock
{
// SOLUTION
var state0 := Variables(clientCount := 3, server := Unlocked, clients := [Released, Released, Released]);
var state1 := Variables(clientCount := 3, server := Client(clientA), clients := [Released, Released, Acquired]);
var state2 := Variables(clientCount := 3, server := Unlocked, clients := [Released, Released, Released]);
var state3 := Variables(clientCount := 3, server := Client(clientB), clients := [Acquired, Released, Released]);
assert NextStep(state0, state1, AcquireStep(clientA));
assert Release(state1, state2, 2);
assert NextStep(state1, state2, ReleaseStep(clientA)); // witness
assert NextStep(state2, state3, AcquireStep(clientB)); // witness
behavior := [state0, state1, state2, state3];
// END
}
|
// Model a lock service that consists of a single server and an
// arbitrary number of clients.
//
// The state of the system includes the server's state (whether the server
// knows that some client holds the lock, and if so which one)
// and the clients' states (for each client, whether that client knows
// it holds the lock).
//
// The system should begin with the server holding the lock.
// An acquire step atomically transfers the lock from the server to some client.
// (Note that we're not modeling the network yet -- the lock disappears from
// the server and appears at a client in a single atomic transition.)
// A release step atomically transfers the lock from the client back to the server.
//
// The safety property is that no two clients ever hold the lock
// simultaneously.
// SOLUTION
datatype ServerGrant = Unlocked | Client(id: nat)
datatype ClientRecord = Released | Acquired
datatype Variables = Variables(
clientCount: nat, /* constant */
server: ServerGrant, clients: seq<ClientRecord>
) {
ghost predicate ValidIdx(idx: int) {
0 <= idx < this.clientCount
}
ghost predicate WellFormed() {
|clients| == this.clientCount
}
}
// END
ghost predicate Init(v:Variables) {
&& v.WellFormed()
// SOLUTION
&& v.server.Unlocked?
&& |v.clients| == v.clientCount
&& forall i | 0 <= i < |v.clients| :: v.clients[i].Released?
// END
}
// SOLUTION
ghost predicate Acquire(v:Variables, v':Variables, id:int) {
&& v.WellFormed()
&& v'.WellFormed()
&& v.ValidIdx(id)
&& v.server.Unlocked?
&& v'.server == Client(id)
&& v'.clients == v.clients[id := Acquired]
&& v'.clientCount == v.clientCount
}
ghost predicate Release(v:Variables, v':Variables, id:int) {
&& v.WellFormed()
&& v'.WellFormed()
&& v.ValidIdx(id)
&& v.clients[id].Acquired?
&& v'.server.Unlocked?
&& v'.clients == v.clients[id := Released]
&& v'.clientCount == v.clientCount
}
// END
// Jay-Normal-Form: pack all the nondeterminism into a single object
// that gets there-exist-ed once.
datatype Step =
// SOLUTION
| AcquireStep(id: int)
| ReleaseStep(id: int)
// END
ghost predicate NextStep(v:Variables, v':Variables, step: Step) {
match step
// SOLUTION
case AcquireStep(id) => Acquire(v, v', id)
case ReleaseStep(id) => Release(v, v', id)
// END
}
lemma NextStepDeterministicGivenStep(v:Variables, v':Variables, step: Step)
requires NextStep(v, v', step)
ensures forall v'' | NextStep(v, v'', step) :: v' == v''
{}
ghost predicate Next(v:Variables, v':Variables) {
exists step :: NextStep(v, v', step)
}
// A good definition of safety for the lock server is that no two clients
// may hold the lock simultaneously. This predicate should capture that
// idea in terms of the Variables you have defined.
ghost predicate Safety(v:Variables) {
// SOLUTION
// HAND-GRADE: The examiner must read the definition of Variables and confirm
// that this predicate captures the semantics in the comment at the top of the
// predicate.
forall i,j |
&& 0 <= i < |v.clients|
&& 0 <= j < |v.clients|
&& v.clients[i].Acquired?
&& v.clients[j].Acquired?
:: i == j
// END
}
// This predicate should be true if and only if the client with index `clientIndex`
// has the lock acquired.
// Since you defined the Variables state, you must define this predicate in
// those terms.
ghost predicate ClientHoldsLock(v: Variables, clientIndex: nat)
requires v.WellFormed()
{
// SOLUTION
&& v.server == Client(clientIndex)
// END
}
// Show a behavior that the system can release a lock from clientA and deliver
// it to clientB.
lemma PseudoLiveness(clientA:nat, clientB:nat) returns (behavior:seq<Variables>)
requires clientA == 2
requires clientB == 0
ensures 2 <= |behavior| // precondition for index operators below
ensures Init(behavior[0])
ensures forall i | 0 <= i < |behavior|-1 :: Next(behavior[i], behavior[i+1]) // Behavior satisfies your state machine
ensures forall i | 0 <= i < |behavior| :: Safety(behavior[i]) // Behavior always satisfies the Safety predicate
ensures behavior[|behavior|-1].WellFormed() // precondition for calling ClientHoldsLock
ensures ClientHoldsLock(behavior[1], clientA) // first clientA acquires lock
ensures ClientHoldsLock(behavior[|behavior|-1], clientB) // eventually clientB acquires lock
{
// SOLUTION
var state0 := Variables(clientCount := 3, server := Unlocked, clients := [Released, Released, Released]);
var state1 := Variables(clientCount := 3, server := Client(clientA), clients := [Released, Released, Acquired]);
var state2 := Variables(clientCount := 3, server := Unlocked, clients := [Released, Released, Released]);
var state3 := Variables(clientCount := 3, server := Client(clientB), clients := [Acquired, Released, Released]);
behavior := [state0, state1, state2, state3];
// END
}
|
736 | pucrs-metodos-formais-t1_tmp_tmp7gvq3cw4_fila.dfy | /*
OK unbounded-size queue backed by circular arrays
OK ghost representation: the collection of the queue's elements plus any other information needed
OK predicate: invariant of the abstract representation associated with the queue's collection
Operations
- OK constructor starts an empty queue
- OK add a new element to the queue -> enfileira()
- OK remove an element from the queue and return its value if the queue contains elements -> desenfileira()
- OK check whether an element belongs to the queue -> contem()
- OK return the number of elements in the queue -> tamanho()
- OK check whether the queue is empty or not -> estaVazia()
- OK concatenate two queues, returning a new queue without modifying either one -> concat()
OK write a Main method testing the implementation
OK switch from naturals to integers
*/
class {:autocontracts} Fila
{
var a: array<int>;
var cauda: nat;
const defaultSize: nat;
ghost var Conteudo: seq<int>;
  // invariant
ghost predicate Valid() {
defaultSize > 0
&& a.Length >= defaultSize
&& 0 <= cauda <= a.Length
&& Conteudo == a[0..cauda]
}
  // start the queue empty, with an initial capacity of 3
constructor ()
ensures Conteudo == []
ensures defaultSize == 3
ensures a.Length == 3
ensures fresh(a)
{
defaultSize := 3;
a := new int[3];
cauda := 0;
Conteudo := [];
}
function tamanho():nat
ensures tamanho() == |Conteudo|
{
cauda
}
function estaVazia(): bool
ensures estaVazia() <==> |Conteudo| == 0
{
cauda == 0
}
method enfileira(e:int)
ensures Conteudo == old(Conteudo) + [e]
{
if (cauda == a.Length) {
var novoArray := new int[cauda + defaultSize];
var i := 0;
forall i | 0 <= i < a.Length
{
novoArray[i] := a[i];
}
a := novoArray;
}
a[cauda] := e;
cauda := cauda + 1;
Conteudo := Conteudo + [e];
}
method desenfileira() returns (e:int)
requires |Conteudo| > 0
ensures e == old(Conteudo)[0]
ensures Conteudo == old(Conteudo)[1..]
{
e := a[0];
cauda := cauda - 1;
forall i | 0 <= i < cauda
{
a[i] := a[i+1];
}
Conteudo := a[0..cauda];
}
method contem(e: int) returns (r:bool)
ensures r <==> exists i :: 0 <= i < cauda && e == a[i]
{
var i := 0;
r:= false;
while i < cauda
invariant 0 <= i <= cauda
invariant forall j: nat :: j < i ==> a[j] != e
{
if (a[i] == e) {
r:= true;
return;
}
i := i + 1;
}
return r;
}
method concat(f2: Fila) returns (r: Fila)
requires Valid()
requires f2.Valid()
ensures r.Conteudo == Conteudo + f2.Conteudo
{
r := new Fila();
var i:= 0;
while i < cauda
invariant 0 <= i <= cauda
invariant 0 <= i <= r.cauda
invariant r.cauda <= r.a.Length
invariant fresh(r.Repr)
invariant r.Valid()
invariant r.Conteudo == Conteudo[0..i]
{
var valor := a[i];
r.enfileira(valor);
i := i + 1;
}
var j := 0;
while j < f2.cauda
invariant 0 <= j <= f2.cauda
invariant 0 <= j <= r.cauda
invariant r.cauda <= r.a.Length
invariant fresh(r.Repr)
invariant r.Valid()
invariant r.Conteudo == Conteudo + f2.Conteudo[0..j]
{
var valor := f2.a[j];
r.enfileira(valor);
j := j + 1;
}
}
}
method Main()
{
var fila := new Fila();
  // enfileira must allocate more space
fila.enfileira(1);
fila.enfileira(2);
fila.enfileira(3);
fila.enfileira(4);
assert fila.Conteudo == [1, 2, 3, 4];
// tamanho
var q := fila.tamanho();
assert q == 4;
// desenfileira
var e := fila.desenfileira();
assert e == 1;
assert fila.Conteudo == [2, 3, 4];
assert fila.tamanho() == 3;
// contem
assert fila.Conteudo == [2, 3, 4];
var r := fila.contem(1);
assert r == false;
assert fila.a[0] == 2;
var r2 := fila.contem(2);
assert r2 == true;
// estaVazia
var vazia := fila.estaVazia();
assert vazia == false;
var outraFila := new Fila();
vazia := outraFila.estaVazia();
assert vazia == true;
// concat
assert fila.Conteudo == [2, 3, 4];
outraFila.enfileira(5);
outraFila.enfileira(6);
outraFila.enfileira(7);
assert outraFila.Conteudo == [5, 6, 7];
var concatenada := fila.concat(outraFila);
assert concatenada.Conteudo == [2,3,4,5,6,7];
}
| /*
OK unbounded-size queue backed by circular arrays
OK ghost representation: the collection of the queue's elements plus any other information needed
OK predicate: invariant of the abstract representation associated with the queue's collection
Operations
- OK constructor starts an empty queue
- OK add a new element to the queue -> enfileira()
- OK remove an element from the queue and return its value if the queue contains elements -> desenfileira()
- OK check whether an element belongs to the queue -> contem()
- OK return the number of elements in the queue -> tamanho()
- OK check whether the queue is empty or not -> estaVazia()
- OK concatenate two queues, returning a new queue without modifying either one -> concat()
OK write a Main method testing the implementation
OK switch from naturals to integers
*/
class {:autocontracts} Fila
{
var a: array<int>;
var cauda: nat;
const defaultSize: nat;
ghost var Conteudo: seq<int>;
  // invariant
ghost predicate Valid() {
defaultSize > 0
&& a.Length >= defaultSize
&& 0 <= cauda <= a.Length
&& Conteudo == a[0..cauda]
}
  // start the queue empty, with an initial capacity of 3
constructor ()
ensures Conteudo == []
ensures defaultSize == 3
ensures a.Length == 3
ensures fresh(a)
{
defaultSize := 3;
a := new int[3];
cauda := 0;
Conteudo := [];
}
function tamanho():nat
ensures tamanho() == |Conteudo|
{
cauda
}
function estaVazia(): bool
ensures estaVazia() <==> |Conteudo| == 0
{
cauda == 0
}
method enfileira(e:int)
ensures Conteudo == old(Conteudo) + [e]
{
if (cauda == a.Length) {
var novoArray := new int[cauda + defaultSize];
var i := 0;
forall i | 0 <= i < a.Length
{
novoArray[i] := a[i];
}
a := novoArray;
}
a[cauda] := e;
cauda := cauda + 1;
Conteudo := Conteudo + [e];
}
method desenfileira() returns (e:int)
requires |Conteudo| > 0
ensures e == old(Conteudo)[0]
ensures Conteudo == old(Conteudo)[1..]
{
e := a[0];
cauda := cauda - 1;
forall i | 0 <= i < cauda
{
a[i] := a[i+1];
}
Conteudo := a[0..cauda];
}
method contem(e: int) returns (r:bool)
ensures r <==> exists i :: 0 <= i < cauda && e == a[i]
{
var i := 0;
r:= false;
while i < cauda
{
if (a[i] == e) {
r:= true;
return;
}
i := i + 1;
}
return r;
}
method concat(f2: Fila) returns (r: Fila)
requires Valid()
requires f2.Valid()
ensures r.Conteudo == Conteudo + f2.Conteudo
{
r := new Fila();
var i:= 0;
while i < cauda
{
var valor := a[i];
r.enfileira(valor);
i := i + 1;
}
var j := 0;
while j < f2.cauda
{
var valor := f2.a[j];
r.enfileira(valor);
j := j + 1;
}
}
}
method Main()
{
var fila := new Fila();
  // enfileira must allocate more space
fila.enfileira(1);
fila.enfileira(2);
fila.enfileira(3);
fila.enfileira(4);
// tamanho
var q := fila.tamanho();
// desenfileira
var e := fila.desenfileira();
// contem
var r := fila.contem(1);
var r2 := fila.contem(2);
// estaVazia
var vazia := fila.estaVazia();
var outraFila := new Fila();
vazia := outraFila.estaVazia();
// concat
outraFila.enfileira(5);
outraFila.enfileira(6);
outraFila.enfileira(7);
var concatenada := fila.concat(outraFila);
}
|
737 | repo-8967-Ironclad_tmp_tmp4q25en_1_ironclad-apps_src_Dafny_Libraries_Util_seqs_simple.dfy | static lemma lemma_vacuous_statement_about_a_sequence(intseq:seq<int>, j:int)
requires 0<=j<|intseq|;
ensures intseq[0..j]==intseq[..j];
{
}
static lemma lemma_painful_statement_about_a_sequence(intseq:seq<int>)
ensures intseq==intseq[..|intseq|];
{
}
static lemma lemma_obvious_statement_about_a_sequence(boolseq:seq<bool>, j:int)
requires 0<=j<|boolseq|-1;
ensures boolseq[1..][j] == boolseq[j+1];
{
}
static lemma lemma_obvious_statement_about_a_sequence_int(intseq:seq<int>, j:int)
requires 0<=j<|intseq|-1;
ensures intseq[1..][j] == intseq[j+1];
{
}
static lemma lemma_straightforward_statement_about_a_sequence(intseq:seq<int>, j:int)
requires 0<=j<|intseq|;
ensures intseq[..j] + intseq[j..] == intseq;
{
}
static lemma lemma_sequence_reduction(s:seq<int>, b:nat)
requires 0<b<|s|;
ensures s[0..b][0..b-1] == s[0..b-1];
{
var t := s[0..b];
forall (i | 0<=i<b-1)
ensures s[0..b][0..b-1][i] == s[0..b-1][i];
{
}
}
static lemma lemma_seq_concatenation_associative(a:seq<int>, b:seq<int>, c:seq<int>)
ensures (a+b)+c == a+(b+c);
{
}
static lemma lemma_subseq_concatenation(s: seq<int>, left: int, middle: int, right: int)
requires 0 <= left <= middle <= right <= |s|;
ensures s[left..right] == s[left..middle] + s[middle..right];
{
}
static lemma lemma_seq_equality(a:seq<int>, b:seq<int>, len:int)
requires |a| == |b| == len;
requires forall i {:trigger a[i]}{:trigger b[i]} :: 0 <= i < len ==> a[i] == b[i];
ensures a == b;
{
assert forall i :: 0 <= i < len ==> a[i] == b[i];
}
static lemma lemma_seq_suffix(s: seq<int>, prefix_length: int, index: int)
requires 0 <= prefix_length <= index < |s|;
ensures s[index] == s[prefix_length..][index - prefix_length];
{
}
| static lemma lemma_vacuous_statement_about_a_sequence(intseq:seq<int>, j:int)
requires 0<=j<|intseq|;
ensures intseq[0..j]==intseq[..j];
{
}
static lemma lemma_painful_statement_about_a_sequence(intseq:seq<int>)
ensures intseq==intseq[..|intseq|];
{
}
static lemma lemma_obvious_statement_about_a_sequence(boolseq:seq<bool>, j:int)
requires 0<=j<|boolseq|-1;
ensures boolseq[1..][j] == boolseq[j+1];
{
}
static lemma lemma_obvious_statement_about_a_sequence_int(intseq:seq<int>, j:int)
requires 0<=j<|intseq|-1;
ensures intseq[1..][j] == intseq[j+1];
{
}
static lemma lemma_straightforward_statement_about_a_sequence(intseq:seq<int>, j:int)
requires 0<=j<|intseq|;
ensures intseq[..j] + intseq[j..] == intseq;
{
}
static lemma lemma_sequence_reduction(s:seq<int>, b:nat)
requires 0<b<|s|;
ensures s[0..b][0..b-1] == s[0..b-1];
{
var t := s[0..b];
forall (i | 0<=i<b-1)
ensures s[0..b][0..b-1][i] == s[0..b-1][i];
{
}
}
static lemma lemma_seq_concatenation_associative(a:seq<int>, b:seq<int>, c:seq<int>)
ensures (a+b)+c == a+(b+c);
{
}
static lemma lemma_subseq_concatenation(s: seq<int>, left: int, middle: int, right: int)
requires 0 <= left <= middle <= right <= |s|;
ensures s[left..right] == s[left..middle] + s[middle..right];
{
}
static lemma lemma_seq_equality(a:seq<int>, b:seq<int>, len:int)
requires |a| == |b| == len;
requires forall i {:trigger a[i]}{:trigger b[i]} :: 0 <= i < len ==> a[i] == b[i];
ensures a == b;
{
}
static lemma lemma_seq_suffix(s: seq<int>, prefix_length: int, index: int)
requires 0 <= prefix_length <= index < |s|;
ensures s[index] == s[prefix_length..][index - prefix_length];
{
}
|
738 | sat_dfy_tmp_tmpfcyj8am9_dfy_Seq.dfy | module Seq {
function seq_sum(s: seq<int>) : (sum: int)
{
if s == [] then
0
else
var x := s[0];
var remaining := s[1..];
x + seq_sum(remaining)
}
lemma SeqPartsSameSum(s: seq<int>, s1: seq<int>, s2: seq<int>)
requires s == s1 + s2
ensures seq_sum(s) == seq_sum(s1) + seq_sum(s2)
{
if s == [] {
assert s1 == [];
assert s2 == [];
} else if s1 == [] {
assert s2 == s;
} else {
var x := s1[0];
var s1' := s1[1..];
assert s == [x] + s1' + s2;
SeqPartsSameSum(s[1..], s1[1..], s2);
}
}
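  // Proof idea for the next lemma: pick any element x of s1, locate one
  // occurrence of x in each sequence, split both sequences around those
  // positions with SeqPartsSameSum, and recurse on the remainders, which are
  // again permutations of each other.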
lemma DifferentPermutationSameSum(s1: seq<int>, s2: seq<int>)
requires multiset(s1) == multiset(s2)
ensures seq_sum(s1) == seq_sum(s2)
{
if s1 == [] {
assert s2 == [];
} else {
var x :| x in s1;
assert x in s1;
assert multiset(s1)[x] > 0;
assert multiset(s2)[x] > 0;
assert x in s2;
var i1, i2 :| 0 <= i1 < |s1| && 0 <= i2 < |s2| && s1[i1] == s2[i2] && s1[i1] == x;
var remaining1 := s1[..i1] + s1[i1+1..];
assert s1 == s1[..i1] + s1[i1..];
assert s1 == s1[..i1] + [x] + s1[i1+1..];
assert seq_sum(s1) == seq_sum(s1[..i1] + [x] + s1[i1+1..]);
SeqPartsSameSum(s1[..i1+1], s1[..i1], [x]);
SeqPartsSameSum(s1, s1[..i1+1], s1[i1+1..]);
assert seq_sum(s1) == seq_sum(s1[..i1]) + x + seq_sum(s1[i1+1..]);
SeqPartsSameSum(remaining1, s1[..i1], s1[i1+1..]);
assert multiset(s1) == multiset(remaining1 + [x]);
assert seq_sum(s1) == seq_sum(remaining1) + x;
assert multiset(s1) == multiset(remaining1) + multiset([x]);
assert multiset(s1) - multiset([x]) == multiset(remaining1);
var remaining2 := s2[..i2] + s2[i2+1..];
assert s2 == s2[..i2] + s2[i2..];
assert s2 == s2[..i2] + [x] + s2[i2+1..];
assert seq_sum(s2) == seq_sum(s2[..i2] + [x] + s2[i2+1..]);
SeqPartsSameSum(s2[..i2+1], s2[..i2], [x]);
SeqPartsSameSum(s2, s2[..i2+1], s2[i2+1..]);
assert seq_sum(s2) == seq_sum(s2[..i2]) + x + seq_sum(s2[i2+1..]);
SeqPartsSameSum(remaining2, s2[..i2], s2[i2+1..]);
assert multiset(s2) == multiset(remaining2 + [x]);
assert seq_sum(s2) == seq_sum(remaining2) + x;
assert multiset(s2) == multiset(remaining2) + multiset([x]);
assert multiset(s2) - multiset([x]) == multiset(remaining2);
DifferentPermutationSameSum(remaining1, remaining2);
assert seq_sum(remaining1) == seq_sum(remaining2);
assert seq_sum(s1) == seq_sum(s2);
}
}
}
| module Seq {
function seq_sum(s: seq<int>) : (sum: int)
{
if s == [] then
0
else
var x := s[0];
var remaining := s[1..];
x + seq_sum(remaining)
}
lemma SeqPartsSameSum(s: seq<int>, s1: seq<int>, s2: seq<int>)
requires s == s1 + s2
ensures seq_sum(s) == seq_sum(s1) + seq_sum(s2)
{
if s == [] {
} else if s1 == [] {
} else {
var x := s1[0];
var s1' := s1[1..];
SeqPartsSameSum(s[1..], s1[1..], s2);
}
}
lemma DifferentPermutationSameSum(s1: seq<int>, s2: seq<int>)
requires multiset(s1) == multiset(s2)
ensures seq_sum(s1) == seq_sum(s2)
{
if s1 == [] {
} else {
var x :| x in s1;
var i1, i2 :| 0 <= i1 < |s1| && 0 <= i2 < |s2| && s1[i1] == s2[i2] && s1[i1] == x;
var remaining1 := s1[..i1] + s1[i1+1..];
SeqPartsSameSum(s1[..i1+1], s1[..i1], [x]);
SeqPartsSameSum(s1, s1[..i1+1], s1[i1+1..]);
SeqPartsSameSum(remaining1, s1[..i1], s1[i1+1..]);
var remaining2 := s2[..i2] + s2[i2+1..];
SeqPartsSameSum(s2[..i2+1], s2[..i2], [x]);
SeqPartsSameSum(s2, s2[..i2+1], s2[i2+1..]);
SeqPartsSameSum(remaining2, s2[..i2], s2[i2+1..]);
DifferentPermutationSameSum(remaining1, remaining2);
}
}
}
|
739 | se2011_tmp_tmp71eb82zt_ass1_ex4.dfy | method Eval(x:int) returns (r:int) // do not change
requires x >= 0
ensures r == x*x
{ // do not change
var y:int := x; // do not change
var z:int := 0; // do not change
while y>0 // do not change
invariant 0 <= y <= x && z == x*(x-y)
decreases y
{ // do not change
z := z + x; // do not change
y := y - 1; // do not change
} // do not change
return z; // do not change
} // do not change
| method Eval(x:int) returns (r:int) // do not change
requires x >= 0
ensures r == x*x
{ // do not change
var y:int := x; // do not change
var z:int := 0; // do not change
while y>0 // do not change
{ // do not change
z := z + x; // do not change
y := y - 1; // do not change
} // do not change
return z; // do not change
} // do not change
|
740 | se2011_tmp_tmp71eb82zt_ass1_ex6.dfy | method Ceiling7(n:nat) returns (k:nat)
requires n >= 0
ensures k == n-(n%7)
{
k := n-(n%7);
}
method test7() {
var k: nat;
k := Ceiling7(43);
assert k == 42;
k := Ceiling7(6);
assert k == 0;
k := Ceiling7(1000);
assert k == 994;
k := Ceiling7(7);
assert k == 7;
k := Ceiling7(70);
assert k == 70;
}
| method Ceiling7(n:nat) returns (k:nat)
requires n >= 0
ensures k == n-(n%7)
{
k := n-(n%7);
}
method test7() {
var k: nat;
k := Ceiling7(43);
k := Ceiling7(6);
k := Ceiling7(1000);
k := Ceiling7(7);
k := Ceiling7(70);
}
|
741 | se2011_tmp_tmp71eb82zt_ass2_ex2.dfy | // ex2
// this was me playing around to try and get an ensures for the method
/*predicate method check(a: array<int>, seclar:int)
requires a.Length > 0
reads a
{ ensures exists i :: 0 <= i < a.Length && forall j :: (0 <= j < a.Length && j != i) ==> (a[i] >= a[j]) && (seclar <= a[i]) && ( if a[j] != a[i] then seclar >= a[j] else seclar <= a[j]) } */
method SecondLargest(a:array<int>) returns (seclar:int)
requires a.Length > 0
//ensures exists i :: 0 <= i < a.Length && forall j :: (0 <= j < a.Length && j != i) ==> (a[i] >= a[j]) && (seclar <= a[i]) && ( if a[j] != a[i] then seclar >= a[j] else seclar <= a[j])
{
// if length = 1, return first element
if a.Length == 1
{ seclar := a[0]; }
else
{
var l, s, i: int := 0, 0, 0;
// set initial largest and second largest
if a[1] > a[0]
{ l := a[1]; s := a[0]; }
else
{ l := a[0]; s := a[1]; }
while i < a.Length
invariant 0 <= i <= a.Length
invariant forall j :: (0 <= j < i) ==> l >= a[j]
invariant s <= l
{
      if a[i] > l // check if curr is greater than largest and set l and s
{ s := l; l := a[i]; }
if a[i] > s && a[i] < l // check if curr is greater than s and set s
{ s := a[i]; }
if s == l && s > a[i] // check s is not the same as l
{ s := a[i]; }
i := i+1;
}
seclar := s;
}
}
method Main()
{
var a: array<int> := new int[][1];
assert a[0] == 1;
var x:int := SecondLargest(a);
// assert x == 1;
var b: array<int> := new int[][9,1];
assert b[0] == 9 && b[1] == 1;
x := SecondLargest(b);
// assert x == 1;
var c: array<int> := new int[][1,9];
assert c[0] == 1 && c[1] == 9;
x := SecondLargest(c);
// assert x == 1;
var d: array<int> := new int[][2,42,-4,123,42];
assert d[0] == 2 && d[1] == 42 && d[2] == -4 && d[3] == 123 && d[4] == 42;
x := SecondLargest(d);
// assert x == 42;
var e: array<int> := new int[][1,9,8];
assert e[0] == 1 && e[1] == 9 && e[2] == 8;
x := SecondLargest(e);
// assert x == 8;
}
| // ex2
// this was me playing around to try and get an ensures for the method
/*predicate method check(a: array<int>, seclar:int)
requires a.Length > 0
reads a
{ ensures exists i :: 0 <= i < a.Length && forall j :: (0 <= j < a.Length && j != i) ==> (a[i] >= a[j]) && (seclar <= a[i]) && ( if a[j] != a[i] then seclar >= a[j] else seclar <= a[j]) } */
method SecondLargest(a:array<int>) returns (seclar:int)
requires a.Length > 0
//ensures exists i :: 0 <= i < a.Length && forall j :: (0 <= j < a.Length && j != i) ==> (a[i] >= a[j]) && (seclar <= a[i]) && ( if a[j] != a[i] then seclar >= a[j] else seclar <= a[j])
{
// if length = 1, return first element
if a.Length == 1
{ seclar := a[0]; }
else
{
var l, s, i: int := 0, 0, 0;
// set initial largest and second largest
if a[1] > a[0]
{ l := a[1]; s := a[0]; }
else
{ l := a[0]; s := a[1]; }
while i < a.Length
{
      if a[i] > l // check if curr is greater than largest and set l and s
{ s := l; l := a[i]; }
if a[i] > s && a[i] < l // check if curr is greater than s and set s
{ s := a[i]; }
if s == l && s > a[i] // check s is not the same as l
{ s := a[i]; }
i := i+1;
}
seclar := s;
}
}
method Main()
{
var a: array<int> := new int[][1];
var x:int := SecondLargest(a);
// assert x == 1;
var b: array<int> := new int[][9,1];
x := SecondLargest(b);
// assert x == 1;
var c: array<int> := new int[][1,9];
x := SecondLargest(c);
// assert x == 1;
var d: array<int> := new int[][2,42,-4,123,42];
x := SecondLargest(d);
// assert x == 42;
var e: array<int> := new int[][1,9,8];
x := SecondLargest(e);
// assert x == 8;
}
|
742 | software-specification-p1_tmp_tmpz9x6mpxb_BoilerPlate_Ex1.dfy | datatype Tree<V> = Leaf(V) | SingleNode(V, Tree<V>) | DoubleNode(V, Tree<V>, Tree<V>)
datatype Code<V> = CLf(V) | CSNd(V) | CDNd(V)
function serialise<V>(t : Tree<V>) : seq<Code<V>>
decreases t
{
match t {
case Leaf(v) => [ CLf(v) ]
case SingleNode(v, t) => serialise(t) + [ CSNd(v) ]
case DoubleNode(v, t1, t2) => serialise(t2) + serialise(t1) + [ CDNd(v) ]
}
}
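// Editor's note (illustrative, not part of the original file): serialise emits codes
// in post-order, children before the parent, e.g.
//   serialise(SingleNode(1, Leaf(2))) == serialise(Leaf(2)) + [CSNd(1)] == [CLf(2), CSNd(1)]
// so deserialiseAux below can rebuild trees in a single left-to-right pass over the codes.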
// Ex 1
function deserialiseAux<T>(codes: seq<Code<T>>, trees: seq<Tree<T>>): seq<Tree<T>>
requires |codes| > 0 || |trees| > 0
ensures |deserialiseAux(codes, trees)| >= 0
decreases codes
{
if |codes| == 0 then trees
else
match codes[0] {
case CLf(v) => deserialiseAux(codes[1..], trees + [Leaf(v)])
case CSNd(v) => if (|trees| >= 1) then deserialiseAux(codes[1..], trees[..|trees|-1] + [SingleNode(v, trees[|trees|-1])]) else trees
case CDNd(v) => if (|trees| >= 2) then deserialiseAux(codes[1..], trees[..|trees|-2] + [DoubleNode(v, trees[|trees|-1], trees[|trees|-2])]) else trees
}
}
function deserialise<T>(s:seq<Code<T>>):seq<Tree<T>>
requires |s| > 0
{
deserialiseAux(s, [])
}
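// Editor's sketch of deserialiseAux on a concrete input (an added worked example,
// derived only from the definitions above): for codes [CLf(3), CSNd(9), CLf(5)]
// the accumulated tree sequence evolves as
//   []  ->  [Leaf(3)]  ->  [SingleNode(9, Leaf(3))]  ->  [SingleNode(9, Leaf(3)), Leaf(5)]
// which is exactly what testDeserializeWithASingleNode checks further below.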
// Ex 2
method testSerializeWithASingleLeaf()
{
var tree := Leaf(42);
var result := serialise(tree);
assert result == [CLf(42)];
}
method testSerializeNullValues()
{
var tree := Leaf(null);
var result := serialise(tree);
assert result == [CLf(null)];
}
method testSerializeWithAllElements()
{
var tree: Tree<int> := DoubleNode(9, Leaf(6), DoubleNode(2, Leaf(5), SingleNode(4, Leaf(3))));
var codes := serialise(tree);
assert |codes| == 6;
var expectedCodes := [CLf(3), CSNd(4), CLf(5), CDNd(2), CLf(6), CDNd(9)];
assert codes == expectedCodes;
}
// Ex 3
method testDeseraliseWithASingleLeaf() {
var codes: seq<Code<int>> := [CLf(9)];
var trees := deserialise(codes);
assert |trees| == 1;
var expectedTree := Leaf(9);
assert trees[0] == expectedTree;
}
method testDeserializeWithASingleNode()
{
var codes: seq<Code<int>> := [CLf(3), CSNd(9), CLf(5)];
var trees := deserialise(codes);
assert |trees| == 2;
var expectedTree1 := SingleNode(9, Leaf(3));
var expectedTree2 := Leaf(5);
assert trees[0] == expectedTree1;
assert trees[1] == expectedTree2;
}
method testDeserialiseWithAllElements()
{
var codes: seq<Code<int>> := [CLf(3), CSNd(4), CLf(5), CDNd(2), CLf(6), CDNd(9)];
var trees := deserialise(codes);
assert |trees| == 1;
var expectedTree := DoubleNode(9, Leaf(6), DoubleNode(2, Leaf(5), SingleNode(4, Leaf(3))));
assert trees[0] == expectedTree;
}
// Ex 4
lemma SerialiseLemma<V>(t: Tree<V>)
ensures deserialise(serialise(t)) == [t]
{
assert serialise(t) + [] == serialise(t);
calc{
deserialise(serialise(t));
==
deserialise(serialise(t) + []);
==
deserialiseAux(serialise(t) + [], []);
== { DeserialisetAfterSerialiseLemma(t, [], []); }
deserialiseAux([],[] + [t]);
==
deserialiseAux([],[t]);
==
[t];
}
}
lemma DeserialisetAfterSerialiseLemma<T> (t : Tree<T>, cds : seq<Code<T>>, ts : seq<Tree<T>>)
ensures deserialiseAux(serialise(t) + cds, ts) == deserialiseAux(cds, ts + [t])
{
match t{
case Leaf(x) =>
calc{
deserialiseAux(serialise(t) + cds, ts);
==
deserialiseAux([CLf(x)] + cds, ts);
==
deserialiseAux(cds, ts + [Leaf(x)]);
==
deserialiseAux(cds, ts + [t]);
}
case SingleNode(x,t1) =>
assert serialise(t1) + [ CSNd(x) ] + cds == serialise(t1) + ([ CSNd(x) ] + cds);
calc{
deserialiseAux(serialise(t) + cds, ts);
==
deserialiseAux( serialise(t1) + [CSNd(x)] + cds ,ts);
==
deserialiseAux((serialise(t1) + [CSNd(x)] + cds),ts);
== { DeserialisetAfterSerialiseLemma(t1 , [ CSNd(x) ], ts); }
deserialiseAux(serialise(t1)+ [CSNd(x)] + cds, ts );
==
deserialiseAux( ([CSNd(x)] + cds), ts + [ t1 ]);
==
deserialiseAux(cds, ts + [SingleNode(x,t1)]);
==
deserialiseAux(cds, ts + [t]);
}
case DoubleNode(x,t1,t2) =>
assert serialise(t2) + serialise(t1) + [ CDNd(x) ] + cds == serialise(t2) + (serialise(t1) + [ CDNd(x) ] + cds);
assert serialise(t1) + [CDNd(x)] + cds == serialise(t1) + ([CDNd(x)] + cds);
assert (ts + [ t2 ]) + [ t1 ] == ts + [t2,t1];
calc{
deserialiseAux(serialise(t) + cds, ts);
==
deserialiseAux(serialise(t2) + serialise(t1) + [CDNd(x)] + cds ,ts);
==
deserialiseAux(serialise(t2) + (serialise(t1) + [CDNd(x)] + cds),ts);
== { DeserialisetAfterSerialiseLemma(t2, serialise(t1) + [ CDNd(x) ], ts); }
deserialiseAux(serialise(t1)+ [CDNd(x)] + cds, ts + [ t2 ]);
==
deserialiseAux(serialise(t1) + ([CDNd(x)] + cds), ts + [ t2 ]);
== { DeserialisetAfterSerialiseLemma(t1, [ CDNd(x) ] + cds, ts + [ t2 ]); }
deserialiseAux([ CDNd(x) ] + cds, (ts + [ t2 ]) + [t1]);
==
deserialiseAux([ CDNd(x) ] + cds, ts + [t2, t1]);
==
deserialiseAux([CDNd(x)] + cds, ts + [t2 , t1]);
==
deserialiseAux(cds, ts + [DoubleNode(x,t1,t2)]);
==
deserialiseAux(cds, ts + [t]);
}
}
}
| datatype Tree<V> = Leaf(V) | SingleNode(V, Tree<V>) | DoubleNode(V, Tree<V>, Tree<V>)
datatype Code<V> = CLf(V) | CSNd(V) | CDNd(V)
function serialise<V>(t : Tree<V>) : seq<Code<V>>
{
match t {
case Leaf(v) => [ CLf(v) ]
case SingleNode(v, t) => serialise(t) + [ CSNd(v) ]
case DoubleNode(v, t1, t2) => serialise(t2) + serialise(t1) + [ CDNd(v) ]
}
}
// Ex 1
function deserialiseAux<T>(codes: seq<Code<T>>, trees: seq<Tree<T>>): seq<Tree<T>>
requires |codes| > 0 || |trees| > 0
ensures |deserialiseAux(codes, trees)| >= 0
{
if |codes| == 0 then trees
else
match codes[0] {
case CLf(v) => deserialiseAux(codes[1..], trees + [Leaf(v)])
case CSNd(v) => if (|trees| >= 1) then deserialiseAux(codes[1..], trees[..|trees|-1] + [SingleNode(v, trees[|trees|-1])]) else trees
case CDNd(v) => if (|trees| >= 2) then deserialiseAux(codes[1..], trees[..|trees|-2] + [DoubleNode(v, trees[|trees|-1], trees[|trees|-2])]) else trees
}
}
function deserialise<T>(s:seq<Code<T>>):seq<Tree<T>>
requires |s| > 0
{
deserialiseAux(s, [])
}
// Ex 2
method testSerializeWithASingleLeaf()
{
var tree := Leaf(42);
var result := serialise(tree);
}
method testSerializeNullValues()
{
var tree := Leaf(null);
var result := serialise(tree);
}
method testSerializeWithAllElements()
{
var tree: Tree<int> := DoubleNode(9, Leaf(6), DoubleNode(2, Leaf(5), SingleNode(4, Leaf(3))));
var codes := serialise(tree);
var expectedCodes := [CLf(3), CSNd(4), CLf(5), CDNd(2), CLf(6), CDNd(9)];
}
// Ex 3
method testDeseraliseWithASingleLeaf() {
var codes: seq<Code<int>> := [CLf(9)];
var trees := deserialise(codes);
var expectedTree := Leaf(9);
}
method testDeserializeWithASingleNode()
{
var codes: seq<Code<int>> := [CLf(3), CSNd(9), CLf(5)];
var trees := deserialise(codes);
var expectedTree1 := SingleNode(9, Leaf(3));
var expectedTree2 := Leaf(5);
}
method testDeserialiseWithAllElements()
{
var codes: seq<Code<int>> := [CLf(3), CSNd(4), CLf(5), CDNd(2), CLf(6), CDNd(9)];
var trees := deserialise(codes);
var expectedTree := DoubleNode(9, Leaf(6), DoubleNode(2, Leaf(5), SingleNode(4, Leaf(3))));
}
// Ex 4
lemma SerialiseLemma<V>(t: Tree<V>)
ensures deserialise(serialise(t)) == [t]
{
calc{
deserialise(serialise(t));
==
deserialise(serialise(t) + []);
==
deserialiseAux(serialise(t) + [], []);
== { DeserialisetAfterSerialiseLemma(t, [], []); }
deserialiseAux([],[] + [t]);
==
deserialiseAux([],[t]);
==
[t];
}
}
lemma DeserialisetAfterSerialiseLemma<T> (t : Tree<T>, cds : seq<Code<T>>, ts : seq<Tree<T>>)
ensures deserialiseAux(serialise(t) + cds, ts) == deserialiseAux(cds, ts + [t])
{
match t{
case Leaf(x) =>
calc{
deserialiseAux(serialise(t) + cds, ts);
==
deserialiseAux([CLf(x)] + cds, ts);
==
deserialiseAux(cds, ts + [Leaf(x)]);
==
deserialiseAux(cds, ts + [t]);
}
case SingleNode(x,t1) =>
calc{
deserialiseAux(serialise(t) + cds, ts);
==
deserialiseAux( serialise(t1) + [CSNd(x)] + cds ,ts);
==
deserialiseAux((serialise(t1) + [CSNd(x)] + cds),ts);
== { DeserialisetAfterSerialiseLemma(t1 , [ CSNd(x) ], ts); }
deserialiseAux(serialise(t1)+ [CSNd(x)] + cds, ts );
==
deserialiseAux( ([CSNd(x)] + cds), ts + [ t1 ]);
==
deserialiseAux(cds, ts + [SingleNode(x,t1)]);
==
deserialiseAux(cds, ts + [t]);
}
case DoubleNode(x,t1,t2) =>
calc{
deserialiseAux(serialise(t) + cds, ts);
==
deserialiseAux(serialise(t2) + serialise(t1) + [CDNd(x)] + cds ,ts);
==
deserialiseAux(serialise(t2) + (serialise(t1) + [CDNd(x)] + cds),ts);
== { DeserialisetAfterSerialiseLemma(t2, serialise(t1) + [ CDNd(x) ], ts); }
deserialiseAux(serialise(t1)+ [CDNd(x)] + cds, ts + [ t2 ]);
==
deserialiseAux(serialise(t1) + ([CDNd(x)] + cds), ts + [ t2 ]);
== { DeserialisetAfterSerialiseLemma(t1, [ CDNd(x) ] + cds, ts + [ t2 ]); }
deserialiseAux([ CDNd(x) ] + cds, (ts + [ t2 ]) + [t1]);
==
deserialiseAux([ CDNd(x) ] + cds, ts + [t2, t1]);
==
deserialiseAux([CDNd(x)] + cds, ts + [t2 , t1]);
==
deserialiseAux(cds, ts + [DoubleNode(x,t1,t2)]);
==
deserialiseAux(cds, ts + [t]);
}
}
}
|
743 | software_analysis_tmp_tmpmt6bo9sf_ss.dfy | method find_min_index(a : array<int>, s: int, e: int) returns (min_i: int)
requires a.Length > 0
requires 0 <= s < a.Length
requires e <= a.Length
requires e > s
ensures min_i >= s
ensures min_i < e
ensures forall k: int :: s <= k < e ==> a[min_i] <= a[k]
{
min_i := s;
var i : int := s;
while i < e
decreases e - i // loop variant
invariant s <= i <= e
invariant s <= min_i < e
// unnecessary invariant
// invariant i < e ==> min_i <= i
invariant forall k: int :: s <= k < i ==> a[min_i] <= a[k]
{
if a[i] <= a[min_i] {
min_i := i;
}
i := i + 1;
}
}
predicate is_sorted(ss: seq<int>)
{
forall i, j: int:: 0 <= i <= j < |ss| ==> ss[i] <= ss[j]
}
predicate is_permutation(a:seq<int>, b:seq<int>)
decreases |a|
decreases |b|
{
|a| == |b| &&
((|a| == 0 && |b| == 0) ||
exists i,j : int :: 0<=i<|a| && 0<=j<|b| && a[i] == b[j] && is_permutation(a[0..i] + if i < |a| then a[i+1..] else [], b[0..j] + if j < |b| then b[j+1..] else []))
}
// predicate is_permutation(a:seq<int>, b:seq<int>)
// decreases |a|
// decreases |b|
// {
// |a| == |b| && ((|a| == 0 && |b| == 0) || exists i,j : int :: 0<=i<|a| && 0<=j<|b| && a[i] == b[j] && is_permutation(a[0..i] + a[i+1..], b[0..j] + b[j+1..]))
// }
predicate is_permutation2(a:seq<int>, b:seq<int>)
{
multiset(a) == multiset(b)
}
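// Editor's note (illustrative addition): is_permutation2 is the definition selection_sort
// below actually relies on; comparing multisets ignores order, e.g.
// multiset([2,1,1]) == multiset([1,2,1]) even though the sequences themselves differ.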
method selection_sort(ns: array<int>)
requires ns.Length >= 0
ensures is_sorted(ns[..])
ensures is_permutation2(old(ns[..]), ns[..])
modifies ns
{
var i: int := 0;
var l: int := ns.Length;
while i < l
decreases l - i
invariant 0 <= i <= l
invariant is_permutation2(old(ns[..]), ns[..])
        invariant forall k, kk: int :: 0 <= k < i && i <= kk < ns.Length ==> ns[k] <= ns[kk] // every element already placed on the left is <= every element to its right
invariant is_sorted(ns[..i])
{
var min_i: int := find_min_index(ns, i, ns.Length);
ns[i], ns[min_i] := ns[min_i], ns[i];
i := i + 1;
}
}
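// Editor's sketch of a caller (an illustrative addition, not part of the original file);
// it relies only on the postconditions of selection_sort above, and the array-display
// allocation syntax mirrors the Main methods elsewhere in this collection.
method selection_sort_demo()
{
    var a := new int[][3, 1, 2];
    selection_sort(a);
    assert is_sorted(a[..]);  // follows directly from the ensures clause of selection_sort
}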
| method find_min_index(a : array<int>, s: int, e: int) returns (min_i: int)
requires a.Length > 0
requires 0 <= s < a.Length
requires e <= a.Length
requires e > s
ensures min_i >= s
ensures min_i < e
ensures forall k: int :: s <= k < e ==> a[min_i] <= a[k]
{
min_i := s;
var i : int := s;
while i < e
// unnecessary invariant
// invariant i < e ==> min_i <= i
{
if a[i] <= a[min_i] {
min_i := i;
}
i := i + 1;
}
}
predicate is_sorted(ss: seq<int>)
{
forall i, j: int:: 0 <= i <= j < |ss| ==> ss[i] <= ss[j]
}
predicate is_permutation(a:seq<int>, b:seq<int>)
{
|a| == |b| &&
((|a| == 0 && |b| == 0) ||
exists i,j : int :: 0<=i<|a| && 0<=j<|b| && a[i] == b[j] && is_permutation(a[0..i] + if i < |a| then a[i+1..] else [], b[0..j] + if j < |b| then b[j+1..] else []))
}
// predicate is_permutation(a:seq<int>, b:seq<int>)
// decreases |a|
// decreases |b|
// {
// |a| == |b| && ((|a| == 0 && |b| == 0) || exists i,j : int :: 0<=i<|a| && 0<=j<|b| && a[i] == b[j] && is_permutation(a[0..i] + a[i+1..], b[0..j] + b[j+1..]))
// }
predicate is_permutation2(a:seq<int>, b:seq<int>)
{
multiset(a) == multiset(b)
}
method selection_sort(ns: array<int>)
requires ns.Length >= 0
ensures is_sorted(ns[..])
ensures is_permutation2(old(ns[..]), ns[..])
modifies ns
{
var i: int := 0;
var l: int := ns.Length;
while i < l
{
var min_i: int := find_min_index(ns, i, ns.Length);
ns[i], ns[min_i] := ns[min_i], ns[i];
i := i + 1;
}
}
|
744 | specTesting_tmp_tmpueam35lx_examples_binary_search_binary_search_specs.dfy | lemma BinarySearch(intSeq:seq<int>, key:int) returns (r:int)
// original
requires forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j]
ensures r >= 0 ==> r < |intSeq| && intSeq[r] == key
ensures r < 0 ==> forall i:nat | i < |intSeq| :: intSeq[i] != key
{
var lo:nat := 0;
var hi:nat := |intSeq|;
while lo < hi
invariant 0 <= lo <= hi <= |intSeq|
invariant forall i:nat | 0 <= i < lo :: intSeq[i] < key
invariant forall i:nat | hi <= i < |intSeq| :: intSeq[i] > key
{
var mid := (lo + hi) / 2;
if (intSeq[mid] < key) {
lo := mid + 1;
} else if (intSeq[mid] > key) {
hi := mid;
} else {
return mid;
}
}
return -1;
}
predicate BinarySearchTransition(intSeq:seq<int>, key:int, r:int)
requires (forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j])
{
&& (r >= 0 ==> r < |intSeq| && intSeq[r] == key)
&& (r < 0 ==> forall i:nat | i < |intSeq| :: intSeq[i] != key)
}
lemma BinarySearchDeterministic(intSeq:seq<int>, key:int) returns (r:int)
requires forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j]
ensures r >= 0 ==> r < |intSeq| && intSeq[r] == key
ensures r < 0 ==> forall i:nat | i < |intSeq| :: intSeq[i] != key
// make it deterministic
ensures r < 0 ==> r == -1 // return -1 if not found
ensures r >= 0 ==> forall i:nat | i < r :: intSeq[i] < key // multiple matches return the first result
{
var lo:nat := 0;
var hi:nat := |intSeq|;
while lo < hi
invariant 0 <= lo <= hi <= |intSeq|
invariant forall i:nat | 0 <= i < lo :: intSeq[i] < key
invariant (forall i:nat | hi <= i < |intSeq| :: intSeq[i] > key)
|| (forall i:nat | hi <= i < |intSeq| :: intSeq[i] >= key && exists i:nat | lo <= i < hi :: intSeq[i] == key)
{
var mid := (lo + hi) / 2;
if (intSeq[mid] < key) {
lo := mid + 1;
} else if (intSeq[mid] > key) {
hi := mid;
} else {
assert intSeq[mid] == key;
var inner_mid := (lo + mid) / 2;
if (intSeq[inner_mid] < key) {
lo := inner_mid + 1;
} else if (hi != inner_mid + 1) {
hi := inner_mid + 1;
} else {
if (intSeq[lo] == key) {
return lo;
} else {
lo := lo + 1;
}
}
}
}
return -1;
}
predicate BinarySearchDeterministicTransition(intSeq:seq<int>, key:int, r:int)
requires (forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j])
{
&& (r >= 0 ==> r < |intSeq| && intSeq[r] == key)
&& (r < 0 ==> forall i:nat | i < |intSeq| :: intSeq[i] != key)
// make it deterministic
&& (r < 0 ==> r == -1) // return -1 if not found
&& (r >= 0 ==> forall i:nat | i < r :: intSeq[i] < key)
}
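// Editor's worked example (not part of the original file): for intSeq == [1, 2, 2, 3] and
// key == 2, both r == 1 and r == 2 satisfy BinarySearchTransition, but only r == 1 satisfies
// BinarySearchDeterministicTransition, because the extra clause
// forall i | i < r :: intSeq[i] < key rules out r == 2 (intSeq[1] == 2 is not < key).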
lemma BinarySearchWrong1(intSeq:seq<int>, key:int) returns (r:int)
// first element
requires forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j]
ensures r >= 0 ==> r < |intSeq| && intSeq[r] == key
ensures r < 0 ==> forall i:nat | 0 < i < |intSeq| :: intSeq[i] != key // i >= 0
// make it deterministic
ensures r < 0 ==> r == -1 // return -1 if not found
ensures r >= 0 ==> forall i:nat | i < r :: intSeq[i] < key // multiple matches return the first result
predicate BinarySearchWrong1Transition(intSeq:seq<int>, key:int, r:int)
requires forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j]
{
&& (r >= 0 ==> r < |intSeq| && intSeq[r] == key)
&& (r < 0 ==> forall i:nat | 0 < i < |intSeq| :: intSeq[i] != key) // i >= 0
// make it deterministic
&& (r < 0 ==> r == -1) // return -1 if not found
&& (r >= 0 ==> forall i:nat | i < r :: intSeq[i] < key)
}
lemma BinarySearchWrong2(intSeq:seq<int>, key:int) returns (r:int)
// last element
requires forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j]
ensures r >= 0 ==> r < |intSeq| && intSeq[r] == key
ensures r < 0 ==> forall i:nat | 0 <= i < |intSeq| - 1 :: intSeq[i] != key // i < |intSeq|
// make it deterministic
ensures r < 0 ==> r == -1 // return -1 if not found
ensures r >= 0 ==> forall i:nat | i < r :: intSeq[i] < key // multiple matches return the first result
predicate BinarySearchWrong2Transition(intSeq:seq<int>, key:int, r:int)
requires forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j]
{
&& (r >= 0 ==> r < |intSeq| && intSeq[r] == key)
&& (r < 0 ==> forall i:nat | 0 <= i < |intSeq| - 1 :: intSeq[i] != key) // i < |intSeq|
// make it deterministic
&& (r < 0 ==> r == -1) // return -1 if not found
&& (r >= 0 ==> forall i:nat | i < r :: intSeq[i] < key)
}
lemma BinarySearchWrong3(intSeq:seq<int>, key:int) returns (r:int)
// weaker spec
requires forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j]
ensures r < 0 || (r < |intSeq| && intSeq[r] == key) // post condition not correctly formed
{
return -1;
}
predicate BinarySearchWrong3Transition(intSeq:seq<int>, key:int, r:int)
requires forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j]
{
r < 0 || (r < |intSeq| && intSeq[r] == key)
}
lemma BinarySearchWrong4(intSeq:seq<int>, key:int) returns (r:int)
// non-realistic
requires forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j]
ensures 0 <= r < |intSeq| && intSeq[r] == key
predicate BinarySearchWrong4Transition(intSeq:seq<int>, key:int, r:int)
requires forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j]
{
0 <= r < |intSeq| && intSeq[r] == key
}
| lemma BinarySearch(intSeq:seq<int>, key:int) returns (r:int)
// original
requires forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j]
ensures r >= 0 ==> r < |intSeq| && intSeq[r] == key
ensures r < 0 ==> forall i:nat | i < |intSeq| :: intSeq[i] != key
{
var lo:nat := 0;
var hi:nat := |intSeq|;
while lo < hi
{
var mid := (lo + hi) / 2;
if (intSeq[mid] < key) {
lo := mid + 1;
} else if (intSeq[mid] > key) {
hi := mid;
} else {
return mid;
}
}
return -1;
}
predicate BinarySearchTransition(intSeq:seq<int>, key:int, r:int)
requires (forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j])
{
&& (r >= 0 ==> r < |intSeq| && intSeq[r] == key)
&& (r < 0 ==> forall i:nat | i < |intSeq| :: intSeq[i] != key)
}
lemma BinarySearchDeterministic(intSeq:seq<int>, key:int) returns (r:int)
requires forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j]
ensures r >= 0 ==> r < |intSeq| && intSeq[r] == key
ensures r < 0 ==> forall i:nat | i < |intSeq| :: intSeq[i] != key
// make it deterministic
ensures r < 0 ==> r == -1 // return -1 if not found
ensures r >= 0 ==> forall i:nat | i < r :: intSeq[i] < key // multiple matches return the first result
{
var lo:nat := 0;
var hi:nat := |intSeq|;
    while lo < hi
{
var mid := (lo + hi) / 2;
if (intSeq[mid] < key) {
lo := mid + 1;
} else if (intSeq[mid] > key) {
hi := mid;
} else {
var inner_mid := (lo + mid) / 2;
if (intSeq[inner_mid] < key) {
lo := inner_mid + 1;
} else if (hi != inner_mid + 1) {
hi := inner_mid + 1;
} else {
if (intSeq[lo] == key) {
return lo;
} else {
lo := lo + 1;
}
}
}
}
return -1;
}
predicate BinarySearchDeterministicTransition(intSeq:seq<int>, key:int, r:int)
requires (forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j])
{
&& (r >= 0 ==> r < |intSeq| && intSeq[r] == key)
&& (r < 0 ==> forall i:nat | i < |intSeq| :: intSeq[i] != key)
// make it deterministic
&& (r < 0 ==> r == -1) // return -1 if not found
&& (r >= 0 ==> forall i:nat | i < r :: intSeq[i] < key)
}
lemma BinarySearchWrong1(intSeq:seq<int>, key:int) returns (r:int)
// first element
requires forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j]
ensures r >= 0 ==> r < |intSeq| && intSeq[r] == key
ensures r < 0 ==> forall i:nat | 0 < i < |intSeq| :: intSeq[i] != key // i >= 0
// make it deterministic
ensures r < 0 ==> r == -1 // return -1 if not found
ensures r >= 0 ==> forall i:nat | i < r :: intSeq[i] < key // multiple matches return the first result
predicate BinarySearchWrong1Transition(intSeq:seq<int>, key:int, r:int)
requires forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j]
{
&& (r >= 0 ==> r < |intSeq| && intSeq[r] == key)
&& (r < 0 ==> forall i:nat | 0 < i < |intSeq| :: intSeq[i] != key) // i >= 0
// make it deterministic
&& (r < 0 ==> r == -1) // return -1 if not found
&& (r >= 0 ==> forall i:nat | i < r :: intSeq[i] < key)
}
lemma BinarySearchWrong2(intSeq:seq<int>, key:int) returns (r:int)
// last element
requires forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j]
ensures r >= 0 ==> r < |intSeq| && intSeq[r] == key
ensures r < 0 ==> forall i:nat | 0 <= i < |intSeq| - 1 :: intSeq[i] != key // i < |intSeq|
// make it deterministic
ensures r < 0 ==> r == -1 // return -1 if not found
ensures r >= 0 ==> forall i:nat | i < r :: intSeq[i] < key // multiple matches return the first result
predicate BinarySearchWrong2Transition(intSeq:seq<int>, key:int, r:int)
requires forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j]
{
&& (r >= 0 ==> r < |intSeq| && intSeq[r] == key)
&& (r < 0 ==> forall i:nat | 0 <= i < |intSeq| - 1 :: intSeq[i] != key) // i < |intSeq|
// make it deterministic
&& (r < 0 ==> r == -1) // return -1 if not found
&& (r >= 0 ==> forall i:nat | i < r :: intSeq[i] < key)
}
lemma BinarySearchWrong3(intSeq:seq<int>, key:int) returns (r:int)
// weaker spec
requires forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j]
ensures r < 0 || (r < |intSeq| && intSeq[r] == key) // post condition not correctly formed
{
return -1;
}
predicate BinarySearchWrong3Transition(intSeq:seq<int>, key:int, r:int)
requires forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j]
{
r < 0 || (r < |intSeq| && intSeq[r] == key)
}
lemma BinarySearchWrong4(intSeq:seq<int>, key:int) returns (r:int)
// non-realistic
requires forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j]
ensures 0 <= r < |intSeq| && intSeq[r] == key
predicate BinarySearchWrong4Transition(intSeq:seq<int>, key:int, r:int)
requires forall i,j | 0 <= i <= j < |intSeq| :: intSeq[i] <= intSeq[j]
{
0 <= r < |intSeq| && intSeq[r] == key
}
|
745 | specTesting_tmp_tmpueam35lx_examples_increment_decrement_spec.dfy | module OneSpec {
datatype Variables = Variables(value: int)
predicate Init(v: Variables)
{
v.value == 0
}
predicate IncrementOp(v: Variables, v': Variables)
{
&& v'.value == v.value + 1
}
predicate DecrementOp(v: Variables, v': Variables)
{
&& v'.value == v.value - 1
}
datatype Step =
| IncrementStep()
| DecrementStep()
predicate NextStep(v: Variables, v': Variables, step: Step)
{
match step
case IncrementStep() => IncrementOp(v, v')
case DecrementStep() => DecrementOp(v, v')
}
predicate Next(v: Variables, v': Variables)
{
exists step :: NextStep(v, v', step)
}
}
module OneProtocol {
datatype Variables = Variables(value: int)
predicate Init(v: Variables)
{
v.value == 0
}
predicate IncrementOp(v: Variables, v': Variables)
{
&& v'.value == v.value - 1
}
predicate DecrementOp(v: Variables, v': Variables)
{
&& v'.value == v.value + 1
}
datatype Step =
| IncrementStep()
| DecrementStep()
predicate NextStep(v: Variables, v': Variables, step: Step)
{
match step
case IncrementStep() => IncrementOp(v, v')
case DecrementStep() => DecrementOp(v, v')
}
predicate Next(v: Variables, v': Variables)
{
exists step :: NextStep(v, v', step)
}
}
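// Editor's note (illustrative): OneProtocol deliberately swaps the two operations relative
// to OneSpec (its IncrementOp decrements the value and vice versa), so the refinement proof
// below has to witness each protocol step with the opposite OneSpec step, e.g. a protocol
// IncrementStep() is matched by OneSpec.DecrementStep().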
module RefinementProof {
import OneSpec
import opened OneProtocol
function Abstraction(v: Variables) : OneSpec.Variables {
OneSpec.Variables(v.value)
}
lemma RefinementInit(v: Variables)
requires Init(v)
ensures OneSpec.Init(Abstraction(v))
{
}
lemma RefinementNext(v: Variables, v': Variables)
requires Next(v, v')
ensures OneSpec.Next(Abstraction(v), Abstraction(v'))
{
var step :| NextStep(v, v', step);
match step {
case IncrementStep() => {
assert OneSpec.NextStep(Abstraction(v), Abstraction(v'), OneSpec.DecrementStep());
}
case DecrementStep() => {
assert OneSpec.NextStep(Abstraction(v), Abstraction(v'), OneSpec.IncrementStep());
}
}
}
}
| module OneSpec {
datatype Variables = Variables(value: int)
predicate Init(v: Variables)
{
v.value == 0
}
predicate IncrementOp(v: Variables, v': Variables)
{
&& v'.value == v.value + 1
}
predicate DecrementOp(v: Variables, v': Variables)
{
&& v'.value == v.value - 1
}
datatype Step =
| IncrementStep()
| DecrementStep()
predicate NextStep(v: Variables, v': Variables, step: Step)
{
match step
case IncrementStep() => IncrementOp(v, v')
case DecrementStep() => DecrementOp(v, v')
}
predicate Next(v: Variables, v': Variables)
{
exists step :: NextStep(v, v', step)
}
}
module OneProtocol {
datatype Variables = Variables(value: int)
predicate Init(v: Variables)
{
v.value == 0
}
predicate IncrementOp(v: Variables, v': Variables)
{
&& v'.value == v.value - 1
}
predicate DecrementOp(v: Variables, v': Variables)
{
&& v'.value == v.value + 1
}
datatype Step =
| IncrementStep()
| DecrementStep()
predicate NextStep(v: Variables, v': Variables, step: Step)
{
match step
case IncrementStep() => IncrementOp(v, v')
case DecrementStep() => DecrementOp(v, v')
}
predicate Next(v: Variables, v': Variables)
{
exists step :: NextStep(v, v', step)
}
}
module RefinementProof {
import OneSpec
import opened OneProtocol
function Abstraction(v: Variables) : OneSpec.Variables {
OneSpec.Variables(v.value)
}
lemma RefinementInit(v: Variables)
requires Init(v)
ensures OneSpec.Init(Abstraction(v))
{
}
lemma RefinementNext(v: Variables, v': Variables)
requires Next(v, v')
ensures OneSpec.Next(Abstraction(v), Abstraction(v'))
{
var step :| NextStep(v, v', step);
match step {
case IncrementStep() => {
}
case DecrementStep() => {
}
}
}
}
|
746 | specTesting_tmp_tmpueam35lx_examples_max_max.dfy | lemma max(a:int, b:int) returns (m:int)
ensures m >= a
ensures m >= b
ensures m == a || m == b
{
if (a > b) {
m := a;
} else {
m := b;
}
}
predicate post_max(a:int, b:int, m:int)
{
&& m >= a
&& m >= b
&& (m == a || m == b)
}
// to check that the postcondition is functioning, i.e., not too accommodating:
// cases it must refuse
lemma post_max_point_1(a:int, b:int, m:int)
requires a > b
requires m != a
ensures !post_max(a, b, m)
{}
// an equivalent way of doing so
lemma post_max_point_1'(a:int, b:int, m:int)
requires a > b
requires post_max(a, b, m)
ensures m == a
{}
lemma post_max_point_2(a:int, b:int, m:int)
requires a == b
requires m != a || m != b
ensures !post_max(a, b, m)
{}
lemma post_max_point_3(a:int, b:int, m:int)
requires a < b
requires m != b
ensures !post_max(a, b, m)
{}
lemma post_max_vertical_1(a:int, b:int, m:int)
requires m != a && m != b
ensures !post_max(a, b, m)
{}
lemma post_max_vertical_1'(a:int, b:int, m:int)
requires post_max(a, b, m)
ensures m == a || m == b
{}
// to check if it is implementable
lemma post_max_realistic_1(a:int, b:int, m:int)
requires a > b
requires m == a
ensures post_max(a, b, m)
{}
lemma post_max_realistic_2(a:int, b:int, m:int)
requires a < b
requires m == b
ensures post_max(a, b, m)
{}
lemma post_max_realistic_3(a:int, b:int, m:int)
requires a == b
requires m == a
ensures post_max(a, b, m)
{}
// this form is more natural
lemma max_deterministic(a:int, b:int, m:int, m':int)
// should include preconditions if applicable
requires post_max(a, b, m)
requires post_max(a, b, m')
ensures m == m'
{}
lemma max_deterministic'(a:int, b:int, m:int, m':int)
requires m != m'
ensures !post_max(a, b, m) || !post_max(a, b, m')
{}
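// Editor's illustrative sanity check (not part of the original file); both postconditions
// should follow by evaluating the non-recursive predicate on these literals.
lemma post_max_example()
    ensures post_max(3, 7, 7)    // 7 is a maximum of 3 and 7
    ensures !post_max(3, 7, 3)   // 3 is not: it fails the m >= b conjunct
{}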
lemma lemmaInvTheProposerOfAnyValidBlockInAnHonestBlockchailnIsInTheSetOfValidatorsHelper6Helper<T>(
s: seq<int>,
b: int,
i: nat
)
requires |s| > i
requires b == s[i]
ensures s[..i] + [b] == s[..i+1]
{ }
lemma multisetEquality(m1:multiset<int>, m2:multiset<int>, m3:multiset<int>, m4:multiset<int>)
requires m1 > m2 + m3
requires m1 == m2 + m4
ensures m3 < m4
{
assert m3 < m1 - m2;
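    // Editor's note at this step (illustrative): since m1 == m2 + m4, we have m1 - m2 == m4,
    // so the assert above reduces the goal m3 < m4 to m3 < m1 - m2, which in turn follows
    // from the hypothesis m1 > m2 + m3.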
}
| lemma max(a:int, b:int) returns (m:int)
ensures m >= a
ensures m >= b
ensures m == a || m == b
{
if (a > b) {
m := a;
} else {
m := b;
}
}
predicate post_max(a:int, b:int, m:int)
{
&& m >= a
&& m >= b
&& (m == a || m == b)
}
// to check that the postcondition is functioning, i.e., not too accommodating:
// cases it must refuse
lemma post_max_point_1(a:int, b:int, m:int)
requires a > b
requires m != a
ensures !post_max(a, b, m)
{}
// an equivalent way of doing so
lemma post_max_point_1'(a:int, b:int, m:int)
requires a > b
requires post_max(a, b, m)
ensures m == a
{}
lemma post_max_point_2(a:int, b:int, m:int)
requires a == b
requires m != a || m != b
ensures !post_max(a, b, m)
{}
lemma post_max_point_3(a:int, b:int, m:int)
requires a < b
requires m != b
ensures !post_max(a, b, m)
{}
lemma post_max_vertical_1(a:int, b:int, m:int)
requires m != a && m != b
ensures !post_max(a, b, m)
{}
lemma post_max_vertical_1'(a:int, b:int, m:int)
requires post_max(a, b, m)
ensures m == a || m == b
{}
// to check if it is implementable
lemma post_max_realistic_1(a:int, b:int, m:int)
requires a > b
requires m == a
ensures post_max(a, b, m)
{}
lemma post_max_realistic_2(a:int, b:int, m:int)
requires a < b
requires m == b
ensures post_max(a, b, m)
{}
lemma post_max_realistic_3(a:int, b:int, m:int)
requires a == b
requires m == a
ensures post_max(a, b, m)
{}
// this form is more natural
lemma max_deterministic(a:int, b:int, m:int, m':int)
// should include preconditions if applicable
requires post_max(a, b, m)
requires post_max(a, b, m')
ensures m == m'
{}
lemma max_deterministic'(a:int, b:int, m:int, m':int)
requires m != m'
ensures !post_max(a, b, m) || !post_max(a, b, m')
{}
lemma lemmaInvTheProposerOfAnyValidBlockInAnHonestBlockchailnIsInTheSetOfValidatorsHelper6Helper<T>(
s: seq<int>,
b: int,
i: nat
)
requires |s| > i
requires b == s[i]
ensures s[..i] + [b] == s[..i+1]
{ }
lemma multisetEquality(m1:multiset<int>, m2:multiset<int>, m3:multiset<int>, m4:multiset<int>)
requires m1 > m2 + m3
requires m1 == m2 + m4
ensures m3 < m4
{
}
|
747 | specTesting_tmp_tmpueam35lx_examples_sort_sort.dfy | method quickSort(intSeq:array<int>)
modifies intSeq
ensures forall i:nat, j:nat | 0 <= i <= j < intSeq.Length :: intSeq[i] <= intSeq[j]
// ensures multiset(intSeq[..]) == multiset(old(intSeq[..]))
lemma sort(prevSeq:seq<int>) returns (curSeq:seq<int>)
ensures (forall i:nat, j:nat | 0 <= i <= j < |curSeq| :: curSeq[i] <= curSeq[j])
ensures multiset(prevSeq) == multiset(curSeq)
predicate post_sort(prevSeq:seq<int>, curSeq:seq<int>)
{
&& (forall i:nat, j:nat | 0 <= i <= j < |curSeq| :: curSeq[i] <= curSeq[j])
&& multiset(prevSeq) == multiset(curSeq)
}
lemma multisetAdditivity(m1:multiset<int>, m2:multiset<int>, m3:multiset<int>, m4:multiset<int>)
requires m1 == m2 + m3
requires m1 == m2 + m4
ensures m3 == m4
{
assert m3 == m1 - m2;
assert m4 == m1 - m2;
}
lemma twoSortedSequencesWithSameElementsAreEqual(s1:seq<int>, s2:seq<int>)
requires (forall i:nat, j:nat | 0 <= i <= j < |s1| :: s1[i] <= s1[j])
requires (forall i:nat, j:nat | 0 <= i <= j < |s2| :: s2[i] <= s2[j])
requires multiset(s1) == multiset(s2)
requires |s1| == |s2|
ensures s1 == s2
{
if (|s1| != 0) {
if s1[|s1|-1] == s2[|s2|-1] {
assert multiset(s1[..|s1|-1]) == multiset(s2[..|s2|-1]) by {
assert s1 == s1[..|s1|-1] + [s1[|s1|-1]];
assert multiset(s1) == multiset(s1[..|s1|-1]) + multiset([s1[|s1|-1]]);
assert s2 == s2[..|s1|-1] + [s2[|s1|-1]];
assert multiset(s2) == multiset(s2[..|s1|-1]) + multiset([s2[|s1|-1]]);
assert multiset([s1[|s1|-1]]) == multiset([s2[|s1|-1]]);
multisetAdditivity(multiset(s1), multiset([s1[|s1|-1]]), multiset(s1[..|s1|-1]), multiset(s2[..|s1|-1]));
}
twoSortedSequencesWithSameElementsAreEqual(s1[..|s1|-1], s2[..|s2|-1]);
} else if s1[|s1|-1] < s2[|s2|-1] {
assert s2[|s2|-1] !in multiset(s1);
assert false;
} else {
assert s1[|s1|-1] !in multiset(s2);
assert false;
}
}
}
lemma sort_determinisitc(prevSeq:seq<int>, curSeq:seq<int>, curSeq':seq<int>)
requires post_sort(prevSeq, curSeq)
requires post_sort(prevSeq, curSeq')
ensures curSeq == curSeq'
{
if (|curSeq| != |curSeq'|) {
assert |multiset(curSeq)| != |multiset(curSeq')|;
} else {
twoSortedSequencesWithSameElementsAreEqual(curSeq, curSeq');
}
}
lemma sort_determinisitc1(prevSeq:seq<int>, curSeq:seq<int>, curSeq':seq<int>)
requires prevSeq == [5,4,3,2,1]
requires post_sort(prevSeq, curSeq)
requires post_sort(prevSeq, curSeq')
ensures curSeq == curSeq'
{
}
| method quickSort(intSeq:array<int>)
modifies intSeq
ensures forall i:nat, j:nat | 0 <= i <= j < intSeq.Length :: intSeq[i] <= intSeq[j]
// ensures multiset(intSeq[..]) == multiset(old(intSeq[..]))
lemma sort(prevSeq:seq<int>) returns (curSeq:seq<int>)
ensures (forall i:nat, j:nat | 0 <= i <= j < |curSeq| :: curSeq[i] <= curSeq[j])
ensures multiset(prevSeq) == multiset(curSeq)
predicate post_sort(prevSeq:seq<int>, curSeq:seq<int>)
{
&& (forall i:nat, j:nat | 0 <= i <= j < |curSeq| :: curSeq[i] <= curSeq[j])
&& multiset(prevSeq) == multiset(curSeq)
}
lemma multisetAdditivity(m1:multiset<int>, m2:multiset<int>, m3:multiset<int>, m4:multiset<int>)
requires m1 == m2 + m3
requires m1 == m2 + m4
ensures m3 == m4
{
}
lemma twoSortedSequencesWithSameElementsAreEqual(s1:seq<int>, s2:seq<int>)
requires (forall i:nat, j:nat | 0 <= i <= j < |s1| :: s1[i] <= s1[j])
requires (forall i:nat, j:nat | 0 <= i <= j < |s2| :: s2[i] <= s2[j])
requires multiset(s1) == multiset(s2)
requires |s1| == |s2|
ensures s1 == s2
{
    if (|s1| != 0) {
        if s1[|s1|-1] == s2[|s2|-1] {
            multisetAdditivity(multiset(s1), multiset([s1[|s1|-1]]), multiset(s1[..|s1|-1]), multiset(s2[..|s1|-1]));
            twoSortedSequencesWithSameElementsAreEqual(s1[..|s1|-1], s2[..|s2|-1]);
        } else if s1[|s1|-1] < s2[|s2|-1] {
        } else {
        }
    }
}
lemma sort_determinisitc(prevSeq:seq<int>, curSeq:seq<int>, curSeq':seq<int>)
requires post_sort(prevSeq, curSeq)
requires post_sort(prevSeq, curSeq')
ensures curSeq == curSeq'
{
if (|curSeq| != |curSeq'|) {
} else {
twoSortedSequencesWithSameElementsAreEqual(curSeq, curSeq');
}
}
lemma sort_determinisitc1(prevSeq:seq<int>, curSeq:seq<int>, curSeq':seq<int>)
requires prevSeq == [5,4,3,2,1]
requires post_sort(prevSeq, curSeq)
requires post_sort(prevSeq, curSeq')
ensures curSeq == curSeq'
{
}
|
748 | stunning-palm-tree_tmp_tmpr84c2iwh_ch1.dfy | // Ex 1.3
method Triple (x: int) returns (r: int)
ensures r == 3*x {
var y := 2*x;
r := y + x;
assert r == 3*x;
}
method Caller() {
var t := Triple(18);
assert t < 100;
}
// Ex 1.6
method MinUnderSpec (x: int, y: int) returns (r: int)
ensures r <= x && r <= y {
if x <= y {
r := x - 1;
} else {
r := y - 1;
}
}
method Min (x: int, y: int) returns (r: int)
ensures r <= x && r <= y
ensures r == x || r == y {
if x <= y {
r := x;
} else {
r := y;
}
}
// Ex 1.7
method MaxSum (x: int, y: int) returns (s:int, m: int)
ensures s == x + y
ensures x <= m && y <= m
ensures m == x || m == y
// look ma, no implementation!
method MaxSumCaller() {
var s, m := MaxSum(1928, 1);
assert s == 1929;
assert m == 1928;
}
// Ex 1.8
method ReconstructFromMaxSum (s: int, m: int ) returns (x: int, y: int)
// requires (0 < s && s / 2 < m && m < s)
requires s - m <= m
ensures s == x + y
ensures (m == y || m == x) && x <= m && y <= m
{
x := m;
y := s - m;
}
method TestMaxSum(x: int, y: int)
// requires x > 0 && y > 0 && x != y
{
var s, m := MaxSum(x, y);
var xx, yy := ReconstructFromMaxSum(s, m);
assert (xx == x && yy == y) || (xx == y && yy == x);
}
// Ex 1.9
function Average (a: int, b: int): int {
(a + b) / 2
}
method Triple'(x: int) returns (r: int)
// spec 1: ensures Average(r, 3*x) == 3*x
ensures Average(2*r, 6*x) == 6*x
{
// r := x + x + x + 1; // does not meet spec of Triple, but does spec 1
r := x + x + x;
}
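// Editor's note (illustrative, not part of the original file): spec 1 is weaker because of
// integer division: Average(3*x + 1, 3*x) == (6*x + 1) / 2 == 3*x, so the commented-out body
// r := x + x + x + 1 satisfies spec 1 but not Triple's contract, whereas
// Average(2*r, 6*x) == r + 3*x == 6*x forces r == 3*x exactly.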
| // Ex 1.3
method Triple (x: int) returns (r: int)
ensures r == 3*x {
var y := 2*x;
r := y + x;
}
method Caller() {
var t := Triple(18);
}
// Ex 1.6
method MinUnderSpec (x: int, y: int) returns (r: int)
ensures r <= x && r <= y {
if x <= y {
r := x - 1;
} else {
r := y - 1;
}
}
method Min (x: int, y: int) returns (r: int)
ensures r <= x && r <= y
ensures r == x || r == y {
if x <= y {
r := x;
} else {
r := y;
}
}
// Ex 1.7
method MaxSum (x: int, y: int) returns (s:int, m: int)
ensures s == x + y
ensures x <= m && y <= m
ensures m == x || m == y
// look ma, no implementation!
method MaxSumCaller() {
var s, m := MaxSum(1928, 1);
}
// Ex 1.8
method ReconstructFromMaxSum (s: int, m: int ) returns (x: int, y: int)
// requires (0 < s && s / 2 < m && m < s)
requires s - m <= m
ensures s == x + y
ensures (m == y || m == x) && x <= m && y <= m
{
x := m;
y := s - m;
}
method TestMaxSum(x: int, y: int)
// requires x > 0 && y > 0 && x != y
{
var s, m := MaxSum(x, y);
var xx, yy := ReconstructFromMaxSum(s, m);
}
// Ex 1.9
function Average (a: int, b: int): int {
(a + b) / 2
}
method Triple'(x: int) returns (r: int)
// spec 1: ensures Average(r, 3*x) == 3*x
ensures Average(2*r, 6*x) == 6*x
{
// r := x + x + x + 1; // does not meet spec of Triple, but does spec 1
r := x + x + x;
}
|
749 | stunning-palm-tree_tmp_tmpr84c2iwh_ch10.dfy | // Ch. 10: Datatype Invariants
module PQueue {
export
// Impl
provides PQueue
provides Empty, IsEmpty, Insert, RemoveMin
// Spec
provides Valid, Elements, EmptyCorrect, IsEmptyCorrect
provides InsertCorrect, RemoveMinCorrect
reveals IsMin
// Implementation
type PQueue = BraunTree
datatype BraunTree =
| Leaf
| Node(x: int, left: BraunTree, right: BraunTree)
function Empty(): PQueue {
Leaf
}
predicate IsEmpty(pq: PQueue) {
pq == Leaf
}
function Insert(pq: PQueue, y: int): PQueue {
match pq
case Leaf => Node(y, Leaf, Leaf)
case Node(x, left, right) =>
if y < x then
Node(y, Insert(right ,x), left)
else
Node(x, Insert(right, y), left)
}
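  // Editor's reading (illustrative, not part of the original file): Insert swaps the two
  // children on the way down, which is what maintains the Braun balance captured by
  // IsBalanced below; e.g. Insert(Node(1, Leaf, Leaf), 2) == Node(1, Node(2, Leaf, Leaf), Leaf).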
function RemoveMin(pq: PQueue): (int, PQueue)
requires Valid(pq) && !IsEmpty(pq)
{
var Node(x, left, right) := pq;
(x, DeleteMin(pq))
}
function DeleteMin(pq: PQueue): PQueue
requires IsBalanced(pq) && !IsEmpty(pq)
{
// Ex. 10.4: by the IsBalanced property, pq.left is always as large or one node larger
    // than pq.right. Thus pq.left.Leaf? ==> pq.right.Leaf?
if pq.right.Leaf? then
pq.left
else if pq.left.x <= pq.right.x then
Node(pq.left.x, pq.right, DeleteMin(pq.left))
else
Node(pq.right.x, ReplaceRoot(pq.right, pq.left.x), DeleteMin(pq.left))
}
function ReplaceRoot(pq: PQueue, r: int): PQueue
requires !IsEmpty(pq)
{
// left is empty or r is smaller than either sub-root
if pq.left.Leaf? ||
(r <= pq.left.x && (pq.right.Leaf? || r <= pq.right.x))
then
// simply replace the root
Node(r, pq.left, pq.right)
// right is empty, left has one element
else if pq.right.Leaf? then
Node(pq.left.x, Node(r, Leaf, Leaf), Leaf)
// both left/right are non-empty and `r` needs to be inserted deeper in the sub-trees
else if pq.left.x < pq.right.x then
// promote left root
Node(pq.left.x, ReplaceRoot(pq.left, r), pq.right)
else
// promote right root
Node(pq.right.x, pq.left, ReplaceRoot(pq.right, r))
}
//////////////////////////////////////////////////////////////
// Specification exposed to callers
//////////////////////////////////////////////////////////////
ghost function Elements(pq: PQueue): multiset<int> {
match pq
case Leaf => multiset{}
case Node(x, left, right) =>
multiset{x} + Elements(left) + Elements(right)
}
ghost predicate Valid(pq: PQueue) {
IsBinaryHeap(pq) && IsBalanced(pq)
}
//////////////////////////////////////////////////////////////
// Lemmas
//////////////////////////////////////////////////////////////
ghost predicate IsBinaryHeap(pq: PQueue) {
match pq
case Leaf => true
case Node(x, left, right) =>
IsBinaryHeap(left) && IsBinaryHeap(right) &&
(left.Leaf? || x <= left.x) &&
(right.Leaf? || x <= right.x)
}
ghost predicate IsBalanced(pq: PQueue) {
match pq
case Leaf => true
case Node(_, left, right) =>
IsBalanced(left) && IsBalanced(right) &&
var L, R := |Elements(left)|, |Elements(right)|;
L == R || L == R + 1
}
// Ex. 10.2
lemma {:induction false} BinaryHeapStoresMin(pq: PQueue, y: int)
requires IsBinaryHeap(pq) && y in Elements(pq)
ensures pq.x <= y
{
if pq.Node? {
assert y in Elements(pq) ==> (y == pq.x
|| y in Elements(pq.left)
|| y in Elements(pq.right));
if y == pq.x {
assert pq.x <= y;
} else if y in Elements(pq.left) {
assert pq.x <= pq.left.x;
BinaryHeapStoresMin(pq.left, y);
assert pq.x <= y;
} else if y in Elements(pq.right) {
assert pq.x <= pq.right.x;
BinaryHeapStoresMin(pq.right, y);
assert pq.x <= y;
}
}
}
lemma EmptyCorrect()
ensures Valid(Empty()) && Elements(Empty()) == multiset{}
{ // unfold Empty()
}
lemma IsEmptyCorrect(pq: PQueue)
requires Valid(pq)
ensures IsEmpty(pq) <==> Elements(pq) == multiset{}
{
if Elements(pq) == multiset{} {
assert pq.Leaf?;
}
}
lemma InsertCorrect(pq: PQueue, y: int)
requires Valid(pq)
ensures var pq' := Insert(pq, y);
Valid(pq') && Elements(Insert(pq, y)) == Elements(pq) + multiset{y}
{}
lemma RemoveMinCorrect(pq: PQueue)
requires Valid(pq)
requires !IsEmpty(pq)
ensures var (y, pq') := RemoveMin(pq);
Elements(pq) == Elements(pq') + multiset{y} &&
IsMin(y, Elements(pq)) &&
Valid(pq')
{
DeleteMinCorrect(pq);
}
lemma {:induction false} {:rlimit 1000} {:vcs_split_on_every_assert} DeleteMinCorrect(pq: PQueue)
requires Valid(pq) && !IsEmpty(pq)
ensures var pq' := DeleteMin(pq);
Valid(pq') &&
Elements(pq') + multiset{pq.x} == Elements(pq) &&
|Elements(pq')| == |Elements(pq)| - 1
{
if pq.left.Leaf? || pq.right.Leaf? {}
else if pq.left.x <= pq.right.x {
DeleteMinCorrect(pq.left);
} else {
var left, right := ReplaceRoot(pq.right, pq.left.x), DeleteMin(pq.left);
var pq' := Node(pq.right.x, left, right);
assert pq' == DeleteMin(pq);
// Elements post-condition
calc {
Elements(pq') + multiset{pq.x};
== // defn Elements
(multiset{pq.right.x} + Elements(left) + Elements(right)) + multiset{pq.x};
== // multiset left assoc
((multiset{pq.right.x} + Elements(left)) + Elements(right)) + multiset{pq.x};
== { ReplaceRootCorrect(pq.right, pq.left.x);
assert multiset{pq.right.x} + Elements(left) == Elements(pq.right) + multiset{pq.left.x}; }
((Elements(pq.right) + multiset{pq.left.x}) + Elements(right)) + multiset{pq.x};
== // defn right
((Elements(pq.right) + multiset{pq.left.x}) + Elements(DeleteMin(pq.left))) + multiset{pq.x};
== // multiset right assoc
(Elements(pq.right) + (multiset{pq.left.x} + Elements(DeleteMin(pq.left)))) + multiset{pq.x};
== { DeleteMinCorrect(pq.left);
assert multiset{pq.left.x} + Elements(DeleteMin(pq.left)) == Elements(pq.left); }
(Elements(pq.right) + (Elements(pq.left))) + multiset{pq.x};
==
multiset{pq.x} + Elements(pq.right) + (Elements(pq.left));
==
Elements(pq);
}
// Validity
// Prove IsBinaryHeap(pq')
// IsBinaryHeap(left) && IsBinaryHeap(right) &&
DeleteMinCorrect(pq.left);
assert Valid(right);
ReplaceRootCorrect(pq.right, pq.left.x);
assert Valid(left);
// (left.Leaf? || x <= left.x) &&
assert pq.left.x in Elements(left);
assert pq.right.x <= pq.left.x;
BinaryHeapStoresMin(pq.left, pq.left.x);
BinaryHeapStoresMin(pq.right, pq.right.x);
assert pq.right.x <= left.x;
// (right.Leaf? || x <= right.x)
assert right.Leaf? || pq.right.x <= right.x;
assert IsBinaryHeap(pq');
}
}
lemma {:induction false} {:rlimit 1000} {:vcs_split_on_every_assert} ReplaceRootCorrect(pq: PQueue, r: int)
requires Valid(pq) && !IsEmpty(pq)
ensures var pq' := ReplaceRoot(pq, r);
Valid(pq') &&
r in Elements(pq') &&
|Elements(pq')| == |Elements(pq)| &&
Elements(pq) + multiset{r} == Elements(pq') + multiset{pq.x}
{
var pq' := ReplaceRoot(pq, r);
// Element post-condition
var left, right := pq'.left, pq'.right;
if pq.left.Leaf? ||
(r <= pq.left.x && (pq.right.Leaf? || r <= pq.right.x))
{
// simply replace the root
assert Valid(pq');
assert |Elements(pq')| == |Elements(pq)|;
}
else if pq.right.Leaf? {
// both left/right are non-empty and `r` needs to be inserted deeper in the sub-trees
}
else if pq.left.x < pq.right.x {
// promote left root
assert pq.left.Node? && pq.right.Node?;
assert pq.left.x < r || pq.right.x < r;
assert pq' == Node(pq.left.x, ReplaceRoot(pq.left, r), pq.right);
ReplaceRootCorrect(pq.left, r);
assert Valid(pq');
calc {
Elements(pq') + multiset{pq.x};
==
(multiset{pq.left.x} + Elements(ReplaceRoot(pq.left, r)) + Elements(pq.right)) + multiset{pq.x};
== { ReplaceRootCorrect(pq.left, r); }
(Elements(pq.left) + multiset{r}) + Elements(pq.right) + multiset{pq.x};
==
Elements(pq) + multiset{r};
}
}
else {
// promote right root
assert pq' == Node(pq.right.x, pq.left, ReplaceRoot(pq.right, r));
ReplaceRootCorrect(pq.right, r);
assert Valid(pq');
calc {
Elements(pq') + multiset{pq.x};
== // defn
(multiset{pq.right.x} + Elements(pq.left) + Elements(ReplaceRoot(pq.right, r))) + multiset{pq.x};
== // assoc
(Elements(pq.left) + (Elements(ReplaceRoot(pq.right, r)) + multiset{pq.right.x})) + multiset{pq.x};
== { ReplaceRootCorrect(pq.right, r); }
(Elements(pq.left) + multiset{r} + Elements(pq.right)) + multiset{pq.x};
==
Elements(pq) + multiset{r};
}
}
}
ghost predicate IsMin(y: int, s: multiset<int>) {
y in s && forall x :: x in s ==> y <= x
}
}
// Ex 10.0, 10.1
module PQueueClient {
import PQ = PQueue
method Client() {
var pq := PQ.Empty();
PQ.EmptyCorrect();
assert PQ.Elements(pq) == multiset{};
assert PQ.Valid(pq);
PQ.InsertCorrect(pq, 1);
var pq1 := PQ.Insert(pq, 1);
assert 1 in PQ.Elements(pq1);
PQ.InsertCorrect(pq1, 2);
var pq2 := PQ.Insert(pq1, 2);
assert 2 in PQ.Elements(pq2);
PQ.IsEmptyCorrect(pq2);
PQ.RemoveMinCorrect(pq2);
var (m, pq3) := PQ.RemoveMin(pq2);
PQ.IsEmptyCorrect(pq3);
PQ.RemoveMinCorrect(pq3);
var (n, pq4) := PQ.RemoveMin(pq3);
PQ.IsEmptyCorrect(pq4);
assert PQ.IsEmpty(pq4);
assert m <= n;
}
}
| // Ch. 10: Datatype Invariants
module PQueue {
export
// Impl
provides PQueue
provides Empty, IsEmpty, Insert, RemoveMin
// Spec
provides Valid, Elements, EmptyCorrect, IsEmptyCorrect
provides InsertCorrect, RemoveMinCorrect
reveals IsMin
// Implementation
type PQueue = BraunTree
datatype BraunTree =
| Leaf
| Node(x: int, left: BraunTree, right: BraunTree)
function Empty(): PQueue {
Leaf
}
predicate IsEmpty(pq: PQueue) {
pq == Leaf
}
function Insert(pq: PQueue, y: int): PQueue {
match pq
case Leaf => Node(y, Leaf, Leaf)
case Node(x, left, right) =>
if y < x then
Node(y, Insert(right ,x), left)
else
Node(x, Insert(right, y), left)
}
function RemoveMin(pq: PQueue): (int, PQueue)
requires Valid(pq) && !IsEmpty(pq)
{
var Node(x, left, right) := pq;
(x, DeleteMin(pq))
}
function DeleteMin(pq: PQueue): PQueue
requires IsBalanced(pq) && !IsEmpty(pq)
{
// Ex. 10.4: by the IsBalanced property, pq.left is always as large or one node larger
    // than pq.right. Thus pq.left.Leaf? ==> pq.right.Leaf?
if pq.right.Leaf? then
pq.left
else if pq.left.x <= pq.right.x then
Node(pq.left.x, pq.right, DeleteMin(pq.left))
else
Node(pq.right.x, ReplaceRoot(pq.right, pq.left.x), DeleteMin(pq.left))
}
function ReplaceRoot(pq: PQueue, r: int): PQueue
requires !IsEmpty(pq)
{
// left is empty or r is smaller than either sub-root
if pq.left.Leaf? ||
(r <= pq.left.x && (pq.right.Leaf? || r <= pq.right.x))
then
// simply replace the root
Node(r, pq.left, pq.right)
// right is empty, left has one element
else if pq.right.Leaf? then
Node(pq.left.x, Node(r, Leaf, Leaf), Leaf)
// both left/right are non-empty and `r` needs to be inserted deeper in the sub-trees
else if pq.left.x < pq.right.x then
// promote left root
Node(pq.left.x, ReplaceRoot(pq.left, r), pq.right)
else
// promote right root
Node(pq.right.x, pq.left, ReplaceRoot(pq.right, r))
}
//////////////////////////////////////////////////////////////
// Specification exposed to callers
//////////////////////////////////////////////////////////////
ghost function Elements(pq: PQueue): multiset<int> {
match pq
case Leaf => multiset{}
case Node(x, left, right) =>
multiset{x} + Elements(left) + Elements(right)
}
ghost predicate Valid(pq: PQueue) {
IsBinaryHeap(pq) && IsBalanced(pq)
}
//////////////////////////////////////////////////////////////
// Lemmas
//////////////////////////////////////////////////////////////
ghost predicate IsBinaryHeap(pq: PQueue) {
match pq
case Leaf => true
case Node(x, left, right) =>
IsBinaryHeap(left) && IsBinaryHeap(right) &&
(left.Leaf? || x <= left.x) &&
(right.Leaf? || x <= right.x)
}
ghost predicate IsBalanced(pq: PQueue) {
match pq
case Leaf => true
case Node(_, left, right) =>
IsBalanced(left) && IsBalanced(right) &&
var L, R := |Elements(left)|, |Elements(right)|;
L == R || L == R + 1
}
// Ex. 10.2
lemma {:induction false} BinaryHeapStoresMin(pq: PQueue, y: int)
requires IsBinaryHeap(pq) && y in Elements(pq)
ensures pq.x <= y
{
        if pq.Node? {
            if y == pq.x {
} else if y in Elements(pq.left) {
BinaryHeapStoresMin(pq.left, y);
} else if y in Elements(pq.right) {
BinaryHeapStoresMin(pq.right, y);
}
}
}
lemma EmptyCorrect()
ensures Valid(Empty()) && Elements(Empty()) == multiset{}
{ // unfold Empty()
}
lemma IsEmptyCorrect(pq: PQueue)
requires Valid(pq)
ensures IsEmpty(pq) <==> Elements(pq) == multiset{}
{
if Elements(pq) == multiset{} {
}
}
lemma InsertCorrect(pq: PQueue, y: int)
requires Valid(pq)
ensures var pq' := Insert(pq, y);
Valid(pq') && Elements(Insert(pq, y)) == Elements(pq) + multiset{y}
{}
lemma RemoveMinCorrect(pq: PQueue)
requires Valid(pq)
requires !IsEmpty(pq)
ensures var (y, pq') := RemoveMin(pq);
Elements(pq) == Elements(pq') + multiset{y} &&
IsMin(y, Elements(pq)) &&
Valid(pq')
{
DeleteMinCorrect(pq);
}
lemma {:induction false} {:rlimit 1000} {:vcs_split_on_every_assert} DeleteMinCorrect(pq: PQueue)
requires Valid(pq) && !IsEmpty(pq)
ensures var pq' := DeleteMin(pq);
Valid(pq') &&
Elements(pq') + multiset{pq.x} == Elements(pq) &&
|Elements(pq')| == |Elements(pq)| - 1
{
if pq.left.Leaf? || pq.right.Leaf? {}
else if pq.left.x <= pq.right.x {
DeleteMinCorrect(pq.left);
} else {
var left, right := ReplaceRoot(pq.right, pq.left.x), DeleteMin(pq.left);
var pq' := Node(pq.right.x, left, right);
// Elements post-condition
calc {
Elements(pq') + multiset{pq.x};
== // defn Elements
(multiset{pq.right.x} + Elements(left) + Elements(right)) + multiset{pq.x};
== // multiset left assoc
((multiset{pq.right.x} + Elements(left)) + Elements(right)) + multiset{pq.x};
        == { ReplaceRootCorrect(pq.right, pq.left.x); }
((Elements(pq.right) + multiset{pq.left.x}) + Elements(right)) + multiset{pq.x};
== // defn right
((Elements(pq.right) + multiset{pq.left.x}) + Elements(DeleteMin(pq.left))) + multiset{pq.x};
== // multiset right assoc
(Elements(pq.right) + (multiset{pq.left.x} + Elements(DeleteMin(pq.left)))) + multiset{pq.x};
        == { DeleteMinCorrect(pq.left); }
(Elements(pq.right) + (Elements(pq.left))) + multiset{pq.x};
==
multiset{pq.x} + Elements(pq.right) + (Elements(pq.left));
==
Elements(pq);
}
// Validity
// Prove IsBinaryHeap(pq')
// IsBinaryHeap(left) && IsBinaryHeap(right) &&
DeleteMinCorrect(pq.left);
ReplaceRootCorrect(pq.right, pq.left.x);
// (left.Leaf? || x <= left.x) &&
BinaryHeapStoresMin(pq.left, pq.left.x);
BinaryHeapStoresMin(pq.right, pq.right.x);
// (right.Leaf? || x <= right.x)
}
}
lemma {:induction false} {:rlimit 1000} {:vcs_split_on_every_assert} ReplaceRootCorrect(pq: PQueue, r: int)
requires Valid(pq) && !IsEmpty(pq)
ensures var pq' := ReplaceRoot(pq, r);
Valid(pq') &&
r in Elements(pq') &&
|Elements(pq')| == |Elements(pq)| &&
Elements(pq) + multiset{r} == Elements(pq') + multiset{pq.x}
{
var pq' := ReplaceRoot(pq, r);
// Element post-condition
var left, right := pq'.left, pq'.right;
if pq.left.Leaf? ||
(r <= pq.left.x && (pq.right.Leaf? || r <= pq.right.x))
{
// simply replace the root
}
else if pq.right.Leaf? {
// both left/right are non-empty and `r` needs to be inserted deeper in the sub-trees
}
else if pq.left.x < pq.right.x {
// promote left root
ReplaceRootCorrect(pq.left, r);
calc {
Elements(pq') + multiset{pq.x};
==
(multiset{pq.left.x} + Elements(ReplaceRoot(pq.left, r)) + Elements(pq.right)) + multiset{pq.x};
== { ReplaceRootCorrect(pq.left, r); }
(Elements(pq.left) + multiset{r}) + Elements(pq.right) + multiset{pq.x};
==
Elements(pq) + multiset{r};
}
}
else {
// promote right root
ReplaceRootCorrect(pq.right, r);
calc {
Elements(pq') + multiset{pq.x};
== // defn
(multiset{pq.right.x} + Elements(pq.left) + Elements(ReplaceRoot(pq.right, r))) + multiset{pq.x};
== // assoc
(Elements(pq.left) + (Elements(ReplaceRoot(pq.right, r)) + multiset{pq.right.x})) + multiset{pq.x};
== { ReplaceRootCorrect(pq.right, r); }
(Elements(pq.left) + multiset{r} + Elements(pq.right)) + multiset{pq.x};
==
Elements(pq) + multiset{r};
}
}
}
ghost predicate IsMin(y: int, s: multiset<int>) {
y in s && forall x :: x in s ==> y <= x
}
}
// Ex 10.0, 10.1
module PQueueClient {
import PQ = PQueue
method Client() {
var pq := PQ.Empty();
PQ.EmptyCorrect();
PQ.InsertCorrect(pq, 1);
var pq1 := PQ.Insert(pq, 1);
PQ.InsertCorrect(pq1, 2);
var pq2 := PQ.Insert(pq1, 2);
PQ.IsEmptyCorrect(pq2);
PQ.RemoveMinCorrect(pq2);
var (m, pq3) := PQ.RemoveMin(pq2);
PQ.IsEmptyCorrect(pq3);
PQ.RemoveMinCorrect(pq3);
var (n, pq4) := PQ.RemoveMin(pq3);
PQ.IsEmptyCorrect(pq4);
}
}
|
750 | stunning-palm-tree_tmp_tmpr84c2iwh_ch5.dfy | function More(x: int): int {
if x <= 0 then 1 else More(x - 2) + 3
}
lemma {:induction false} Increasing(x: int)
ensures x < More(x)
{
if x <= 0 {}
else {
// x < More(x) <=> x < More(x - 2) + 3
// <=> x - 3 < More(x - 2)
// Increasing(x - 2) ==> x - 2 < More(x - 2)
// ==> x - 3 < x - 2 < More(x - 2)
Increasing(x - 2);
}
}
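// Editor's worked instance (added, derived from the definitions above): More(1) == More(-1) + 3
// == 1 + 3 == 4, so 1 < More(1) as Increasing claims; in the else branch the recursive call
// Increasing(x - 2) supplies exactly the fact x - 2 < More(x - 2) that the step needs.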
method ExampleLemmaUse(a: int) {
var b := More(a);
Increasing(a);
var c := More(b);
Increasing(b);
assert 2 <= c - a;
}
// Ex 5.0
method ExampleLemmaUse50(a: int) {
Increasing(a);
var b := More(a);
var c := More(b);
if a < 1000 {
Increasing(b);
assert 2 <= c - a;
}
assert a < 200 ==> 2 <= c - a;
}
// Ex 5.1
method ExampleLemmaUse51(a: int) {
Increasing(a);
var b := More(a);
Increasing(b);
b := More(b);
if a < 1000 {
// Increasing(More(a));
assert 2 <= b - a;
}
assert a < 200 ==> 2 <= b - a;
}
// Ex 5.6
function Ack(m: nat, n: nat): nat {
if m == 0 then
n + 1
else if n == 0 then
Ack(m - 1, 1)
else
Ack(m - 1, Ack(m, n - 1))
}
lemma {:induction false} Ack1n(m: nat, n: nat)
requires m == 1
ensures Ack(m, n) == n + 2
{
if n == 0 {
calc {
Ack(m, n);
==
Ack(m - 1, 1);
==
Ack(0, 1);
==
1 + 1;
==
2;
==
n + 2;
}
}
else {
calc {
Ack(m, n);
== // defn
Ack(m - 1, Ack(m, n - 1));
== // subs m := 1
Ack(0, Ack(1, n - 1));
== { Ack1n(1, n - 1); }
Ack(0, (n - 1) + 2);
== // arith
Ack(0, n + 1);
== // arith
(n + 1) + 1;
== // arith
n + 2;
}
}
}
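// Editor's worked instance (illustrative): Ack(1, 3) == Ack(0, Ack(1, 2)) == Ack(1, 2) + 1,
// and unfolding down to Ack(1, 0) == Ack(0, 1) == 2 gives Ack(1, 3) == 5 == 3 + 2,
// matching the n + 2 closed form proved by Ack1n.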
// Ex 5.5
function Reduce(m: nat, x: int): int {
if m == 0 then x else Reduce(m / 2, x + 1) - m
}
lemma {:induction false} ReduceUpperBound(m: nat, x: int)
ensures Reduce(m, x) <= x
{
if m == 0 { // trivial
assert Reduce(0, x) == x;
}
else {
calc {
Reduce(m, x);
== // defn
Reduce(m / 2, x + 1) - m;
<= { ReduceUpperBound(m/2, x+1); }
Reduce(m / 2, x + 1) - m + x + 1 - Reduce(m / 2, x + 1);
== // arith
x - m + 1;
<= { assert m >= 1; }
x;
}
}
}
// 5.5.1
lemma {:induction false} ReduceLowerBound(m: nat, x: int)
ensures x - 2 * m <= Reduce(m, x)
{
if m == 0 { // trivial
assert x - 2 * 0 <= x == Reduce(0, x);
}
else {
calc {
Reduce(m, x);
== // defn
Reduce(m / 2, x + 1) - m;
>= { ReduceLowerBound(m/2, x+1);
assert x + 1 - m <= Reduce(m / 2, x + 1); }
x + 1 - 2 * m;
> // arith
x - 2 * m;
}
}
}
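// Editor's worked instance (illustrative): Reduce(3, 10) == Reduce(1, 11) - 3
// == (Reduce(0, 12) - 1) - 3 == (12 - 1) - 3 == 8, which indeed lies between
// x - 2*m == 10 - 6 == 4 and x == 10, as ReduceLowerBound and ReduceUpperBound promise.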
// ------------------------------------------------------------------------------
// ----- Expr Eval --------------------------------------------------------------
// ------------------------------------------------------------------------------
// 5.8.0
datatype Expr = Const(nat)
| Var(string)
| Node(op: Op, args: List<Expr>)
datatype Op = Mul | Add
datatype List<T> = Nil | Cons(head: T, tail: List<T>)
function Eval(e: Expr, env: map<string, nat>): nat
{
match e {
case Const(c) => c
case Var(s) => if s in env then env[s] else 0
case Node(op, args) => EvalList(op, args, env)
}
}
// intro'd in 5.8.1
function Unit(op: Op): nat {
match op case Add => 0 case Mul => 1
}
function EvalList(op: Op, args: List<Expr>, env: map<string, nat>): nat
decreases args, op, env
{
match args {
case Nil => Unit(op)
case Cons(e, tail) =>
var v0, v1 := Eval(e, env), EvalList(op, tail, env);
match op
case Add => v0 + v1
case Mul => v0 * v1
}
}
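// Editor's worked example (not part of the original file): with env == map["x" := 2],
//   Eval(Node(Add, Cons(Var("x"), Cons(Const(3), Nil))), env)
//     == Eval(Var("x"), env) + EvalList(Add, Cons(Const(3), Nil), env)
//     == 2 + (3 + Unit(Add)) == 5.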
function Substitute(e: Expr, n: string, c: nat): Expr
{
match e
case Const(_) => e
case Var(s) => if s == n then Const(c) else e
case Node(op, args) => Node(op, SubstituteList(args, n, c))
}
function SubstituteList(es: List<Expr>, n: string, c: nat): List<Expr>
{
match es
case Nil => Nil
case Cons(head, tail) => Cons(Substitute(head, n, c), SubstituteList(tail, n, c))
}
lemma {:induction false} EvalSubstituteCorrect(e: Expr, n: string, c: nat, env: map<string, nat>)
ensures Eval(Substitute(e, n, c), env) == Eval(e, env[n := c])
{
match e
case Const(_) => {}
case Var(s) => {
calc {
Eval(Substitute(e, n, c), env);
Eval(if s == n then Const(c) else e, env);
if s == n then Eval(Const(c), env) else Eval(e, env);
if s == n then c else Eval(e, env);
if s == n then c else Eval(e, env[n := c]);
if s == n then Eval(e, env[n := c]) else Eval(e, env[n := c]);
Eval(e, env[n := c]);
}
}
case Node(op, args) => {
EvalSubstituteListCorrect(op, args, n, c, env);
}
}
lemma {:induction false} EvalSubstituteListCorrect(op: Op, args: List<Expr>, n: string, c: nat, env: map<string, nat>)
ensures EvalList(op, SubstituteList(args, n, c), env) == EvalList(op, args, env[n := c])
decreases args, op, n, c, env
{
match args
case Nil => {}
case Cons(head, tail) => {
// Ex 5.15
calc {
EvalList(op, SubstituteList(args, n, c), env);
== // defn SubstituteList
EvalList(op, Cons(Substitute(head, n, c), SubstituteList(tail, n, c)), env);
== // unfold defn EvalList
EvalList(op, Cons(Substitute(head, n, c), SubstituteList(tail, n, c)), env);
==
(match op
case Add => Eval(Substitute(head, n, c), env) + EvalList(op, SubstituteList(tail, n, c), env)
case Mul => Eval(Substitute(head, n, c), env) * EvalList(op, SubstituteList(tail, n, c), env));
== { EvalSubstituteCorrect(head, n, c, env); }
(match op
case Add => Eval(head, env[n := c]) + EvalList(op, SubstituteList(tail, n, c), env)
case Mul => Eval(head, env[n := c]) * EvalList(op, SubstituteList(tail, n, c), env));
== { EvalSubstituteListCorrect(op, tail, n, c, env); }
(match op
case Add => Eval(head, env[n := c]) + EvalList(op, tail, env[n := c])
case Mul => Eval(head, env[n := c]) * EvalList(op, tail, env[n := c]));
== // fold defn Eval/EvalList
EvalList(op, args, env[n := c]);
}
}
}
// Ex 5.16
lemma EvalEnv(e: Expr, n: string, env: map<string, nat>)
requires n in env.Keys
ensures Eval(e, env) == Eval(Substitute(e, n, env[n]), env)
{
match e
case Const(_) => {}
case Var(s) => {}
case Node(op, args) => {
match args
case Nil => {}
case Cons(head, tail) => { EvalEnv(head, n, env); EvalEnvList(op, tail, n, env); }
}
}
lemma EvalEnvList(op: Op, es: List<Expr>, n: string, env: map<string, nat>)
decreases es, op, n, env
requires n in env.Keys
ensures EvalList(op, es, env) == EvalList(op, SubstituteList(es, n, env[n]), env)
{
match es
case Nil => {}
case Cons(head, tail) => { EvalEnv(head, n, env); EvalEnvList(op, tail, n, env); }
}
// Ex 5.17
lemma EvalEnvDefault(e: Expr, n: string, env: map<string, nat>)
requires n !in env.Keys
ensures Eval(e, env) == Eval(Substitute(e, n, 0), env)
{
match e
case Const(_) => {}
case Var(s) => {}
case Node(op, args) => {
calc {
Eval(Substitute(e, n, 0), env);
EvalList(op, SubstituteList(args, n, 0), env);
== { EvalEnvDefaultList(op, args, n, env); }
EvalList(op, args, env);
Eval(e, env);
}
}
}
lemma EvalEnvDefaultList(op: Op, args: List<Expr>, n: string, env: map<string, nat>)
decreases args, op, n, env
requires n !in env.Keys
ensures EvalList(op, args, env) == EvalList(op, SubstituteList(args, n, 0), env)
{
match args
case Nil => {}
case Cons(head, tail) => { EvalEnvDefault(head, n, env); EvalEnvDefaultList(op, tail, n, env); }
}
// Ex 5.18
lemma SubstituteIdempotent(e: Expr, n: string, c: nat)
ensures Substitute(Substitute(e, n, c), n, c) == Substitute(e, n, c)
{
match e
case Const(_) => {}
case Var(_) => {}
case Node(op, args) => { SubstituteListIdempotent(args, n, c); }
}
lemma SubstituteListIdempotent(args: List<Expr>, n: string, c: nat)
ensures SubstituteList(SubstituteList(args, n, c), n, c) == SubstituteList(args, n, c)
{
match args
case Nil => {}
case Cons(head, tail) => { SubstituteIdempotent(head, n, c); SubstituteListIdempotent(tail, n, c); }
}
// 5.8.1
// Optimization is correct
function Optimize(e: Expr): Expr
// intrinsic
// ensures forall env: map<string, nat> :: Eval(Optimize(e), env) == Eval(e, env)
{
if e.Node? then
var args := OptimizeAndFilter(e.args, Unit(e.op));
Shorten(e.op, args)
else
e
}
lemma OptimizeCorrect(e: Expr, env: map<string, nat>)
ensures Eval(Optimize(e), env) == Eval(e, env)
{
if e.Node? {
OptimizeAndFilterCorrect(e.args, e.op, env);
ShortenCorrect(OptimizeAndFilter(e.args, Unit(e.op)), e.op, env);
// calc {
// Eval(Optimize(e), env);
// == // defn Optimize
// Eval(Shorten(e.op, OptimizeAndFilter(e.args, Unit(e.op))), env);
// == { ShortenCorrect(OptimizeAndFilter(e.args, Unit(e.op)), e.op, env); }
// Eval(Node(e.op, OptimizeAndFilter(e.args, Unit(e.op))), env);
// == { OptimizeAndFilterCorrect(e.args, e.op, env); }
// Eval(e, env);
// }
}
}
function OptimizeAndFilter(args: List<Expr>, u: nat): List<Expr>
// intrinsic
// ensures forall op: Op, env: map<string, nat> :: u == Unit(op) ==> Eval(Node(op, OptimizeAndFilter(args, u)), env) == Eval(Node(op, args), env)
{
match args
case Nil => Nil
case Cons(head, tail) =>
var hd, tl := Optimize(head), OptimizeAndFilter(tail, u);
if hd == Const(u) then tl else Cons(hd, tl)
}
lemma OptimizeAndFilterCorrect(args: List<Expr>, op: Op, env: map<string, nat>)
ensures Eval(Node(op, OptimizeAndFilter(args, Unit(op))), env) == Eval(Node(op, args), env)
{
match args
case Nil => {}
case Cons(head, tail) => {
OptimizeCorrect(head, env);
OptimizeAndFilterCorrect(tail, op, env);
// var hd, tl := Optimize(head), OptimizeAndFilter(tail, Unit(op));
// var u := Unit(op);
// if hd == Const(u) {
// calc {
// Eval(Node(op, OptimizeAndFilter(args, u)), env);
// ==
// EvalList(op, OptimizeAndFilter(args, u), env);
// == { assert OptimizeAndFilter(args, u) == tl; }
// EvalList(op, tl, env);
// ==
// Eval(Node(op, tl), env);
// == { EvalListUnitHead(hd, tl, op, env); }
// Eval(Node(op, Cons(hd, tl)), env);
// == { OptimizeCorrect(head, env); OptimizeAndFilterCorrect(tail, op, env); }
// Eval(Node(op, args), env);
// }
// } else {
// calc {
// Eval(Node(op, OptimizeAndFilter(args, u)), env);
// ==
// EvalList(op, OptimizeAndFilter(args, u), env);
// == { assert OptimizeAndFilter(args, u) == Cons(hd, tl); }
// EvalList(op, Cons(hd, tl), env);
// ==
// Eval(Node(op, Cons(hd, tl)), env);
// == { OptimizeCorrect(head, env); OptimizeAndFilterCorrect(tail, op, env); }
// Eval(Node(op, args), env);
// }
// }
}
}
lemma EvalListUnitHead(head: Expr, tail: List<Expr>, op: Op, env: map<string, nat>)
ensures Eval(head, env) == Unit(op) ==> EvalList(op, Cons(head, tail), env) == EvalList(op, tail, env)
{
// Note: verifier can prove the whole lemma with empty body!
var ehead, etail := Eval(head, env), EvalList(op, tail, env);
if ehead == Unit(op) {
match op
case Add => {
calc {
EvalList(op, Cons(head, tail), env);
== // defn EvalList
ehead + etail;
== // { assert ehead == Unit(Add); assert Unit(Add) == 0; }
etail;
}
}
case Mul => {
calc {
EvalList(op, Cons(head, tail), env);
== // defn EvalList
ehead * etail;
== // { assert ehead == 1; }
etail;
}
}
}
}
function Shorten(op: Op, args: List<Expr>): Expr {
match args
case Nil => Const(Unit(op))
// shorten the singleton list
case Cons(head, Nil) => head
// reduce units from the head
case _ => Node(op, args)
}
lemma ShortenCorrect(args: List<Expr>, op: Op, env: map<string, nat>)
ensures Eval(Shorten(op, args), env) == Eval(Node(op, args), env)
{
match args
case Nil => {}
case Cons(head, Nil) => {
calc {
Eval(Node(op, args), env);
EvalList(op, Cons(head, Nil), env);
Eval(head, env);
/* Eval(Shorten(op, Cons(head, Nil)), env); */
/* Eval(Shorten(op, args), env); */
}
}
case _ => {}
}
| function More(x: int): int {
if x <= 0 then 1 else More(x - 2) + 3
}
lemma {:induction false} Increasing(x: int)
ensures x < More(x)
{
if x <= 0 {}
else {
// x < More(x) <=> x < More(x - 2) + 3
// <=> x - 3 < More(x - 2)
// Increasing(x - 2) ==> x - 2 < More(x - 2)
// ==> x - 3 < x - 2 < More(x - 2)
Increasing(x - 2);
}
}
method ExampleLemmaUse(a: int) {
var b := More(a);
Increasing(a);
var c := More(b);
Increasing(b);
}
// Ex 5.0
method ExampleLemmaUse50(a: int) {
Increasing(a);
var b := More(a);
var c := More(b);
if a < 1000 {
Increasing(b);
}
}
// Ex 5.1
method ExampleLemmaUse51(a: int) {
Increasing(a);
var b := More(a);
Increasing(b);
b := More(b);
if a < 1000 {
// Increasing(More(a));
}
}
// Ex 5.6
function Ack(m: nat, n: nat): nat {
if m == 0 then
n + 1
else if n == 0 then
Ack(m - 1, 1)
else
Ack(m - 1, Ack(m, n - 1))
}
lemma {:induction false} Ack1n(m: nat, n: nat)
requires m == 1
ensures Ack(m, n) == n + 2
{
if n == 0 {
calc {
Ack(m, n);
==
Ack(m - 1, 1);
==
Ack(0, 1);
==
1 + 1;
==
2;
==
n + 2;
}
}
else {
calc {
Ack(m, n);
== // defn
Ack(m - 1, Ack(m, n - 1));
== // subs m := 1
Ack(0, Ack(1, n - 1));
== { Ack1n(1, n - 1); }
Ack(0, (n - 1) + 2);
== // arith
Ack(0, n + 1);
== // arith
(n + 1) + 1;
== // arith
n + 2;
}
}
}
// Ex 5.5
function Reduce(m: nat, x: int): int {
if m == 0 then x else Reduce(m / 2, x + 1) - m
}
lemma {:induction false} ReduceUpperBound(m: nat, x: int)
ensures Reduce(m, x) <= x
{
if m == 0 { // trivial
}
else {
calc {
Reduce(m, x);
== // defn
Reduce(m / 2, x + 1) - m;
<= { ReduceUpperBound(m/2, x+1); }
Reduce(m / 2, x + 1) - m + x + 1 - Reduce(m / 2, x + 1);
== // arith
x - m + 1;
<= { assert m >= 1; }
x;
}
}
}
// 5.5.1
lemma {:induction false} ReduceLowerBound(m: nat, x: int)
ensures x - 2 * m <= Reduce(m, x)
{
if m == 0 { // trivial
}
else {
calc {
Reduce(m, x);
== // defn
Reduce(m / 2, x + 1) - m;
        >= { ReduceLowerBound(m/2, x+1); }
x + 1 - 2 * m;
> // arith
x - 2 * m;
}
}
}
// ------------------------------------------------------------------------------
// ----- Expr Eval --------------------------------------------------------------
// ------------------------------------------------------------------------------
// 5.8.0
datatype Expr = Const(nat)
| Var(string)
| Node(op: Op, args: List<Expr>)
datatype Op = Mul | Add
datatype List<T> = Nil | Cons(head: T, tail: List<T>)
function Eval(e: Expr, env: map<string, nat>): nat
{
match e {
case Const(c) => c
case Var(s) => if s in env then env[s] else 0
case Node(op, args) => EvalList(op, args, env)
}
}
// intro'd in 5.8.1
function Unit(op: Op): nat {
match op case Add => 0 case Mul => 1
}
function EvalList(op: Op, args: List<Expr>, env: map<string, nat>): nat
{
match args {
case Nil => Unit(op)
case Cons(e, tail) =>
var v0, v1 := Eval(e, env), EvalList(op, tail, env);
match op
case Add => v0 + v1
case Mul => v0 * v1
}
}
function Substitute(e: Expr, n: string, c: nat): Expr
{
match e
case Const(_) => e
case Var(s) => if s == n then Const(c) else e
case Node(op, args) => Node(op, SubstituteList(args, n, c))
}
function SubstituteList(es: List<Expr>, n: string, c: nat): List<Expr>
{
match es
case Nil => Nil
case Cons(head, tail) => Cons(Substitute(head, n, c), SubstituteList(tail, n, c))
}
lemma {:induction false} EvalSubstituteCorrect(e: Expr, n: string, c: nat, env: map<string, nat>)
ensures Eval(Substitute(e, n, c), env) == Eval(e, env[n := c])
{
match e
case Const(_) => {}
case Var(s) => {
calc {
Eval(Substitute(e, n, c), env);
Eval(if s == n then Const(c) else e, env);
if s == n then Eval(Const(c), env) else Eval(e, env);
if s == n then c else Eval(e, env);
if s == n then c else Eval(e, env[n := c]);
if s == n then Eval(e, env[n := c]) else Eval(e, env[n := c]);
Eval(e, env[n := c]);
}
}
case Node(op, args) => {
EvalSubstituteListCorrect(op, args, n, c, env);
}
}
lemma {:induction false} EvalSubstituteListCorrect(op: Op, args: List<Expr>, n: string, c: nat, env: map<string, nat>)
ensures EvalList(op, SubstituteList(args, n, c), env) == EvalList(op, args, env[n := c])
{
match args
case Nil => {}
case Cons(head, tail) => {
// Ex 5.15
calc {
EvalList(op, SubstituteList(args, n, c), env);
== // defn SubstituteList
EvalList(op, Cons(Substitute(head, n, c), SubstituteList(tail, n, c)), env);
== // unfold defn EvalList
EvalList(op, Cons(Substitute(head, n, c), SubstituteList(tail, n, c)), env);
==
(match op
case Add => Eval(Substitute(head, n, c), env) + EvalList(op, SubstituteList(tail, n, c), env)
case Mul => Eval(Substitute(head, n, c), env) * EvalList(op, SubstituteList(tail, n, c), env));
== { EvalSubstituteCorrect(head, n, c, env); }
(match op
case Add => Eval(head, env[n := c]) + EvalList(op, SubstituteList(tail, n, c), env)
case Mul => Eval(head, env[n := c]) * EvalList(op, SubstituteList(tail, n, c), env));
== { EvalSubstituteListCorrect(op, tail, n, c, env); }
(match op
case Add => Eval(head, env[n := c]) + EvalList(op, tail, env[n := c])
case Mul => Eval(head, env[n := c]) * EvalList(op, tail, env[n := c]));
== // fold defn Eval/EvalList
EvalList(op, args, env[n := c]);
}
}
}
// Ex 5.16
lemma EvalEnv(e: Expr, n: string, env: map<string, nat>)
requires n in env.Keys
ensures Eval(e, env) == Eval(Substitute(e, n, env[n]), env)
{
match e
case Const(_) => {}
case Var(s) => {}
case Node(op, args) => {
match args
case Nil => {}
case Cons(head, tail) => { EvalEnv(head, n, env); EvalEnvList(op, tail, n, env); }
}
}
lemma EvalEnvList(op: Op, es: List<Expr>, n: string, env: map<string, nat>)
requires n in env.Keys
ensures EvalList(op, es, env) == EvalList(op, SubstituteList(es, n, env[n]), env)
{
match es
case Nil => {}
case Cons(head, tail) => { EvalEnv(head, n, env); EvalEnvList(op, tail, n, env); }
}
// Ex 5.17
lemma EvalEnvDefault(e: Expr, n: string, env: map<string, nat>)
requires n !in env.Keys
ensures Eval(e, env) == Eval(Substitute(e, n, 0), env)
{
match e
case Const(_) => {}
case Var(s) => {}
case Node(op, args) => {
calc {
Eval(Substitute(e, n, 0), env);
EvalList(op, SubstituteList(args, n, 0), env);
== { EvalEnvDefaultList(op, args, n, env); }
EvalList(op, args, env);
Eval(e, env);
}
}
}
lemma EvalEnvDefaultList(op: Op, args: List<Expr>, n: string, env: map<string, nat>)
requires n !in env.Keys
ensures EvalList(op, args, env) == EvalList(op, SubstituteList(args, n, 0), env)
{
match args
case Nil => {}
case Cons(head, tail) => { EvalEnvDefault(head, n, env); EvalEnvDefaultList(op, tail, n, env); }
}
// Ex 5.18
lemma SubstituteIdempotent(e: Expr, n: string, c: nat)
ensures Substitute(Substitute(e, n, c), n, c) == Substitute(e, n, c)
{
match e
case Const(_) => {}
case Var(_) => {}
case Node(op, args) => { SubstituteListIdempotent(args, n, c); }
}
lemma SubstituteListIdempotent(args: List<Expr>, n: string, c: nat)
ensures SubstituteList(SubstituteList(args, n, c), n, c) == SubstituteList(args, n, c)
{
match args
case Nil => {}
case Cons(head, tail) => { SubstituteIdempotent(head, n, c); SubstituteListIdempotent(tail, n, c); }
}
// 5.8.1
// Optimization is correct
function Optimize(e: Expr): Expr
// intrinsic
// ensures forall env: map<string, nat> :: Eval(Optimize(e), env) == Eval(e, env)
{
if e.Node? then
var args := OptimizeAndFilter(e.args, Unit(e.op));
Shorten(e.op, args)
else
e
}
lemma OptimizeCorrect(e: Expr, env: map<string, nat>)
ensures Eval(Optimize(e), env) == Eval(e, env)
{
if e.Node? {
OptimizeAndFilterCorrect(e.args, e.op, env);
ShortenCorrect(OptimizeAndFilter(e.args, Unit(e.op)), e.op, env);
// calc {
// Eval(Optimize(e), env);
// == // defn Optimize
// Eval(Shorten(e.op, OptimizeAndFilter(e.args, Unit(e.op))), env);
// == { ShortenCorrect(OptimizeAndFilter(e.args, Unit(e.op)), e.op, env); }
// Eval(Node(e.op, OptimizeAndFilter(e.args, Unit(e.op))), env);
// == { OptimizeAndFilterCorrect(e.args, e.op, env); }
// Eval(e, env);
// }
}
}
function OptimizeAndFilter(args: List<Expr>, u: nat): List<Expr>
// intrinsic
// ensures forall op: Op, env: map<string, nat> :: u == Unit(op) ==> Eval(Node(op, OptimizeAndFilter(args, u)), env) == Eval(Node(op, args), env)
{
match args
case Nil => Nil
case Cons(head, tail) =>
var hd, tl := Optimize(head), OptimizeAndFilter(tail, u);
if hd == Const(u) then tl else Cons(hd, tl)
}
lemma OptimizeAndFilterCorrect(args: List<Expr>, op: Op, env: map<string, nat>)
ensures Eval(Node(op, OptimizeAndFilter(args, Unit(op))), env) == Eval(Node(op, args), env)
{
match args
case Nil => {}
case Cons(head, tail) => {
OptimizeCorrect(head, env);
OptimizeAndFilterCorrect(tail, op, env);
// var hd, tl := Optimize(head), OptimizeAndFilter(tail, Unit(op));
// var u := Unit(op);
// if hd == Const(u) {
// calc {
// Eval(Node(op, OptimizeAndFilter(args, u)), env);
// ==
// EvalList(op, OptimizeAndFilter(args, u), env);
// == { assert OptimizeAndFilter(args, u) == tl; }
// EvalList(op, tl, env);
// ==
// Eval(Node(op, tl), env);
// == { EvalListUnitHead(hd, tl, op, env); }
// Eval(Node(op, Cons(hd, tl)), env);
// == { OptimizeCorrect(head, env); OptimizeAndFilterCorrect(tail, op, env); }
// Eval(Node(op, args), env);
// }
// } else {
// calc {
// Eval(Node(op, OptimizeAndFilter(args, u)), env);
// ==
// EvalList(op, OptimizeAndFilter(args, u), env);
// == { assert OptimizeAndFilter(args, u) == Cons(hd, tl); }
// EvalList(op, Cons(hd, tl), env);
// ==
// Eval(Node(op, Cons(hd, tl)), env);
// == { OptimizeCorrect(head, env); OptimizeAndFilterCorrect(tail, op, env); }
// Eval(Node(op, args), env);
// }
// }
}
}
lemma EvalListUnitHead(head: Expr, tail: List<Expr>, op: Op, env: map<string, nat>)
ensures Eval(head, env) == Unit(op) ==> EvalList(op, Cons(head, tail), env) == EvalList(op, tail, env)
{
// Note: verifier can prove the whole lemma with empty body!
var ehead, etail := Eval(head, env), EvalList(op, tail, env);
if ehead == Unit(op) {
match op
case Add => {
calc {
EvalList(op, Cons(head, tail), env);
== // defn EvalList
ehead + etail;
== // { assert ehead == Unit(Add); assert Unit(Add) == 0; }
etail;
}
}
case Mul => {
calc {
EvalList(op, Cons(head, tail), env);
== // defn EvalList
ehead * etail;
== // { assert ehead == 1; }
etail;
}
}
}
}
function Shorten(op: Op, args: List<Expr>): Expr {
match args
case Nil => Const(Unit(op))
// shorten the singleton list
case Cons(head, Nil) => head
// reduce units from the head
case _ => Node(op, args)
}
lemma ShortenCorrect(args: List<Expr>, op: Op, env: map<string, nat>)
ensures Eval(Shorten(op, args), env) == Eval(Node(op, args), env)
{
match args
case Nil => {}
case Cons(head, Nil) => {
calc {
Eval(Node(op, args), env);
EvalList(op, Cons(head, Nil), env);
Eval(head, env);
/* Eval(Shorten(op, Cons(head, Nil)), env); */
/* Eval(Shorten(op, args), env); */
}
}
case _ => {}
}
|
751 | stunning-palm-tree_tmp_tmpr84c2iwh_ch8.dfy | // Ch. 8: Sorting
datatype List<T> = Nil | Cons(head: T, tail: List<T>)
function Length<T>(xs: List<T>): int
ensures Length(xs) >= 0
{
match xs
case Nil => 0
case Cons(_, tl) => 1 + Length(tl)
}
function At<T>(xs: List, i: nat): T
requires i < Length(xs)
{
if i == 0 then xs.head else At(xs.tail, i - 1)
}
ghost predicate Ordered(xs: List<int>) {
match xs
case Nil => true
case Cons(_, Nil) => true
case Cons(hd0, Cons(hd1, _)) => (hd0 <= hd1) && Ordered(xs.tail)
}
lemma AllOrdered(xs: List<int>, i: nat, j: nat)
requires Ordered(xs) && i <= j < Length(xs)
ensures At(xs, i) <= At(xs, j)
{
if i != 0 {
AllOrdered(xs.tail, i - 1, j - 1);
} else if i == j {
assert i == 0 && j == 0;
} else {
assert i == 0 && i < j;
assert xs.head <= xs.tail.head;
AllOrdered(xs.tail, 0, j - 1);
}
}
// Ex. 8.0 generalize from int to T by: T(==)
ghost function Count<T(==)>(xs: List<T>, p: T): int
ensures Count(xs, p) >= 0
{
match xs
case Nil => 0
case Cons(hd, tl) => (if hd == p then 1 else 0) + Count(tl, p)
}
ghost function Project<T(==)>(xs: List<T>, p: T): List<T> {
match xs
case Nil => Nil
case Cons(hd, tl) => if hd == p then Cons(hd, Project(tl, p)) else Project(tl, p)
}
// Ex 8.1
lemma {:induction false} CountProject<T(==)>(xs: List<T>, ys: List<T>, p: T)
requires Project(xs, p) == Project(ys, p)
ensures Count(xs, p) == Count(ys, p)
{
match xs
case Nil => {
match ys
case Nil => {}
case Cons(yhd, ytl) => {
assert Count(xs, p) == 0;
assert Project(xs, p) == Nil;
assert Project(ys, p) == Nil;
assert yhd != p;
CountProject(xs, ytl, p);
}
}
case Cons(xhd, xtl) => {
match ys
case Nil => {
assert Count(ys, p) == 0;
CountProject(xtl, ys, p);
}
case Cons(yhd, ytl) => {
if xhd == p && yhd == p {
assert Count(xs, p) == 1 + Count(xtl, p);
assert Count(ys, p) == 1 + Count(ytl, p);
assert Project(xtl, p) == Project(ytl, p);
CountProject(xtl, ytl, p);
} else if xhd != p && yhd == p {
assert Count(xs, p) == Count(xtl, p);
assert Count(ys, p) == 1 + Count(ytl, p);
CountProject(xtl, ys, p);
} else if xhd == p && yhd != p {
assert Count(ys, p) == Count(ytl, p);
assert Count(xs, p) == 1 + Count(xtl, p);
CountProject(xs, ytl, p);
} else {
CountProject(xtl, ytl, p);
}
}
}
}
function InsertionSort(xs: List<int>): List<int>
{
match xs
case Nil => Nil
case Cons(x, rest) => Insert(x, InsertionSort(rest))
}
function Insert(x: int, xs: List<int>): List<int>
{
match xs
case Nil => Cons(x, Nil)
case Cons(hd, tl) => if x < hd then Cons(x, xs) else Cons(hd, Insert(x, tl))
}
lemma InsertionSortOrdered(xs: List<int>)
ensures Ordered(InsertionSort(xs))
{
match xs
case Nil =>
case Cons(hd, tl) => {
InsertionSortOrdered(tl);
InsertOrdered(hd, InsertionSort(tl));
}
}
lemma InsertOrdered(y: int, xs: List<int>)
requires Ordered(xs)
ensures Ordered(Insert(y, xs))
{
match xs
case Nil =>
case Cons(hd, tl) => {
if y < hd {
assert Ordered(Cons(y, xs));
} else {
InsertOrdered(y, tl);
assert Ordered(Cons(hd, Insert(y, tl)));
}
}
}
lemma InsertionSortSameElements(xs: List<int>, p: int)
ensures Project(xs, p) == Project(InsertionSort(xs), p)
{
match xs
case Nil =>
case Cons(hd, tl) => {
InsertSameElements(hd, InsertionSort(tl), p);
}
}
lemma InsertSameElements(y: int, xs: List<int>, p: int)
ensures Project(Cons(y, xs), p) == Project(Insert(y, xs), p)
{}
| // Ch. 8: Sorting
datatype List<T> = Nil | Cons(head: T, tail: List<T>)
function Length<T>(xs: List<T>): int
ensures Length(xs) >= 0
{
match xs
case Nil => 0
case Cons(_, tl) => 1 + Length(tl)
}
function At<T>(xs: List, i: nat): T
requires i < Length(xs)
{
if i == 0 then xs.head else At(xs.tail, i - 1)
}
ghost predicate Ordered(xs: List<int>) {
match xs
case Nil => true
case Cons(_, Nil) => true
case Cons(hd0, Cons(hd1, _)) => (hd0 <= hd1) && Ordered(xs.tail)
}
lemma AllOrdered(xs: List<int>, i: nat, j: nat)
requires Ordered(xs) && i <= j < Length(xs)
ensures At(xs, i) <= At(xs, j)
{
if i != 0 {
AllOrdered(xs.tail, i - 1, j - 1);
} else if i == j {
} else {
AllOrdered(xs.tail, 0, j - 1);
}
}
// Ex. 8.0 generalize from int to T by: T(==)
ghost function Count<T(==)>(xs: List<T>, p: T): int
ensures Count(xs, p) >= 0
{
match xs
case Nil => 0
case Cons(hd, tl) => (if hd == p then 1 else 0) + Count(tl, p)
}
ghost function Project<T(==)>(xs: List<T>, p: T): List<T> {
match xs
case Nil => Nil
case Cons(hd, tl) => if hd == p then Cons(hd, Project(tl, p)) else Project(tl, p)
}
// Ex 8.1
lemma {:induction false} CountProject<T(==)>(xs: List<T>, ys: List<T>, p: T)
requires Project(xs, p) == Project(ys, p)
ensures Count(xs, p) == Count(ys, p)
{
match xs
case Nil => {
match ys
case Nil => {}
case Cons(yhd, ytl) => {
CountProject(xs, ytl, p);
}
}
case Cons(xhd, xtl) => {
match ys
case Nil => {
CountProject(xtl, ys, p);
}
case Cons(yhd, ytl) => {
if xhd == p && yhd == p {
CountProject(xtl, ytl, p);
} else if xhd != p && yhd == p {
CountProject(xtl, ys, p);
} else if xhd == p && yhd != p {
CountProject(xs, ytl, p);
} else {
CountProject(xtl, ytl, p);
}
}
}
}
function InsertionSort(xs: List<int>): List<int>
{
match xs
case Nil => Nil
case Cons(x, rest) => Insert(x, InsertionSort(rest))
}
function Insert(x: int, xs: List<int>): List<int>
{
match xs
case Nil => Cons(x, Nil)
case Cons(hd, tl) => if x < hd then Cons(x, xs) else Cons(hd, Insert(x, tl))
}
lemma InsertionSortOrdered(xs: List<int>)
ensures Ordered(InsertionSort(xs))
{
match xs
case Nil =>
case Cons(hd, tl) => {
InsertionSortOrdered(tl);
InsertOrdered(hd, InsertionSort(tl));
}
}
lemma InsertOrdered(y: int, xs: List<int>)
requires Ordered(xs)
ensures Ordered(Insert(y, xs))
{
match xs
case Nil =>
case Cons(hd, tl) => {
if y < hd {
} else {
InsertOrdered(y, tl);
}
}
}
lemma InsertionSortSameElements(xs: List<int>, p: int)
ensures Project(xs, p) == Project(InsertionSort(xs), p)
{
match xs
case Nil =>
case Cons(hd, tl) => {
InsertSameElements(hd, InsertionSort(tl), p);
}
}
lemma InsertSameElements(y: int, xs: List<int>, p: int)
ensures Project(Cons(y, xs), p) == Project(Insert(y, xs), p)
{}
|
752 | summer-school-2020_tmp_tmpn8nf7zf0_chapter01_solutions_exercise04_solution.dfy | // Predicates
// A common thing you'll want is a function with a boolean result.
function AtLeastTwiceAsBigFunction(a:int, b:int) : bool
{
a >= 2*b
}
// It's so fantastically common that there's a shorthand for it: `predicate`.
predicate AtLeastTwiceAsBigPredicate(a:int, b:int)
{
a >= 2*b
}
function Double(a:int) : int
{
2 * a
}
lemma TheseTwoPredicatesAreEquivalent(x:int, y:int)
{
assert AtLeastTwiceAsBigFunction(x, y) == AtLeastTwiceAsBigPredicate(x, y);
}
// Add a precondition to make this lemma verify.
lemma FourTimesIsPrettyBig(x:int)
requires x>=0
{
assert AtLeastTwiceAsBigPredicate(Double(Double(x)), x);
}
| // Predicates
// A common thing you'll want is a function with a boolean result.
function AtLeastTwiceAsBigFunction(a:int, b:int) : bool
{
a >= 2*b
}
// It's so fantastically common that there's a shorthand for it: `predicate`.
predicate AtLeastTwiceAsBigPredicate(a:int, b:int)
{
a >= 2*b
}
function Double(a:int) : int
{
2 * a
}
lemma TheseTwoPredicatesAreEquivalent(x:int, y:int)
{
}
// Add a precondition to make this lemma verify.
lemma FourTimesIsPrettyBig(x:int)
requires x>=0
{
}
|
753 | summer-school-2020_tmp_tmpn8nf7zf0_chapter01_solutions_exercise11_solution.dfy | // Algebraic datatypes in their full glory. The include statement.
// A struct is a product:
// There are 3 HAlign instances, and 3 VAlign instances;
// so there are 9 TextAlign instances (all combinations).
// Note that it's okay to omit the parens for zero-element constructors.
datatype HAlign = Left | Center | Right
datatype VAlign = Top | Middle | Bottom
datatype TextAlign = TextAlign(hAlign:HAlign, vAlign:VAlign)
// If you squint, you'll believe that unions are like
// sums. There's one "Top", one "Middle", and one "Bottom"
// element, so there are three things that are of type VAlign.
// There are two instances of GraphicsAlign
datatype GraphicsAlign = Square | Round
// So if we make another tagged-union (sum) of TextAlign or GraphicsAlign,
// it has how many instances?
// (That's the exercise, to answer that question. No Dafny required.)
datatype PageElement = Text(t:TextAlign) | Graphics(g:GraphicsAlign)
// The answer is 11:
// There are 9 TextAligns.
// There are 2 GraphicsAligns.
// So there are 11 PageElements.
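// Worked count: TextAlign is a product, so |TextAlign| == |HAlign| * |VAlign| == 3 * 3 == 9;
// PageElement is a sum of TextAlign and GraphicsAlign, so |PageElement| == 9 + 2 == 11.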
// Here's a *proof* for the HAlign type (to keep it simple):
lemma NumPageElements()
ensures exists eltSet:set<HAlign> :: |eltSet| == 3 // bound is tight
ensures forall eltSet:set<HAlign> :: |eltSet| <= 3 // upper bound
{
var maxSet := { Left, Center, Right };
// Prove the bound is tight.
assert |maxSet| == 3;
// Prove upper bound.
forall eltSet:set<HAlign>
ensures |eltSet| <= 3
{
// Prove eltSet <= maxSet
forall elt | elt in eltSet ensures elt in maxSet {
if elt.Left? { } // hint at a case analysis
}
// Cardinality relation should have been obvious to Dafny;
// see comment on lemma below.
subsetCardinality(eltSet, maxSet);
}
}
// Dafny seems to be missing a heuristic to trigger this cardinality relation!
// So I proved it. This should get fixed in dafny, or at least tucked into a
// library! How embarrassing.
lemma subsetCardinality<T>(a:set<T>, b:set<T>)
requires a <= b
ensures |a| <= |b|
{
if a == {} {
assert |a| <= |b|;
} else {
var e :| e in a;
if e in b {
subsetCardinality(a - {e}, b - {e});
assert |a| <= |b|;
} else {
subsetCardinality(a - {e}, b);
assert |a| <= |b|;
}
}
}
| // Algebraic datatypes in their full glory. The include statement.
// A struct is a product:
// There are 3 HAlign instances, and 3 VAlign instances;
// so there are 9 TextAlign instances (all combinations).
// Note that it's okay to omit the parens for zero-element constructors.
datatype HAlign = Left | Center | Right
datatype VAlign = Top | Middle | Bottom
datatype TextAlign = TextAlign(hAlign:HAlign, vAlign:VAlign)
// If you squint, you'll believe that unions are like
// sums. There's one "Top", one "Middle", and one "Bottom"
// element, so there are three things that are of type VAlign.
// There are two instances of GraphicsAlign
datatype GraphicsAlign = Square | Round
// So if we make another tagged-union (sum) of TextAlign or GraphicsAlign,
// it has how many instances?
// (That's the exercise, to answer that question. No Dafny required.)
datatype PageElement = Text(t:TextAlign) | Graphics(g:GraphicsAlign)
// The answer is 11:
// There are 9 TextAligns.
// There are 2 GraphicsAligns.
// So there are 11 PageElements.
// Here's a *proof* for the HAlign type (to keep it simple):
lemma NumPageElements()
ensures exists eltSet:set<HAlign> :: |eltSet| == 3 // bound is tight
ensures forall eltSet:set<HAlign> :: |eltSet| <= 3 // upper bound
{
var maxSet := { Left, Center, Right };
// Prove the bound is tight.
// Prove upper bound.
forall eltSet:set<HAlign>
ensures |eltSet| <= 3
{
// Prove eltSet <= maxSet
forall elt | elt in eltSet ensures elt in maxSet {
if elt.Left? { } // hint at a case analysis
}
// Cardinality relation should have been obvious to Dafny;
// see comment on lemma below.
subsetCardinality(eltSet, maxSet);
}
}
// Dafny seems to be missing a heuristic to trigger this cardinality relation!
// So I proved it. This should get fixed in dafny, or at least tucked into a
// library! How embarrassing.
lemma subsetCardinality<T>(a:set<T>, b:set<T>)
requires a <= b
ensures |a| <= |b|
{
if a == {} {
} else {
var e :| e in a;
if e in b {
subsetCardinality(a - {e}, b - {e});
} else {
subsetCardinality(a - {e}, b);
}
}
}
|
754 | summer-school-2020_tmp_tmpn8nf7zf0_chapter02_solutions_exercise01_solution.dfy | predicate divides(f:nat, i:nat)
requires 1<=f
{
i % f == 0
}
predicate IsPrime(i:nat)
{
&& 1 < i
&& forall f :: 1 < f < i ==> !divides(f, i)
}
method Main()
{
assert !IsPrime(0);
assert !IsPrime(1);
assert IsPrime(2);
assert IsPrime(3);
assert divides(2, 6);
assert !IsPrime(6);
assert IsPrime(7);
assert divides(3, 9);
assert !IsPrime(9);
}
| predicate divides(f:nat, i:nat)
requires 1<=f
{
i % f == 0
}
predicate IsPrime(i:nat)
{
&& 1 < i
&& forall f :: 1 < f < i ==> !divides(f, i)
}
method Main()
{
}
|
755 | summer-school-2020_tmp_tmpn8nf7zf0_chapter02_solutions_exercise02_solution.dfy | predicate divides(f:nat, i:nat)
requires 1<=f
{
i % f == 0
}
predicate IsPrime(i:nat)
{
&& 1<i
&& ( forall f :: 1 < f < i ==> !divides(f, i) )
}
// Convincing the proof to go through requires adding
// a loop invariant and a triggering assert.
method test_prime(i:nat) returns (result:bool)
requires 1<i
ensures result == IsPrime(i)
{
var f := 2;
while (f < i)
// This loop invariant completes an inductive proof of the
// body of IsPrime. Go look at the IsPrime definition and
// see how this forall relates to it.
// Note that when f == i, this is IsPrime.
    // Also note that, when the while loop exits, i <= f.
invariant forall g :: 1 < g < f ==> !divides(g, i)
{
if i % f == 0 {
// This assert is needed to witness that !IsPrime.
// !IsPrime is !forall !divides, which rewrites to exists divides.
// Dafny rarely triggers its way to a guess for an exists (apparently
// it's tough for Z3), but mention a witness and Z3's happy.
assert divides(f, i);
return false;
}
f := f + 1;
}
return true;
}
method Main()
{
var a := test_prime(3);
assert a;
var b := test_prime(4);
assert divides(2, 4);
assert !b;
var c := test_prime(5);
assert c;
}
| predicate divides(f:nat, i:nat)
requires 1<=f
{
i % f == 0
}
predicate IsPrime(i:nat)
{
&& 1<i
&& ( forall f :: 1 < f < i ==> !divides(f, i) )
}
// Convincing the proof to go through requires adding
// a loop invariant and a triggering assert.
method test_prime(i:nat) returns (result:bool)
requires 1<i
ensures result == IsPrime(i)
{
var f := 2;
while (f < i)
// This loop invariant completes an inductive proof of the
// body of IsPrime. Go look at the IsPrime definition and
// see how this forall relates to it.
// Note that when f == i, this is IsPrime.
    // Also note that, when the while loop exits, i <= f.
{
if i % f == 0 {
// This assert is needed to witness that !IsPrime.
// !IsPrime is !forall !divides, which rewrites to exists divides.
// Dafny rarely triggers its way to a guess for an exists (apparently
// it's tough for Z3), but mention a witness and Z3's happy.
return false;
}
f := f + 1;
}
return true;
}
method Main()
{
var a := test_prime(3);
var b := test_prime(4);
var c := test_prime(5);
}
|
756 | summer-school-2020_tmp_tmpn8nf7zf0_chapter02_solutions_exercise03_solution.dfy | predicate IsSorted(s:seq<int>)
{
forall i :: 0 <= i < |s|-1 ==> s[i] <= s[i+1]
}
predicate SortSpec(input:seq<int>, output:seq<int>)
{
&& IsSorted(output)
&& multiset(output) == multiset(input)
}
//lemma SequenceConcat(s:seq<int>, pivot:int)
// requires 0<=pivot<|s|
// ensures s[..pivot] + s[pivot..] == s
//{
//}
method merge_sort(input:seq<int>) returns (output:seq<int>)
ensures SortSpec(input, output)
{
if |input| <= 1 {
output := input;
} else {
var pivotIndex := |input| / 2;
var left := input[..pivotIndex];
var right := input[pivotIndex..];
var leftSorted := left;
leftSorted := merge_sort(left);
var rightSorted := right;
rightSorted := merge_sort(right);
output := merge(leftSorted, rightSorted);
assert left + right == input; // derived via calc
// calc {
// multiset(output);
// multiset(leftSorted + rightSorted);
// multiset(leftSorted) + multiset(rightSorted);
// multiset(left) + multiset(right);
// multiset(left + right);
// { assert left + right == input; }
// multiset(input);
// }
}
}
method merge(a:seq<int>, b:seq<int>) returns (output:seq<int>)
requires IsSorted(a)
requires IsSorted(b)
// ensures IsSorted(output)
ensures SortSpec(a+b, output)
//decreases |a|+|b|
{
var ai := 0;
var bi := 0;
output := [];
while ai < |a| || bi < |b|
invariant 0 <= ai <= |a|
invariant 0 <= bi <= |b|
invariant 0 < |output| && ai < |a| ==> output[|output|-1] <= a[ai]
invariant 0 < |output| && bi < |b| ==> output[|output|-1] <= b[bi]
invariant forall i :: 0 <= i < |output|-1 ==> output[i] <= output[i+1]
invariant multiset(output) == multiset(a[..ai]) + multiset(b[..bi])
decreases |a|-ai + |b|-bi
{
ghost var outputo := output;
ghost var ao := ai;
ghost var bo := bi;
if ai == |a| || (bi < |b| && a[ai] > b[bi]) {
output := output + [b[bi]];
bi := bi + 1;
assert b[bo..bi] == [b[bo]]; // discovered by calc
} else {
output := output + [a[ai]];
ai := ai + 1;
assert a[ao..ai] == [a[ao]]; // discovered by calc
}
assert a[..ai] == a[..ao] + a[ao..ai]; // discovered by calc
assert b[..bi] == b[..bo] + b[bo..bi]; // discovered by calc
// calc {
// multiset(a[..ai]) + multiset(b[..bi]);
// multiset(a[..ao] + a[ao..ai]) + multiset(b[..bo] + b[bo..bi]);
// multiset(a[..ao]) + multiset(a[ao..ai]) + multiset(b[..bo]) + multiset(b[bo..bi]);
// multiset(a[..ao]) + multiset(b[..bo]) + multiset(a[ao..ai]) + multiset(b[bo..bi]);
// multiset(outputo) + multiset(a[ao..ai]) + multiset(b[bo..bi]);
// multiset(output);
// }
}
assert a == a[..ai]; // derived by calc
assert b == b[..bi];
// calc {
// multiset(output);
// multiset(a[..ai]) + multiset(b[..bi]);
// multiset(a) + multiset(b);
// multiset(a + b);
// }
}
method fast_sort(input:seq<int>) returns (output:seq<int>)
// ensures SortSpec(input, output)
{
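  // Placeholder body: it returns a fixed sequence, so it cannot satisfy SortSpec(input, output)
  // for arbitrary input -- which is why the postcondition above is left commented out.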
output := [1, 2, 3];
}
| predicate IsSorted(s:seq<int>)
{
forall i :: 0 <= i < |s|-1 ==> s[i] <= s[i+1]
}
predicate SortSpec(input:seq<int>, output:seq<int>)
{
&& IsSorted(output)
&& multiset(output) == multiset(input)
}
//lemma SequenceConcat(s:seq<int>, pivot:int)
// requires 0<=pivot<|s|
// ensures s[..pivot] + s[pivot..] == s
//{
//}
method merge_sort(input:seq<int>) returns (output:seq<int>)
ensures SortSpec(input, output)
{
if |input| <= 1 {
output := input;
} else {
var pivotIndex := |input| / 2;
var left := input[..pivotIndex];
var right := input[pivotIndex..];
var leftSorted := left;
leftSorted := merge_sort(left);
var rightSorted := right;
rightSorted := merge_sort(right);
output := merge(leftSorted, rightSorted);
// calc {
// multiset(output);
// multiset(leftSorted + rightSorted);
// multiset(leftSorted) + multiset(rightSorted);
// multiset(left) + multiset(right);
// multiset(left + right);
// { assert left + right == input; }
// multiset(input);
// }
}
}
method merge(a:seq<int>, b:seq<int>) returns (output:seq<int>)
requires IsSorted(a)
requires IsSorted(b)
// ensures IsSorted(output)
ensures SortSpec(a+b, output)
//decreases |a|+|b|
{
var ai := 0;
var bi := 0;
output := [];
while ai < |a| || bi < |b|
{
ghost var outputo := output;
ghost var ao := ai;
ghost var bo := bi;
if ai == |a| || (bi < |b| && a[ai] > b[bi]) {
output := output + [b[bi]];
bi := bi + 1;
} else {
output := output + [a[ai]];
ai := ai + 1;
}
// calc {
// multiset(a[..ai]) + multiset(b[..bi]);
// multiset(a[..ao] + a[ao..ai]) + multiset(b[..bo] + b[bo..bi]);
// multiset(a[..ao]) + multiset(a[ao..ai]) + multiset(b[..bo]) + multiset(b[bo..bi]);
// multiset(a[..ao]) + multiset(b[..bo]) + multiset(a[ao..ai]) + multiset(b[bo..bi]);
// multiset(outputo) + multiset(a[ao..ai]) + multiset(b[bo..bi]);
// multiset(output);
// }
}
// calc {
// multiset(output);
// multiset(a[..ai]) + multiset(b[..bi]);
// multiset(a) + multiset(b);
// multiset(a + b);
// }
}
method fast_sort(input:seq<int>) returns (output:seq<int>)
// ensures SortSpec(input, output)
{
output := [1, 2, 3];
}
|
757 | t1_MF_tmp_tmpi_sqie4j_exemplos_classes_parte1_contadorV1b.dfy | class Contador
{
var valor: int;
    // anonymous constructor
constructor ()
ensures valor == 0
{
valor := 0;
}
    // named constructor
constructor Init(v:int)
ensures valor == v
{
valor := v;
}
method Incrementa()
modifies this
ensures valor == old(valor) + 1
{
valor := valor + 1;
}
method Decrementa()
modifies this
ensures valor == old(valor) - 1
{
        valor := valor - 1;
}
method GetValor() returns (v:int)
ensures v == valor
{
return valor;
}
}
method Main()
{
    var c := new Contador(); // creates a new object on the heap via the anonymous constructor
    var c2 := new Contador.Init(10); // creates a new object on the heap via the named constructor
var v := c.GetValor();
assert v == 0;
var v2 := c2.GetValor();
assert v2 == 10;
c.Incrementa();
v := c.GetValor();
assert v == 1;
c.Decrementa();
v := c.GetValor();
assert v == 0;
}
| class Contador
{
var valor: int;
    // anonymous constructor
constructor ()
ensures valor == 0
{
valor := 0;
}
    // named constructor
constructor Init(v:int)
ensures valor == v
{
valor := v;
}
method Incrementa()
modifies this
ensures valor == old(valor) + 1
{
valor := valor + 1;
}
method Decrementa()
modifies this
ensures valor == old(valor) - 1
{
        valor := valor - 1;
}
method GetValor() returns (v:int)
ensures v == valor
{
return valor;
}
}
method Main()
{
    var c := new Contador(); // creates a new object on the heap via the anonymous constructor
    var c2 := new Contador.Init(10); // creates a new object on the heap via the named constructor
var v := c.GetValor();
var v2 := c2.GetValor();
c.Incrementa();
v := c.GetValor();
c.Decrementa();
v := c.GetValor();
}
|
758 | t1_MF_tmp_tmpi_sqie4j_exemplos_colecoes_arrays_ex4.dfy | function SomaAte(a:array<nat>, i:nat):nat
requires 0 <= i <= a.Length
reads a
{
if i == 0
then 0
else a[i-1] + SomaAte(a,i-1)
}
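// For instance, SomaAte(a, 3) unfolds to a[2] + a[1] + a[0] + 0, the sum of the first 3 elements.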
method Somatorio(a:array<nat>) returns (s:nat)
ensures s == SomaAte(a,a.Length)
{
var i := 0;
s := 0;
while i < a.Length
invariant 0 <= i && i <= a.Length
invariant s == SomaAte(a,i)
{
s := s + a[i];
i := i + 1;
}
}
| function SomaAte(a:array<nat>, i:nat):nat
requires 0 <= i <= a.Length
reads a
{
if i == 0
then 0
else a[i-1] + SomaAte(a,i-1)
}
method Somatorio(a:array<nat>) returns (s:nat)
ensures s == SomaAte(a,a.Length)
{
var i := 0;
s := 0;
while i < a.Length
{
s := s + a[i];
i := i + 1;
}
}
|
759 | t1_MF_tmp_tmpi_sqie4j_exemplos_colecoes_arrays_ex5.dfy | method Busca<T(==)>(a:array<T>, x:T) returns (r:int)
ensures 0 <= r ==> r < a.Length && a[r] == x
ensures r < 0 ==> forall i :: 0 <= i < a.Length ==> a[i] != x
{
r :=0;
while r < a.Length
invariant 0 <= r <= a.Length
invariant forall i :: 0 <= i < r ==> a[i] != x
{
if a[r]==x
{
return;
}
r := r + 1;
}
r := -1;
}
| method Busca<T(==)>(a:array<T>, x:T) returns (r:int)
ensures 0 <= r ==> r < a.Length && a[r] == x
ensures r < 0 ==> forall i :: 0 <= i < a.Length ==> a[i] != x
{
r :=0;
while r < a.Length
{
if a[r]==x
{
return;
}
r := r + 1;
}
r := -1;
}
|
760 | t1_MF_tmp_tmpi_sqie4j_exemplos_colecoes_conjuntos_ex5.dfy | function to_seq<T>(a: array<T>, i: int) : (res: seq<T>)
requires 0 <= i <= a.Length
ensures res == a[i..]
reads a
decreases a.Length-i
{
if i == a.Length
then []
else [a[i]] + to_seq(a, i + 1)
}
method Main() {
var a: array<int> := new int[2];
a[0] := 2;
a[1] := 3;
var ms: multiset<int> := multiset(a[..]);
    assert a[..] == to_seq(a, 0); // hint for Dafny
assert ms[2] == 1;
}
| function to_seq<T>(a: array<T>, i: int) : (res: seq<T>)
requires 0 <= i <= a.Length
ensures res == a[i..]
reads a
{
if i == a.Length
then []
else [a[i]] + to_seq(a, i + 1)
}
method Main() {
var a: array<int> := new int[2];
a[0] := 2;
a[1] := 3;
var ms: multiset<int> := multiset(a[..]);
}
|
761 | t1_MF_tmp_tmpi_sqie4j_exemplos_colecoes_sequences_ex3.dfy | // line contains a string of length l
// remove p characters starting at position at
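// For instance, with line == "abcde", l == 5, at == 1 and p == 2, the postconditions force the
// first l-p == 3 characters to be "ade"; positions from l-p onwards are left unconstrained.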
method Delete(line:array<char>, l:nat, at:nat, p:nat)
requires l <= line.Length
requires at+p <= l
modifies line
ensures line[..at] == old(line[..at])
ensures line[at..l-p] == old(line[at+p..l])
{
var i:nat := 0;
while i < l-(at+p)
invariant i <= l-(at+p)
invariant at+p+i >= at+i
invariant line[..at] == old(line[..at])
invariant line[at..at+i] == old(line[at+p..at+p+i])
    invariant line[at+i..l] == old(line[at+i..l]) // the future is untouchable: the not-yet-processed suffix stays unchanged
{
line[at+i] := line[at+p+i];
i := i+1;
}
}
| // line contains a string of length l
// remove p characters starting at position at
method Delete(line:array<char>, l:nat, at:nat, p:nat)
requires l <= line.Length
requires at+p <= l
modifies line
ensures line[..at] == old(line[..at])
ensures line[at..l-p] == old(line[at+p..l])
{
var i:nat := 0;
while i < l-(at+p)
{
line[at+i] := line[at+p+i];
i := i+1;
}
}
|
762 | t1_MF_tmp_tmpi_sqie4j_exemplos_introducao_ex4.dfy | function Fat(n: nat): nat
{
if n == 0 then 1 else n * Fat(n-1)
}
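// e.g. Fat(4) == 4 * 3 * 2 * 1 == 24.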
method Fatorial(n:nat) returns (r:nat)
ensures r == Fat(n)
{
r := 1;
var i := 0;
while i < n
invariant 0 <= i <= n
invariant r == Fat(i)
{
i := i + 1;
r := r * i;
}
}
| function Fat(n: nat): nat
{
if n == 0 then 1 else n * Fat(n-1)
}
method Fatorial(n:nat) returns (r:nat)
ensures r == Fat(n)
{
r := 1;
var i := 0;
while i < n
{
i := i + 1;
r := r * i;
}
}
|
763 | tangent-finder_tmp_tmpgyzf44ve_circles.dfy | method Tangent(r: array<int>, x: array<int>) returns (b: bool)
requires forall i, j :: 0 <= i <= j < x.Length ==> x[i] <= x[j] // values in x will be in ascending order or empty
requires forall i, j :: (0 <= i < r.Length && 0 <= j < x.Length) ==> (r[i] >= 0 && x[j] >= 0) // x and r will contain no negative values
ensures !b ==> forall i, j :: 0 <= i< r.Length && 0 <= j < x.Length ==> r[i] != x[j]
ensures b ==> exists i, j :: 0 <= i< r.Length && 0 <= j < x.Length && r[i] == x[j]
{
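    // Since x is sorted in ascending order, the inner loop can stop as soon as x[l] exceeds r[k]:
    // no later entry of x can equal r[k]. tangentMissing records that early exit.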
var tempB, tangentMissing, k, l := false, false, 0, 0;
while k != r.Length && !tempB
invariant 0 <= k <= r.Length
invariant tempB ==> exists i, j :: 0 <= i < r.Length && 0 <= j < x.Length && r[i] == x[j]
invariant !tempB ==> forall i, j :: (0 <= i<k && 0 <= j < x.Length) ==> r[i] != x[j]
decreases r.Length - k
{
l:= 0;
tangentMissing := false;
while l != x.Length && !tangentMissing
invariant 0 <= l <= x.Length
invariant tempB ==> exists i, j :: 0 <= i < r.Length && 0 <= j < x.Length && r[i] == x[j]
invariant !tempB ==> forall i :: 0 <= i< l ==> r[k] != x[i]
invariant tangentMissing ==> forall i :: (l <= i < x.Length) ==> r[k] != x[i]
decreases x.Length - l, !tempB, !tangentMissing
{
if r[k] == x[l] {
tempB := true;
}
if (r[k] < x[l]) {
tangentMissing := true;
}
l := l + 1;
}
k := k + 1;
}
b := tempB;
}
| method Tangent(r: array<int>, x: array<int>) returns (b: bool)
requires forall i, j :: 0 <= i <= j < x.Length ==> x[i] <= x[j] // values in x will be in ascending order or empty
requires forall i, j :: (0 <= i < r.Length && 0 <= j < x.Length) ==> (r[i] >= 0 && x[j] >= 0) // x and r will contain no negative values
ensures !b ==> forall i, j :: 0 <= i< r.Length && 0 <= j < x.Length ==> r[i] != x[j]
ensures b ==> exists i, j :: 0 <= i< r.Length && 0 <= j < x.Length && r[i] == x[j]
{
var tempB, tangentMissing, k, l := false, false, 0, 0;
while k != r.Length && !tempB
{
l:= 0;
tangentMissing := false;
while l != x.Length && !tangentMissing
{
if r[k] == x[l] {
tempB := true;
}
if (r[k] < x[l]) {
tangentMissing := true;
}
l := l + 1;
}
k := k + 1;
}
b := tempB;
}
|
764 | test-generation-examples_tmp_tmptwyqofrp_IntegerSet_dafny_IntegerSet.dfy | module IntegerSet {
class Set {
var elements: seq<int>;
constructor Set0()
ensures this.elements == []
ensures |this.elements| == 0
{
this.elements := [];
}
constructor Set(elements: seq<int>)
requires forall i, j | 0 <= i < |elements| && 0 <= j < |elements| && j != i :: elements[i] != elements[j]
ensures this.elements == elements
ensures forall i, j | 0 <= i < |this.elements| && 0 <= j < |this.elements| && j != i:: this.elements[i] != this.elements[j]
{
this.elements := elements;
}
method size() returns (size : int)
ensures size == |elements|
{
size := |elements|;
}
method addElement(element : int)
modifies this`elements
requires forall i, j | 0 <= i < |elements| && 0 <= j < |elements| && j != i :: elements[i] != elements[j]
ensures element in old(elements) ==> elements == old(elements)
ensures element !in old(elements) ==> |elements| == |old(elements)| + 1 && element in elements && forall i : int :: i in old(elements) ==> i in elements
ensures forall i, j | 0 <= i < |elements| && 0 <= j < |elements| && j != i :: elements[i] != elements[j]
{
if (element !in elements) {
elements := elements + [element];
}
}
method removeElement(element : int)
modifies this`elements
requires forall i, j | 0 <= i < |elements| && 0 <= j < |elements| && j != i :: elements[i] != elements[j]
ensures element in old(elements) ==> |elements| == |old(elements)| - 1 && (forall i : int :: i in old(elements) && i != element <==> i in elements) && element !in elements
ensures element !in old(elements) ==> elements == old(elements)
ensures forall i, j | 0 <= i < |elements| && 0 <= j < |elements| && j != i :: elements[i] != elements[j]
{
if (element in elements) {
var i := 0;
while (0 <= i < |elements|)
decreases |elements| - i
invariant 0 <= i < |elements|
invariant forall j : int :: 0 <= j < i < |elements| ==> elements[j] != element
{
if (elements[i] == element) {
if (i < |elements| - 1 && i != -1) {
elements := elements[..i] + elements[i+1..];
}
else if (i == |elements| - 1) {
elements := elements[..i];
}
break;
}
i := i + 1;
}
}
}
method contains(element : int) returns (contains : bool)
ensures contains == (element in elements)
ensures elements == old(elements)
{
contains := false;
if (element in elements) {
contains := true;
}
}
//for computing the length of the intersection of 2 sets
function intersect_length(s1 : seq<int>, s2 : seq<int>, count : int, start : int, stop : int) : int
requires 0 <= start <= stop
requires stop <= |s1|
decreases stop - start
{
if start == stop then count else (if s1[start] in s2 then intersect_length(s1, s2, count + 1, start + 1, stop) else intersect_length(s1, s2, count, start + 1, stop))
}
//for computing the length of the union of 2 sets
//pass in the length of s2 as the initial count
function union_length(s1 : seq<int>, s2 : seq<int>, count : int, i : int, stop : int) : int
requires 0 <= i <= stop
requires stop <= |s1|
decreases stop - i
{
if i == stop then count else (if s1[i] !in s2 then union_length(s1, s2, count + 1, i + 1, stop) else union_length(s1, s2, count, i + 1, stop))
}
method intersect(s : Set) returns (intersection : Set)
requires forall i, j | 0 <= i < |s.elements| && 0 <= j < |s.elements| && i != j :: s.elements[i] != s.elements[j]
requires forall i, j | 0 <= i < |this.elements| && 0 <= j < |this.elements| && i != j :: this.elements[i] != this.elements[j]
ensures forall i : int :: i in intersection.elements <==> i in s.elements && i in this.elements
ensures forall i : int :: i !in intersection.elements <==> i !in s.elements || i !in this.elements
ensures forall j, k | 0 <= j < |intersection.elements| && 0 <= k < |intersection.elements| && j != k :: intersection.elements[j] != intersection.elements[k]
ensures fresh(intersection)
{
intersection := new Set.Set0();
var inter: seq<int> := [];
var i := 0;
while (0 <= i < |this.elements|)
decreases |this.elements| - i
invariant 0 <= i < |this.elements| || i == 0
invariant forall j, k | 0 <= j < |inter| && 0 <= k < |inter| && j != k :: inter[j] != inter[k]
invariant forall j :: 0 <= j < i < |this.elements| ==> (this.elements[j] in inter <==> this.elements[j] in s.elements)
invariant forall j :: 0 <= j < |inter| ==> inter[j] in this.elements && inter[j] in s.elements
invariant |inter| <= i <= |this.elements|
{
var old_len := |inter|;
if (this.elements[i] in s.elements && this.elements[i] !in inter) {
inter := inter + [this.elements[i]];
}
if (i == |this.elements| - 1) {
assert(old_len + 1 == |inter| || old_len == |inter|);
break;
}
assert(old_len + 1 == |inter| || old_len == |inter|);
i := i + 1;
}
intersection.elements := inter;
}
method union(s : Set) returns (union : Set)
requires forall i, j | 0 <= i < |s.elements| && 0 <= j < |s.elements| && i != j :: s.elements[i] != s.elements[j]
requires forall i, j | 0 <= i < |this.elements| && 0 <= j < |this.elements| && i != j :: this.elements[i] != this.elements[j]
ensures forall i : int :: i in s.elements || i in this.elements <==> i in union.elements
ensures forall i : int :: i !in s.elements && i !in this.elements <==> i !in union.elements
ensures forall j, k | 0 <= j < |union.elements| && 0 <= k < |union.elements| && j != k :: union.elements[j] != union.elements[k]
ensures fresh(union)
{
var elems := s.elements;
union := new Set.Set0();
var i := 0;
while (0 <= i < |this.elements|)
decreases |this.elements| - i
invariant 0 <= i < |this.elements| || i == 0
invariant forall j : int :: 0 <= j < |s.elements| ==> s.elements[j] in elems
invariant forall j : int :: 0 <= j < i < |this.elements| ==> (this.elements[j] in elems <==> (this.elements[j] in s.elements || this.elements[j] in this.elements))
invariant forall j :: 0 <= j < |elems| ==> elems[j] in this.elements || elems[j] in s.elements
invariant forall j, k :: 0 <= j < |elems| && 0 <= k < |elems| && j != k ==> elems[j] != elems[k]
{
if (this.elements[i] !in elems) {
elems := elems + [this.elements[i]];
}
if (i == |this.elements| - 1) {
break;
}
i := i + 1;
}
union.elements := elems;
}
}
}
| module IntegerSet {
class Set {
var elements: seq<int>;
constructor Set0()
ensures this.elements == []
ensures |this.elements| == 0
{
this.elements := [];
}
constructor Set(elements: seq<int>)
requires forall i, j | 0 <= i < |elements| && 0 <= j < |elements| && j != i :: elements[i] != elements[j]
ensures this.elements == elements
ensures forall i, j | 0 <= i < |this.elements| && 0 <= j < |this.elements| && j != i:: this.elements[i] != this.elements[j]
{
this.elements := elements;
}
method size() returns (size : int)
ensures size == |elements|
{
size := |elements|;
}
method addElement(element : int)
modifies this`elements
requires forall i, j | 0 <= i < |elements| && 0 <= j < |elements| && j != i :: elements[i] != elements[j]
ensures element in old(elements) ==> elements == old(elements)
ensures element !in old(elements) ==> |elements| == |old(elements)| + 1 && element in elements && forall i : int :: i in old(elements) ==> i in elements
ensures forall i, j | 0 <= i < |elements| && 0 <= j < |elements| && j != i :: elements[i] != elements[j]
{
if (element !in elements) {
elements := elements + [element];
}
}
method removeElement(element : int)
modifies this`elements
requires forall i, j | 0 <= i < |elements| && 0 <= j < |elements| && j != i :: elements[i] != elements[j]
ensures element in old(elements) ==> |elements| == |old(elements)| - 1 && (forall i : int :: i in old(elements) && i != element <==> i in elements) && element !in elements
ensures element !in old(elements) ==> elements == old(elements)
ensures forall i, j | 0 <= i < |elements| && 0 <= j < |elements| && j != i :: elements[i] != elements[j]
{
if (element in elements) {
var i := 0;
while (0 <= i < |elements|)
{
if (elements[i] == element) {
if (i < |elements| - 1 && i != -1) {
elements := elements[..i] + elements[i+1..];
}
else if (i == |elements| - 1) {
elements := elements[..i];
}
break;
}
i := i + 1;
}
}
}
method contains(element : int) returns (contains : bool)
ensures contains == (element in elements)
ensures elements == old(elements)
{
contains := false;
if (element in elements) {
contains := true;
}
}
//for computing the length of the intersection of 2 sets
function intersect_length(s1 : seq<int>, s2 : seq<int>, count : int, start : int, stop : int) : int
requires 0 <= start <= stop
requires stop <= |s1|
{
if start == stop then count else (if s1[start] in s2 then intersect_length(s1, s2, count + 1, start + 1, stop) else intersect_length(s1, s2, count, start + 1, stop))
}
//for computing the length of the union of 2 sets
//pass in the length of s2 as the initial count
function union_length(s1 : seq<int>, s2 : seq<int>, count : int, i : int, stop : int) : int
requires 0 <= i <= stop
requires stop <= |s1|
{
if i == stop then count else (if s1[i] !in s2 then union_length(s1, s2, count + 1, i + 1, stop) else union_length(s1, s2, count, i + 1, stop))
}
method intersect(s : Set) returns (intersection : Set)
requires forall i, j | 0 <= i < |s.elements| && 0 <= j < |s.elements| && i != j :: s.elements[i] != s.elements[j]
requires forall i, j | 0 <= i < |this.elements| && 0 <= j < |this.elements| && i != j :: this.elements[i] != this.elements[j]
ensures forall i : int :: i in intersection.elements <==> i in s.elements && i in this.elements
ensures forall i : int :: i !in intersection.elements <==> i !in s.elements || i !in this.elements
ensures forall j, k | 0 <= j < |intersection.elements| && 0 <= k < |intersection.elements| && j != k :: intersection.elements[j] != intersection.elements[k]
ensures fresh(intersection)
{
intersection := new Set.Set0();
var inter: seq<int> := [];
var i := 0;
while (0 <= i < |this.elements|)
{
var old_len := |inter|;
if (this.elements[i] in s.elements && this.elements[i] !in inter) {
inter := inter + [this.elements[i]];
}
if (i == |this.elements| - 1) {
break;
}
i := i + 1;
}
intersection.elements := inter;
}
method union(s : Set) returns (union : Set)
requires forall i, j | 0 <= i < |s.elements| && 0 <= j < |s.elements| && i != j :: s.elements[i] != s.elements[j]
requires forall i, j | 0 <= i < |this.elements| && 0 <= j < |this.elements| && i != j :: this.elements[i] != this.elements[j]
ensures forall i : int :: i in s.elements || i in this.elements <==> i in union.elements
ensures forall i : int :: i !in s.elements && i !in this.elements <==> i !in union.elements
ensures forall j, k | 0 <= j < |union.elements| && 0 <= k < |union.elements| && j != k :: union.elements[j] != union.elements[k]
ensures fresh(union)
{
var elems := s.elements;
union := new Set.Set0();
var i := 0;
while (0 <= i < |this.elements|)
{
if (this.elements[i] !in elems) {
elems := elems + [this.elements[i]];
}
if (i == |this.elements| - 1) {
break;
}
i := i + 1;
}
union.elements := elems;
}
}
}
|
765 | test-generation-examples_tmp_tmptwyqofrp_IntegerSet_dafny_Utils.dfy | module Utils {
class Assertions<T> {
static method {:extern} assertEquals(expected : T, actual : T)
requires expected == actual
static method {:extern} expectEquals(expected : T, actual : T)
ensures expected == actual
static method {:extern} assertTrue(condition : bool)
requires condition
static method {:extern} expectTrue(condition : bool)
ensures condition
static method {:extern} assertFalse(condition : bool)
requires !condition
static method {:extern} expectFalse(condition : bool)
ensures !condition
}
}
| module Utils {
class Assertions<T> {
static method {:extern} assertEquals(expected : T, actual : T)
requires expected == actual
static method {:extern} expectEquals(expected : T, actual : T)
ensures expected == actual
static method {:extern} assertTrue(condition : bool)
requires condition
static method {:extern} expectTrue(condition : bool)
ensures condition
static method {:extern} assertFalse(condition : bool)
requires !condition
static method {:extern} expectFalse(condition : bool)
ensures !condition
}
}
|
766 | test-generation-examples_tmp_tmptwyqofrp_ParamTests_dafny_Utils.dfy | module Utils {
export
reveals Assertions
provides Assertions.assertEquals
class Assertions {
static method {:axiom} assertEquals<T>(left : T, right : T)
requires left == right
/*
public static void assertEquals<T>(T a, T b) {
Xunit.Assert.Equal(a, b);
}
*/
/*
static public <T> void assertEquals(dafny.TypeDescriptor<T> typeDescriptor, T left, T right) {
org.junit.jupiter.api.Assertions.assertEquals(left, right);
}
*/
static method {:axiom} assertTrue(value : bool)
requires value
static method {:axiom} assertFalse(value : bool)
requires !value
}
}
| module Utils {
export
reveals Assertions
provides Assertions.assertEquals
class Assertions {
static method {:axiom} assertEquals<T>(left : T, right : T)
requires left == right
/*
public static void assertEquals<T>(T a, T b) {
Xunit.Assert.Equal(a, b);
}
*/
/*
static public <T> void assertEquals(dafny.TypeDescriptor<T> typeDescriptor, T left, T right) {
org.junit.jupiter.api.Assertions.assertEquals(left, right);
}
*/
static method {:axiom} assertTrue(value : bool)
requires value
static method {:axiom} assertFalse(value : bool)
requires !value
}
}
|
767 | test-generation-examples_tmp_tmptwyqofrp_RussianMultiplication_dafny_RussianMultiplication.dfy | module RussianMultiplication {
export provides mult
method mult(n0 : int, m0 : int) returns (res : int)
ensures res == (n0 * m0);
{
var n, m : int;
res := 0;
if (n0 >= 0) {
n,m := n0, m0;
}
else {
n,m := -n0, -m0;
}
while (0 < n)
invariant (m * n + res) == (m0 * n0);
decreases n;
{
res := res + m;
n := n - 1;
}
}
}
| module RussianMultiplication {
export provides mult
method mult(n0 : int, m0 : int) returns (res : int)
ensures res == (n0 * m0);
{
var n, m : int;
res := 0;
if (n0 >= 0) {
n,m := n0, m0;
}
else {
n,m := -n0, -m0;
}
while (0 < n)
{
res := res + m;
n := n - 1;
}
}
}
|
768 | type-definition_tmp_tmp71kdzz3p_final.dfy | // -------------------------------------------------------------
// 1. Implementing type inference
// -------------------------------------------------------------
// Syntax:
//
// τ := Int | Bool | τ1->τ2
// e ::= x | λx : τ.e | true| false| e1 e2 | if e then e1 else e2
// v ::= true | false | λx : τ.e
// E ::= [·] | E e | v E | if E then e1 else e2
type VarName = string
type TypeVar = Type -> Type
datatype Type = Int | Bool | TypeVar
datatype Exp =
| Var(x: VarName)
| Lam(x: VarName, t: Type, e: Exp)
| App(e1: Exp, e2:Exp)
| True()
| False()
| Cond(e0: Exp, e1: Exp, e2: Exp)
datatype Value =
| TrueB()
| FalseB()
| Lam(x: VarName, t: Type, e: Exp)
datatype Eva =
| E()
| EExp(E : Eva, e : Exp)
| EVar(v : Value, E : Eva)
| ECond(E:Eva, e1 : Exp, e2 : Exp)
function FV(e: Exp): set<VarName> {
match(e) {
case Var(x) => {x}
case Lam(x, t, e) => FV(e) - {x} // not sure about this case
case App(e1,e2) => FV(e1) + FV(e2)
case True() => {}
case False() => {}
case Cond(e0, e1, e2) => FV(e0) + FV(e1) + FV(e2)
}
}
// Typing rules system
// -------------------------------------------------------------
// Typing rules system
type Env = map<VarName, Type>
predicate hasType(gamma: Env, e: Exp, t: Type)
{
match e {
case Var(x) => x in gamma && t == gamma[x]
case Lam(x, t, e) => hasType(gamma, e, t) // this is wrong
case App(e1,e2) => hasType(gamma, e1, t) && hasType(gamma, e2, t)
case True() => t == Bool
case False() => t == Bool
case Cond(e0, e1, e2) => hasType(gamma, e0, Bool) && hasType(gamma, e1, t) && hasType(gamma, e2, t)
}
}
// -----------------------------------------------------------------
// 2. Extending While with tuples
// -----------------------------------------------------------------
/*lemma {:induction false} extendGamma(gamma: Env, e: Exp, t: Type, x1: VarName, t1: Type)
requires hasType(gamma, e, t)
requires x1 !in FV(e)
ensures hasType(gamma[x1 := t1], e, t)
{
match e {
case Var(x) => {
assert x in FV(e);
assert x != x1;
assert gamma[x1 := t1][x] == gamma[x];
assert hasType(gamma[x1 := t1], e, t);
}
case True() => {
assert t == Bool;
}
case False() => {
assert t == Bool;
}
//case Lam(x, t, e)
case App(e1, e2) =>{
calc{
hasType(gamma, e, t);
==>
hasType(gamma, e1, TypeVar) && hasType(gamma, e2, t);
==> { extendGamma(gamma, e1, TypeVar, x1, t2); }
hasType(gamma[x1 := t1], e1, TypeVar) && hasType(gamma, e2, t);
==> { extendGamma(gamma, e1, t, x1, t1); }
hasType(gamma[x1 := t1], e0, Bool) && hasType(gamma[x1 := t1], e1, t) && hasType(gamma, e2, t);
==> { extendGamma(gamma, e2, t, x1, t1); }
hasType(gamma[x1 := t1], e0, Bool) && hasType(gamma[x1 := t1], e1, t) && hasType(gamma[x1 := t1], e2, t);
==>
hasType(gamma[x1 := t1], e, t);
}
}
case Cond(e0, e1, e2) => {
calc {
hasType(gamma, e, t);
==>
hasType(gamma, e0, Bool) && hasType(gamma, e1, t) && hasType(gamma, e2, t);
==> { extendGamma(gamma, e0, Bool, x1, t1); }
hasType(gamma[x1 := t1], e0, Bool) && hasType(gamma, e1, t) && hasType(gamma, e2, t);
==> { extendGamma(gamma, e1, t, x1, t1); }
hasType(gamma[x1 := t1], e0, Bool) && hasType(gamma[x1 := t1], e1, t) && hasType(gamma, e2, t);
==> { extendGamma(gamma, e2, t, x1, t1); }
hasType(gamma[x1 := t1], e0, Bool) && hasType(gamma[x1 := t1], e1, t) && hasType(gamma[x1 := t1], e2, t);
==>
hasType(gamma[x1 := t1], e, t);
}
}
}
}
| // -------------------------------------------------------------
// 1. Implementing type inference
// -------------------------------------------------------------
// Syntax:
//
// τ := Int | Bool | τ1->τ2
// e ::= x | λx : τ.e | true| false| e1 e2 | if e then e1 else e2
// v ::= true | false | λx : τ.e
// E ::= [·] | E e | v E | if E then e1 else e2
type VarName = string
type TypeVar = Type -> Type
datatype Type = Int | Bool | TypeVar
datatype Exp =
| Var(x: VarName)
| Lam(x: VarName, t: Type, e: Exp)
| App(e1: Exp, e2:Exp)
| True()
| False()
| Cond(e0: Exp, e1: Exp, e2: Exp)
datatype Value =
| TrueB()
| FalseB()
| Lam(x: VarName, t: Type, e: Exp)
datatype Eva =
| E()
| EExp(E : Eva, e : Exp)
| EVar(v : Value, E : Eva)
| ECond(E:Eva, e1 : Exp, e2 : Exp)
function FV(e: Exp): set<VarName> {
match(e) {
case Var(x) => {x}
case Lam(x, t, e) => FV(e) - {x} // not sure about this case
case App(e1,e2) => FV(e1) + FV(e2)
case True() => {}
case False() => {}
case Cond(e0, e1, e2) => FV(e0) + FV(e1) + FV(e2)
}
}
// Typing rules system
// -------------------------------------------------------------
// Typing rules system
type Env = map<VarName, Type>
predicate hasType(gamma: Env, e: Exp, t: Type)
{
match e {
case Var(x) => x in gamma && t == gamma[x]
case Lam(x, t, e) => hasType(gamma, e, t) // this is wrong
case App(e1,e2) => hasType(gamma, e1, t) && hasType(gamma, e2, t)
case True() => t == Bool
case False() => t == Bool
case Cond(e0, e1, e2) => hasType(gamma, e0, Bool) && hasType(gamma, e1, t) && hasType(gamma, e2, t)
}
}
// -----------------------------------------------------------------
// 2. Extending While with tuples
// -----------------------------------------------------------------
/*lemma {:induction false} extendGamma(gamma: Env, e: Exp, t: Type, x1: VarName, t1: Type)
requires hasType(gamma, e, t)
requires x1 !in FV(e)
ensures hasType(gamma[x1 := t1], e, t)
{
match e {
case Var(x) => {
}
case True() => {
}
case False() => {
}
//case Lam(x, t, e)
case App(e1, e2) =>{
calc{
hasType(gamma, e, t);
==>
hasType(gamma, e1, TypeVar) && hasType(gamma, e2, t);
==> { extendGamma(gamma, e1, TypeVar, x1, t2); }
hasType(gamma[x1 := t1], e1, TypeVar) && hasType(gamma, e2, t);
==> { extendGamma(gamma, e1, t, x1, t1); }
hasType(gamma[x1 := t1], e0, Bool) && hasType(gamma[x1 := t1], e1, t) && hasType(gamma, e2, t);
==> { extendGamma(gamma, e2, t, x1, t1); }
hasType(gamma[x1 := t1], e0, Bool) && hasType(gamma[x1 := t1], e1, t) && hasType(gamma[x1 := t1], e2, t);
==>
hasType(gamma[x1 := t1], e, t);
}
}
case Cond(e0, e1, e2) => {
calc {
hasType(gamma, e, t);
==>
hasType(gamma, e0, Bool) && hasType(gamma, e1, t) && hasType(gamma, e2, t);
==> { extendGamma(gamma, e0, Bool, x1, t1); }
hasType(gamma[x1 := t1], e0, Bool) && hasType(gamma, e1, t) && hasType(gamma, e2, t);
==> { extendGamma(gamma, e1, t, x1, t1); }
hasType(gamma[x1 := t1], e0, Bool) && hasType(gamma[x1 := t1], e1, t) && hasType(gamma, e2, t);
==> { extendGamma(gamma, e2, t, x1, t1); }
hasType(gamma[x1 := t1], e0, Bool) && hasType(gamma[x1 := t1], e1, t) && hasType(gamma[x1 := t1], e2, t);
==>
hasType(gamma[x1 := t1], e, t);
}
}
}
}
|
769 | veri-sparse_tmp_tmp15fywna6_dafny_concurrent_poc_6.dfy | class Process {
var row: nat;
var curColumn: nat;
var opsLeft: nat;
constructor (init_row: nat, initOpsLeft: nat)
ensures row == init_row
ensures opsLeft == initOpsLeft
ensures curColumn == 0
{
row := init_row;
curColumn := 0;
opsLeft := initOpsLeft;
}
}
function sum(s : seq<nat>) : nat
ensures sum(s) == 0 ==> forall i :: 0 <= i < |s| ==> s[i] == 0
{
if s == [] then 0 else s[0] + sum(s[1..])
}
lemma sum0(s : seq<nat>)
ensures sum(s) == 0 ==> forall i :: 0 <= i < |s| ==> s[i] == 0
{
if s == [] {
} else {
sum0(s[1..]);
}
}
lemma sum_const(s : seq<nat>, x : nat)
ensures (forall i :: 0 <= i < |s| ==> s[i] == x) ==> sum(s) == |s| * x
{
}
lemma sum_eq(s1 : seq<nat>, s2 : seq<nat>)
requires |s1| == |s2|
requires forall i :: 0 <= i < |s1| ==> s1[i] == s2[i]
ensures sum(s1) == sum(s2)
{
}
lemma sum_exept(s1 : seq<nat>, s2 : seq<nat>, x : nat, j : nat)
requires |s1| == |s2|
requires j < |s1|
requires forall i :: 0 <= i < |s1| ==> i != j ==> s1[i] == s2[i]
requires s1[j] == s2[j] + x
ensures sum(s1) == sum(s2) + x
{
if s1 == [] {
assert(j >= |s1|);
} else {
if j == 0 {
assert (sum(s1) == s1[0] + sum(s1[1..]));
assert (sum(s2) == s2[0] + sum(s2[1..]));
sum_eq(s1[1..], s2[1..]);
assert sum(s1[1..]) == sum(s2[1..]);
} else {
sum_exept(s1[1..], s2[1..], x, j - 1);
}
}
}
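// calcRow(M, x, row, start_index) is the dot product of row `row` of M with x,
// restricted to the columns start_index, start_index+1, ..., M.Length1 - 1.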
function calcRow(M : array2<int>, x : seq<int>, row: nat, start_index: nat) : (product: int)
reads M
requires M.Length1 == |x|
requires row < M.Length0
requires start_index <= M.Length1
decreases M.Length1 - start_index
{
if start_index == M.Length1 then
0
else
M[row, start_index] * x[start_index] + calcRow(M, x, row, start_index+1)
}
class MatrixVectorMultiplier
{
ghost predicate Valid(M: array2<int>, x: seq<int>, y: array<int>, P: set<Process>, leftOvers : array<nat>)
reads this, y, P, M, leftOvers
{
M.Length0 == y.Length &&
M.Length1 == |x| &&
|P| == y.Length &&
|P| == leftOvers.Length &&
(forall p, q :: p in P && q in P && p != q ==> p.row != q.row) &&
(forall p, q :: p in P && q in P ==> p != q) &&
(forall p :: p in P ==> 0 <= p.row < |P|) &&
(forall p :: p in P ==> 0 <= p.curColumn <= M.Length1) &&
(forall p :: p in P ==> 0 <= p.opsLeft <= M.Length1) &&
(forall p :: p in P ==> y[p.row] + calcRow(M, x, p.row, p.curColumn) == calcRow(M, x, p.row, 0)) &&
(forall p :: p in P ==> leftOvers[p.row] == p.opsLeft) &&
(forall p :: p in P ==> p.opsLeft == M.Length1 - p.curColumn) &&
(sum(leftOvers[..]) > 0 ==> exists p :: p in P && p.opsLeft > 0)
}
constructor (processes: set<Process>, M_: array2<int>, x_: seq<int>, y_: array<int>, leftOvers : array<nat>)
// Idea here is that we already have a set of processes such that each one is assigned one row.
// Dafny makes it a ginormous pain in the ass to actually create such a set, so we just assume
// we already have one.
//this states that the number of rows and processes are the same, and that there is one process
//for every row, and that no two processes are the same, nor do any two processes share the same
//row
requires (forall i :: 0 <= i < leftOvers.Length ==> leftOvers[i] == M_.Length1)
requires |processes| == leftOvers.Length
requires |processes| == M_.Length0
requires (forall p, q :: p in processes && q in processes && p != q ==> p.row != q.row)
requires (forall p, q :: p in processes && q in processes ==> p != q)
requires (forall p :: p in processes ==> 0 <= p.row < M_.Length0)
//initializes process start
requires (forall p :: p in processes ==> 0 == p.curColumn)
requires (forall p :: p in processes ==> p.opsLeft == M_.Length1)
requires (forall i :: 0 <= i < y_.Length ==> y_[i] == 0)
requires y_.Length == M_.Length0
requires |x_| == M_.Length1
requires M_.Length0 > 0
requires |x_| > 0
ensures (forall i :: 0 <= i < leftOvers.Length ==> leftOvers[i] == M_.Length1)
ensures Valid(M_, x_, y_, processes, leftOvers)
{
}
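// processNext performs one atomic step on behalf of an arbitrary process that still
// has work: it adds one term of that process's row dot product into y[p.row] and
// advances its column / remaining-operations counters, preserving Valid.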
method processNext(M: array2<int>, x: seq<int>, y: array<int>, P : set<Process>, leftOvers : array<nat>)
requires Valid(M, x, y, P, leftOvers)
requires exists p :: (p in P && p.opsLeft > 0)
requires sum(leftOvers[..]) > 0
modifies this, y, P, leftOvers
requires (forall p, q :: p in P && q in P && p != q ==> p.row != q.row)
ensures Valid(M, x, y, P, leftOvers)
ensures sum(leftOvers[..]) == sum(old(leftOvers[..])) - 1
{
var p :| p in P && p.opsLeft > 0;
y[p.row] := y[p.row] + M[p.row, p.curColumn] * x[p.curColumn];
p.opsLeft := p.opsLeft - 1;
p.curColumn := p.curColumn + 1;
leftOvers[p.row] := leftOvers[p.row] - 1;
assert (forall i :: 0 <= i < leftOvers.Length ==> i != p.row ==> leftOvers[i] == old(leftOvers[i]));
assert (leftOvers[p.row] + 1 == old(leftOvers[p.row]));
assert((forall p :: p in P ==> leftOvers[p.row] == p.opsLeft));
sum_exept(old(leftOvers[..]), leftOvers[..], 1, p.row);
}
}
method Run(processes: set<Process>, M: array2<int>, x: array<int>) returns (y: array<int>)
requires |processes| == M.Length0
requires (forall p, q :: p in processes && q in processes && p != q ==> p.row != q.row)
requires (forall p, q :: p in processes && q in processes ==> p != q)
requires (forall p :: p in processes ==> 0 <= p.row < M.Length0)
requires (forall p :: p in processes ==> 0 == p.curColumn)
requires (forall p :: p in processes ==> p.opsLeft == M.Length1)
requires x.Length > 0
requires M.Length0 > 0
requires M.Length1 == x.Length
ensures M.Length0 == y.Length
modifies processes, M, x
{
var i := 0;
y := new int[M.Length0](i => 0);
var leftOvers := new nat[M.Length0](i => M.Length1);
var mv := new MatrixVectorMultiplier(processes, M, x[..], y, leftOvers);
while sum(leftOvers[..]) > 0 && exists p :: (p in processes && p.opsLeft > 0)
invariant mv.Valid(M, x[..], y, processes, leftOvers)
invariant (forall p :: p in processes ==> y[p.row] + calcRow(M, x[..], p.row, p.curColumn) == calcRow(M, x[..], p.row, 0))
invariant sum(leftOvers[..]) >= 0
invariant (forall p, q :: p in processes && q in processes && p != q ==> p.row != q.row)
decreases sum(leftOvers[..])
{
mv.processNext(M, x[..], y, processes, leftOvers);
}
assert(sum(leftOvers[..]) == 0);
assert(forall i :: 0 <= i < y.Length ==> y[i] == calcRow(M, x[..], i, 0));
}
// lemma lemma_newProcessNotInSet(process: Process, processes: set<Process>)
// requires (forall p :: p in processes ==> p.row != process.row)
// ensures process !in processes
// {
// }
// lemma diffRowMeansDiffProcess(p1: Process, p2: Process)
// requires p1.row != p2.row
// ensures p1 != p2
// {
// }
// method createSetProcesses(numRows: nat, numColumns: nat) returns (processes: set<Process>)
// ensures |processes| == numRows
// ensures (forall p, q :: p in processes && q in processes ==> p != q)
// ensures (forall p, q :: p in processes && q in processes && p != q ==> p.row != q.row)
// ensures (forall p :: p in processes ==> 0 <= p.row < numRows)
// ensures (forall p :: p in processes ==> 0 == p.curColumn)
// ensures (forall p :: p in processes ==> p.opsLeft == numColumns)
// {
// processes := {};
// assert (forall p, q :: p in processes && q in processes ==> p != q);
// assert (forall p, q :: p in processes && q in processes && p != q ==> p.row != q.row);
// var i := 0;
// while i < numRows
// invariant i == |processes|
// invariant 0 <= i <= numRows
// invariant (forall p, q :: p in processes && q in processes && p != q ==> p.row != q.row)
// invariant (forall p, q :: p in processes && q in processes ==> p != q)
// {
// var process := new Process(i, numColumns);
// processes := processes + {process};
// i := i + 1;
// }
// }
// method Main()
// {
// var M: array2<int> := new int[3, 3];
// M[0,0] := 1;
// M[0,1] := 2;
// M[0,2] := 3;
// M[1,0] := 1;
// M[1,1] := 2;
// M[1,2] := 3;
// M[2,0] := 1;
// M[2,1] := 20;
// M[2,2] := 3;
// var x := new int[3];
// x[0] := 1;
// x[1] := -3;
// x[2] := 3;
// var p0: Process := new Process(0, 3);
// var p1: Process := new Process(1, 3);
// var p2: Process := new Process(2, 3);
// var processes := {p0, p1, p2};
// assert (p0 != p1 && p1 != p2 && p0 != p2);
// assert (forall p :: p in processes ==> p == p0 || p == p1 || p == p2);
// assert (exists p :: p in processes && p == p0);
// assert (exists p :: p in processes && p == p1);
// assert (exists p :: p in processes && p == p2);
// assert (forall p, q :: p in processes && q in processes ==> p.row != q.row);
// assert (forall p, q :: p in processes && q in processes ==> p != q);
// var y := Run(processes, M, x);
// for i := 0 to 3 {
// print "output: ", y[i], "\n";
// }
// }
| class Process {
var row: nat;
var curColumn: nat;
var opsLeft: nat;
constructor (init_row: nat, initOpsLeft: nat)
ensures row == init_row
ensures opsLeft == initOpsLeft
ensures curColumn == 0
{
row := init_row;
curColumn := 0;
opsLeft := initOpsLeft;
}
}
function sum(s : seq<nat>) : nat
ensures sum(s) == 0 ==> forall i :: 0 <= i < |s| ==> s[i] == 0
{
if s == [] then 0 else s[0] + sum(s[1..])
}
lemma sum0(s : seq<nat>)
ensures sum(s) == 0 ==> forall i :: 0 <= i < |s| ==> s[i] == 0
{
if s == [] {
} else {
sum0(s[1..]);
}
}
lemma sum_const(s : seq<nat>, x : nat)
ensures (forall i :: 0 <= i < |s| ==> s[i] == x) ==> sum(s) == |s| * x
{
}
lemma sum_eq(s1 : seq<nat>, s2 : seq<nat>)
requires |s1| == |s2|
requires forall i :: 0 <= i < |s1| ==> s1[i] == s2[i]
ensures sum(s1) == sum(s2)
{
}
lemma sum_exept(s1 : seq<nat>, s2 : seq<nat>, x : nat, j : nat)
requires |s1| == |s2|
requires j < |s1|
requires forall i :: 0 <= i < |s1| ==> i != j ==> s1[i] == s2[i]
requires s1[j] == s2[j] + x
ensures sum(s1) == sum(s2) + x
{
if s1 == [] {
} else {
if j == 0 {
sum_eq(s1[1..], s2[1..]);
} else {
sum_exept(s1[1..], s2[1..], x, j - 1);
}
}
}
function calcRow(M : array2<int>, x : seq<int>, row: nat, start_index: nat) : (product: int)
reads M
requires M.Length1 == |x|
requires row < M.Length0
requires start_index <= M.Length1
{
if start_index == M.Length1 then
0
else
M[row, start_index] * x[start_index] + calcRow(M, x, row, start_index+1)
}
class MatrixVectorMultiplier
{
ghost predicate Valid(M: array2<int>, x: seq<int>, y: array<int>, P: set<Process>, leftOvers : array<nat>)
reads this, y, P, M, leftOvers
{
M.Length0 == y.Length &&
M.Length1 == |x| &&
|P| == y.Length &&
|P| == leftOvers.Length &&
(forall p, q :: p in P && q in P && p != q ==> p.row != q.row) &&
(forall p, q :: p in P && q in P ==> p != q) &&
(forall p :: p in P ==> 0 <= p.row < |P|) &&
(forall p :: p in P ==> 0 <= p.curColumn <= M.Length1) &&
(forall p :: p in P ==> 0 <= p.opsLeft <= M.Length1) &&
(forall p :: p in P ==> y[p.row] + calcRow(M, x, p.row, p.curColumn) == calcRow(M, x, p.row, 0)) &&
(forall p :: p in P ==> leftOvers[p.row] == p.opsLeft) &&
(forall p :: p in P ==> p.opsLeft == M.Length1 - p.curColumn) &&
(sum(leftOvers[..]) > 0 ==> exists p :: p in P && p.opsLeft > 0)
}
constructor (processes: set<Process>, M_: array2<int>, x_: seq<int>, y_: array<int>, leftOvers : array<nat>)
// Idea here is that we already have a set of processes such that each one is assigned one row.
// Dafny makes it a ginormous pain in the ass to actually create such a set, so we just assume
// we already have one.
//this states that the number of rows and processes are the same, and that there is one process
//for every row, and that no two processes are the same, nor do any two processes share the same
//row
requires (forall i :: 0 <= i < leftOvers.Length ==> leftOvers[i] == M_.Length1)
requires |processes| == leftOvers.Length
requires |processes| == M_.Length0
requires (forall p, q :: p in processes && q in processes && p != q ==> p.row != q.row)
requires (forall p, q :: p in processes && q in processes ==> p != q)
requires (forall p :: p in processes ==> 0 <= p.row < M_.Length0)
//initializes process start
requires (forall p :: p in processes ==> 0 == p.curColumn)
requires (forall p :: p in processes ==> p.opsLeft == M_.Length1)
requires (forall i :: 0 <= i < y_.Length ==> y_[i] == 0)
requires y_.Length == M_.Length0
requires |x_| == M_.Length1
requires M_.Length0 > 0
requires |x_| > 0
ensures (forall i :: 0 <= i < leftOvers.Length ==> leftOvers[i] == M_.Length1)
ensures Valid(M_, x_, y_, processes, leftOvers)
{
}
method processNext(M: array2<int>, x: seq<int>, y: array<int>, P : set<Process>, leftOvers : array<nat>)
requires Valid(M, x, y, P, leftOvers)
requires exists p :: (p in P && p.opsLeft > 0)
requires sum(leftOvers[..]) > 0
modifies this, y, P, leftOvers
requires (forall p, q :: p in P && q in P && p != q ==> p.row != q.row)
ensures Valid(M, x, y, P, leftOvers)
ensures sum(leftOvers[..]) == sum(old(leftOvers[..])) - 1
{
var p :| p in P && p.opsLeft > 0;
y[p.row] := y[p.row] + M[p.row, p.curColumn] * x[p.curColumn];
p.opsLeft := p.opsLeft - 1;
p.curColumn := p.curColumn + 1;
leftOvers[p.row] := leftOvers[p.row] - 1;
sum_exept(old(leftOvers[..]), leftOvers[..], 1, p.row);
}
}
method Run(processes: set<Process>, M: array2<int>, x: array<int>) returns (y: array<int>)
requires |processes| == M.Length0
requires (forall p, q :: p in processes && q in processes && p != q ==> p.row != q.row)
requires (forall p, q :: p in processes && q in processes ==> p != q)
requires (forall p :: p in processes ==> 0 <= p.row < M.Length0)
requires (forall p :: p in processes ==> 0 == p.curColumn)
requires (forall p :: p in processes ==> p.opsLeft == M.Length1)
requires x.Length > 0
requires M.Length0 > 0
requires M.Length1 == x.Length
ensures M.Length0 == y.Length
modifies processes, M, x
{
var i := 0;
y := new int[M.Length0](i => 0);
var leftOvers := new nat[M.Length0](i => M.Length1);
var mv := new MatrixVectorMultiplier(processes, M, x[..], y, leftOvers);
while sum(leftOvers[..]) > 0 && exists p :: (p in processes && p.opsLeft > 0)
{
mv.processNext(M, x[..], y, processes, leftOvers);
}
}
// lemma lemma_newProcessNotInSet(process: Process, processes: set<Process>)
// requires (forall p :: p in processes ==> p.row != process.row)
// ensures process !in processes
// {
// }
// lemma diffRowMeansDiffProcess(p1: Process, p2: Process)
// requires p1.row != p2.row
// ensures p1 != p2
// {
// }
// method createSetProcesses(numRows: nat, numColumns: nat) returns (processes: set<Process>)
// ensures |processes| == numRows
// ensures (forall p, q :: p in processes && q in processes ==> p != q)
// ensures (forall p, q :: p in processes && q in processes && p != q ==> p.row != q.row)
// ensures (forall p :: p in processes ==> 0 <= p.row < numRows)
// ensures (forall p :: p in processes ==> 0 == p.curColumn)
// ensures (forall p :: p in processes ==> p.opsLeft == numColumns)
// {
// processes := {};
// assert (forall p, q :: p in processes && q in processes ==> p != q);
// assert (forall p, q :: p in processes && q in processes && p != q ==> p.row != q.row);
// var i := 0;
// while i < numRows
// invariant i == |processes|
// invariant 0 <= i <= numRows
// invariant (forall p, q :: p in processes && q in processes && p != q ==> p.row != q.row)
// invariant (forall p, q :: p in processes && q in processes ==> p != q)
// {
// var process := new Process(i, numColumns);
// processes := processes + {process};
// i := i + 1;
// }
// }
// method Main()
// {
// var M: array2<int> := new int[3, 3];
// M[0,0] := 1;
// M[0,1] := 2;
// M[0,2] := 3;
// M[1,0] := 1;
// M[1,1] := 2;
// M[1,2] := 3;
// M[2,0] := 1;
// M[2,1] := 20;
// M[2,2] := 3;
// var x := new int[3];
// x[0] := 1;
// x[1] := -3;
// x[2] := 3;
// var p0: Process := new Process(0, 3);
// var p1: Process := new Process(1, 3);
// var p2: Process := new Process(2, 3);
// var processes := {p0, p1, p2};
// assert (p0 != p1 && p1 != p2 && p0 != p2);
// assert (forall p :: p in processes ==> p == p0 || p == p1 || p == p2);
// assert (exists p :: p in processes && p == p0);
// assert (exists p :: p in processes && p == p1);
// assert (exists p :: p in processes && p == p2);
// assert (forall p, q :: p in processes && q in processes ==> p.row != q.row);
// assert (forall p, q :: p in processes && q in processes ==> p != q);
// var y := Run(processes, M, x);
// for i := 0 to 3 {
// print "output: ", y[i], "\n";
// }
// }
|
770 | veri-sparse_tmp_tmp15fywna6_dafny_dspmspv.dfy | function sum(X_val : array<int>, X_crd : array<nat>,
v_val : array<int>, v_crd : array<nat>, kX : nat, kV : nat, pX_end : nat, pV_end : nat) : (s : int)
reads X_val, X_crd
requires X_val.Length == X_crd.Length
requires pX_end <= X_crd.Length
requires 0 <= kX <= X_crd.Length
reads v_crd, v_val
requires v_val.Length == v_crd.Length
requires pV_end <= v_crd.Length
requires 0 <= kV <= v_crd.Length
decreases pX_end + pV_end - (kX + kV)
{
if pV_end <= kV || pX_end <= kX then
0
else if X_crd[kX] == v_crd[kV] then
sum(X_val, X_crd, v_val, v_crd, kX + 1, kV + 1, pX_end, pV_end) + v_val[kV] * X_val[kX]
else if X_crd[kX] < v_crd[kV] then
sum(X_val, X_crd, v_val, v_crd, kX + 1, kV, pX_end, pV_end)
else sum(X_val, X_crd, v_val, v_crd, kX, kV + 1, pX_end, pV_end)
}
function min(x : nat, y : nat) : nat {
if x <= y then x else y
}
predicate notin(y: nat, x : array<nat>)
reads x
{
forall i :: 0 <= i < x.Length ==> y != x[i]
}
predicate notin_seq(y: nat, x : seq<nat>)
{
forall i :: 0 <= i < |x| ==> y != x[i]
}
function index_seq(x : nat, y: seq<nat>) : (i : nat)
ensures i >= |y| ==> notin_seq(x, y)
ensures i < |y| ==> y[i] == x
{
if |y| == 0 then 0
else
if y[0] == x then 0
else 1 + index_seq(x, y[1..])
}
function index(x : nat, y: array<nat>) : (i : nat)
reads y
ensures i >= y.Length ==> notin(x, y)
ensures i < y.Length ==> y[i] == x
{
index_seq(x, y[.. ])
}
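// DSpMSpV appears to multiply a doubly-compressed sparse matrix by a sparse vector:
// X_crd1 lists the stored row indices in increasing order, X_pos[n]..X_pos[n+1]
// delimits row X_crd1[n]'s entries in X_crd/X_val, and the vector is sparse with
// coordinates v_crd and values v_val.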
method DSpMSpV(X_val : array<int>, X_crd : array<nat>, X_pos : array<nat>,
X_crd1 : array<nat>, X_len: nat,
v_val : array<int>, v_crd : array<nat>) returns (y : array<int>)
// X requirements
requires X_pos.Length >= 1
requires X_val.Length == X_crd.Length
requires forall i, j :: 0 <= i < j < X_pos.Length ==> X_pos[i] <= X_pos[j];
requires forall i :: 0 <= i < X_pos.Length ==> 0 <= X_pos[i] <= X_val.Length
requires X_len >= X_crd1.Length
requires forall i :: 0 <= i < X_crd1.Length ==> X_crd1[i] < X_len
requires X_crd1.Length < X_pos.Length
requires forall i, j :: 0 <= i < j < X_crd1.Length ==> X_crd1[i] < X_crd1[j]
// v requirements
requires v_val.Length == v_crd.Length
ensures y.Length == X_len
ensures forall i :: 0 <= i < y.Length ==>
y[i] ==
if index(i, X_crd1) < X_crd1.Length then
sum(X_val, X_crd, v_val, v_crd, X_pos[index(i, X_crd1)], 0, X_pos[index(i, X_crd1)+1], v_val.Length)
else 0
{
var N : nat := X_len;
y := new int[N](i => 0);
var n : nat := 0;
var kX , pX_end : nat;
var kV : nat;
var pV_end : nat := v_val.Length;
var kX0, kV0 : nat;
var k : nat;
var pX_end1 := X_crd1.Length;
while n < pX_end1
invariant n <= X_crd1.Length
invariant forall i :: 0 <= i < n ==> y[X_crd1[i]] == sum(X_val, X_crd, v_val, v_crd, X_pos[i], 0, X_pos[i+1], pV_end)
invariant forall i :: n <= i < X_crd1.Length ==> y[X_crd1[i]] == 0
invariant forall i :: 0 <= i < y.Length ==> notin(i, X_crd1) ==> y[i] == 0
{
kX := X_pos[n];
pX_end := X_pos[n + 1];
kV := 0;
while (kX < pX_end && kV < pV_end)
invariant X_pos[n] <= kX <= pX_end
invariant 0 <= kV <= pV_end
invariant forall i :: n < i < X_crd1.Length ==> y[X_crd1[i]] == 0
invariant forall i :: 0 <= i < y.Length ==> notin(i, X_crd1) ==> y[i] == 0
invariant forall i :: 0 <= i < n ==> y[X_crd1[i]] == sum(X_val, X_crd, v_val, v_crd, X_pos[i], 0, X_pos[i+1], pV_end)
invariant y[X_crd1[n]] + sum(X_val, X_crd, v_val, v_crd, kX, kV, pX_end, pV_end) == sum(X_val, X_crd, v_val, v_crd, X_pos[n], 0, X_pos[n+1], pV_end)
decreases pX_end + pV_end - (kX + kV)
{
kX0 := X_crd[kX];
kV0 := v_crd[kV];
k := min(kV0, kX0);
if (kX0 == k && kV0 == k) {
y[X_crd1[n]] := y[X_crd1[n]] + X_val[kX] * v_val[kV];
kX := kX + 1;
kV := kV + 1;
} else if (kX0 == k) {
kX := kX + 1;
} else if (kV0 == k) {
kV := kV + 1;
}
}
n := n + 1;
}
}
method Main() {
var X_val := new int[4](i => 1);
var X_crd := new nat[4](i => if i <= 3 then (3 - i) * 2 else 0);
var X_pos := new nat[5](i => i);
var X_crd1 := new nat[4](i => i * 2);
var X_pos1 := new nat[2](i => i * 8);
var X_len := 8;
var v_val := new int[4](i => 30 + i);
var v_crd := new nat[4](i => i * 2);
var v_pos := new nat[2](i => if i == 0 then 0 else 4);
var y := DSpMSpV(
X_val,
X_crd,
X_pos,
X_crd1,
X_len,
v_val,
v_crd
);
var i := 0;
while i < 8 { print y[i]; print "; "; i := i + 1; }
}
| function sum(X_val : array<int>, X_crd : array<nat>,
v_val : array<int>, v_crd : array<nat>, kX : nat, kV : nat, pX_end : nat, pV_end : nat) : (s : int)
reads X_val, X_crd
requires X_val.Length == X_crd.Length
requires pX_end <= X_crd.Length
requires 0 <= kX <= X_crd.Length
reads v_crd, v_val
requires v_val.Length == v_crd.Length
requires pV_end <= v_crd.Length
requires 0 <= kV <= v_crd.Length
{
if pV_end <= kV || pX_end <= kX then
0
else if X_crd[kX] == v_crd[kV] then
sum(X_val, X_crd, v_val, v_crd, kX + 1, kV + 1, pX_end, pV_end) + v_val[kV] * X_val[kX]
else if X_crd[kX] < v_crd[kV] then
sum(X_val, X_crd, v_val, v_crd, kX + 1, kV, pX_end, pV_end)
else sum(X_val, X_crd, v_val, v_crd, kX, kV + 1, pX_end, pV_end)
}
function min(x : nat, y : nat) : nat {
if x <= y then x else y
}
predicate notin(y: nat, x : array<nat>)
reads x
{
forall i :: 0 <= i < x.Length ==> y != x[i]
}
predicate notin_seq(y: nat, x : seq<nat>)
{
forall i :: 0 <= i < |x| ==> y != x[i]
}
function index_seq(x : nat, y: seq<nat>) : (i : nat)
ensures i >= |y| ==> notin_seq(x, y)
ensures i < |y| ==> y[i] == x
{
if |y| == 0 then 0
else
if y[0] == x then 0
else 1 + index_seq(x, y[1..])
}
function index(x : nat, y: array<nat>) : (i : nat)
reads y
ensures i >= y.Length ==> notin(x, y)
ensures i < y.Length ==> y[i] == x
{
index_seq(x, y[.. ])
}
method DSpMSpV(X_val : array<int>, X_crd : array<nat>, X_pos : array<nat>,
X_crd1 : array<nat>, X_len: nat,
v_val : array<int>, v_crd : array<nat>) returns (y : array<int>)
// X requirements
requires X_pos.Length >= 1
requires X_val.Length == X_crd.Length
requires forall i, j :: 0 <= i < j < X_pos.Length ==> X_pos[i] <= X_pos[j];
requires forall i :: 0 <= i < X_pos.Length ==> 0 <= X_pos[i] <= X_val.Length
requires X_len >= X_crd1.Length
requires forall i :: 0 <= i < X_crd1.Length ==> X_crd1[i] < X_len
requires X_crd1.Length < X_pos.Length
requires forall i, j :: 0 <= i < j < X_crd1.Length ==> X_crd1[i] < X_crd1[j]
// v requirements
requires v_val.Length == v_crd.Length
ensures y.Length == X_len
ensures forall i :: 0 <= i < y.Length ==>
y[i] ==
if index(i, X_crd1) < X_crd1.Length then
sum(X_val, X_crd, v_val, v_crd, X_pos[index(i, X_crd1)], 0, X_pos[index(i, X_crd1)+1], v_val.Length)
else 0
{
var N : nat := X_len;
y := new int[N](i => 0);
var n : nat := 0;
var kX , pX_end : nat;
var kV : nat;
var pV_end : nat := v_val.Length;
var kX0, kV0 : nat;
var k : nat;
var pX_end1 := X_crd1.Length;
while n < pX_end1
{
kX := X_pos[n];
pX_end := X_pos[n + 1];
kV := 0;
while (kX < pX_end && kV < pV_end)
{
kX0 := X_crd[kX];
kV0 := v_crd[kV];
k := min(kV0, kX0);
if (kX0 == k && kV0 == k) {
y[X_crd1[n]] := y[X_crd1[n]] + X_val[kX] * v_val[kV];
kX := kX + 1;
kV := kV + 1;
} else if (kX0 == k) {
kX := kX + 1;
} else if (kV0 == k) {
kV := kV + 1;
}
}
n := n + 1;
}
}
method Main() {
var X_val := new int[4](i => 1);
var X_crd := new nat[4](i => if i <= 3 then (3 - i) * 2 else 0);
var X_pos := new nat[5](i => i);
var X_crd1 := new nat[4](i => i * 2);
var X_pos1 := new nat[2](i => i * 8);
var X_len := 8;
var v_val := new int[4](i => 30 + i);
var v_crd := new nat[4](i => i * 2);
var v_pos := new nat[2](i => if i == 0 then 0 else 4);
var y := DSpMSpV(
X_val,
X_crd,
X_pos,
X_crd1,
X_len,
v_val,
v_crd
);
var i := 0;
while i < 8 { print y[i]; print "; "; i := i + 1; }
}
|
771 | veri-sparse_tmp_tmp15fywna6_dafny_spmv.dfy | function sum(X_val: array<int>, X_crd: array<nat>, v : array<int>, b : int, k : int) : (s : int)
reads X_val, X_crd, v
requires X_val.Length >= b >= 0
requires k <= X_val.Length
requires X_val.Length == X_crd.Length
requires forall i :: 0 <= i < X_crd.Length ==> 0 <= X_crd[i] < v.Length
decreases k - b
{
if k <= b then
0
else sum(X_val, X_crd, v, b + 1, k) + X_val[b] * v[X_crd[b]]
}
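// SpMV computes y := X * v for a matrix X stored in CSR-like form: X_pos[i]..X_pos[i+1]
// delimits the entries of row i, with column indices in X_crd and values in X_val.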
method SpMV(X_val: array<int>, X_crd: array<nat>, X_pos: array<nat>, v : array<int>) returns (y : array<int>)
requires X_crd.Length >= 1
requires X_crd.Length == X_val.Length;
requires forall i, j :: 0 <= i < j < X_pos.Length ==> X_pos[i] <= X_pos[j];
requires forall i :: 0 <= i < X_crd.Length ==> X_crd[i] < v.Length
requires forall i :: 0 <= i < X_pos.Length ==> X_pos[i] <= X_val.Length
requires X_pos.Length >= 1
ensures y.Length + 1 == X_pos.Length
ensures forall i :: 0 <= i < y.Length ==> y[i] == sum(X_val, X_crd, v, X_pos[i], X_pos[i + 1])
{
var N: nat := X_pos.Length - 1;
y := new int[N](i => 0);
var n: nat := 0;
while n < N
invariant n <= y.Length
invariant forall i :: 0 <= i < n ==> y[i] == sum(X_val, X_crd, v, X_pos[i], X_pos[i + 1])
invariant forall i :: n <= i < y.Length ==> y[i] == 0
{
var k: nat := X_pos[n];
while k < X_pos[n + 1]
invariant k <= X_pos[n + 1]
invariant forall i :: n < i < y.Length ==> y[i] == 0
invariant forall i :: 0 <= i < n ==> y[i] == sum(X_val, X_crd, v, X_pos[i], X_pos[i + 1])
invariant y[n] + sum(X_val, X_crd, v, k, X_pos[n+1]) == sum(X_val, X_crd, v, X_pos[n], X_pos[n+1])
{
y[n] := y[n] + X_val[k] * v[X_crd[k]];
k := k + 1;
}
n := n + 1;
}
}
// 0 0 0 0 0 0 1 0
// 0 0 0 0 0 0 0 0
// 0 0 0 0 1 0 0 0
// 0 0 0 0 0 0 0 0
// 0 0 1 0 0 0 0 0
// 0 0 0 0 0 0 0 0
// 1 0 0 0 0 0 0 0
// 0 0 0 0 0 0 0 0
method Main() {
var X_val := new int[4](i => 1);
var X_crd := new nat[4](i => if i <= 3 then (3 - i) * 2 else 0);
var X_pos := new nat[9];
X_pos[0] := 0;
X_pos[1] := 1;
X_pos[2] := 1;
X_pos[3] := 2;
X_pos[4] := 2;
X_pos[5] := 3;
X_pos[6] := 3;
X_pos[7] := 4;
X_pos[8] := 4;
var v := new int[8];
v[0] := 30;
v[1] := 0;
v[2] := 31;
v[3] := 0;
v[4] := 32;
v[5] := 0;
v[6] := 33;
v[7] := 0;
var y := SpMV(
X_val,
X_crd,
X_pos,
v
);
var i := 0;
while i < 8 { print y[i]; print "; "; i := i + 1; }
}
| function sum(X_val: array<int>, X_crd: array<nat>, v : array<int>, b : int, k : int) : (s : int)
reads X_val, X_crd, v
requires X_val.Length >= b >= 0
requires k <= X_val.Length
requires X_val.Length == X_crd.Length
requires forall i :: 0 <= i < X_crd.Length ==> 0 <= X_crd[i] < v.Length
{
if k <= b then
0
else sum(X_val, X_crd, v, b + 1, k) + X_val[b] * v[X_crd[b]]
}
method SpMV(X_val: array<int>, X_crd: array<nat>, X_pos: array<nat>, v : array<int>) returns (y : array<int>)
requires X_crd.Length >= 1
requires X_crd.Length == X_val.Length;
requires forall i, j :: 0 <= i < j < X_pos.Length ==> X_pos[i] <= X_pos[j];
requires forall i :: 0 <= i < X_crd.Length ==> X_crd[i] < v.Length
requires forall i :: 0 <= i < X_pos.Length ==> X_pos[i] <= X_val.Length
requires X_pos.Length >= 1
ensures y.Length + 1 == X_pos.Length
ensures forall i :: 0 <= i < y.Length ==> y[i] == sum(X_val, X_crd, v, X_pos[i], X_pos[i + 1])
{
var N: nat := X_pos.Length - 1;
y := new int[N](i => 0);
var n: nat := 0;
while n < N
{
var k: nat := X_pos[n];
while k < X_pos[n + 1]
{
y[n] := y[n] + X_val[k] * v[X_crd[k]];
k := k + 1;
}
n := n + 1;
}
}
// 0 0 0 0 0 0 1 0
// 0 0 0 0 0 0 0 0
// 0 0 0 0 1 0 0 0
// 0 0 0 0 0 0 0 0
// 0 0 1 0 0 0 0 0
// 0 0 0 0 0 0 0 0
// 1 0 0 0 0 0 0 0
// 0 0 0 0 0 0 0 0
method Main() {
var X_val := new int[4](i => 1);
var X_crd := new nat[4](i => if i <= 3 then (3 - i) * 2 else 0);
var X_pos := new nat[9];
X_pos[0] := 0;
X_pos[1] := 1;
X_pos[2] := 1;
X_pos[3] := 2;
X_pos[4] := 2;
X_pos[5] := 3;
X_pos[6] := 3;
X_pos[7] := 4;
X_pos[8] := 4;
var v := new int[8];
v[0] := 30;
v[1] := 0;
v[2] := 31;
v[3] := 0;
v[4] := 32;
v[5] := 0;
v[6] := 33;
v[7] := 0;
var y := SpMV(
X_val,
X_crd,
X_pos,
v
);
var i := 0;
while i < 8 { print y[i]; print "; "; i := i + 1; }
}
|
772 | veri-titan_tmp_tmpbg2iy0kf_spec_crypto_fntt512.dfy | // include "ct_std2rev_model.dfy"
// abstract module ntt_impl {
// import opened Seq
// import opened Power
// import opened Power2
// import opened DivMod
// import opened Mul
// import opened pows_of_2
// import opened ntt_index
// import opened ntt_512_params
// import opened mq_polys
// import opened poly_view
// import opened nth_root
// import opened forward_ntt
// method j_loop(a: elems, p: elems, t: pow2_t, d: pow2_t, j: nat, u: nat, ghost view: loop_view)
// returns (a': elems)
// requires u == j * (2 * d.full);
// requires view.j_loop_inv(a, d, j);
// requires t == view.lsize();
// requires p == rev_mixed_powers_mont_table();
// requires j < view.lsize().full;
// ensures view.j_loop_inv(a', d, j + 1);
// {
// view.s_loop_inv_pre_lemma(a, d, j);
// assert (2 * j) * d.full == j * (2 * d.full) by {
// LemmaMulProperties();
// }
// rev_mixed_powers_mont_table_lemma(t, d, j);
// var w := p[t.full + j];
// // modmul(x_value(2 * j, d), R);
// var s := u;
// a' := a;
// ghost var bi := 0;
// while (s < u + d.full)
// invariant view.s_loop_inv(a', d, j, s-u);
// {
// var a :elems := a';
// var bi := s-u;
// var _ := view.higher_points_view_index_lemma(a, d, j, bi);
// var e := a[s];
// var o := a[s + d.full];
// var x := montmul(o, w);
// a' := a[s+d.full := mqsub(e, x)];
// a' := a'[s := mqadd(e, x)];
// s := s + 1;
// view.s_loop_inv_peri_lemma(a, a', d, j, bi);
// }
// assert s == u + d.full;
// view.s_loop_inv_post_lemma(a', d, j, d.full);
// }
// method t_loop(a: elems, p: elems, t: pow2_t, d: pow2_t, ghost coeffs: elems)
// returns (a': elems)
// requires 0 <= d.exp < N.exp;
// requires t_loop_inv(a, pow2_double(d), coeffs);
// requires p == rev_mixed_powers_mont_table();
// requires t == block_size(pow2_double(d));
// ensures t_loop_inv(a', d, coeffs);
// {
// ghost var view := build_loop_view(coeffs, d);
// view.j_loop_inv_pre_lemma(a, d);
// var j := 0;
// var u: nat := 0;
// a' := a;
// while (j < t.full)
// invariant t == view.lsize();
// invariant u == j * (2 * d.full);
// invariant view.j_loop_inv(a', d, j);
// {
// a' := j_loop(a', p, t, d, j, u, view);
// calc == {
// u + 2 * d.full;
// j * (2 * d.full) + 2 * d.full;
// {
// LemmaMulProperties();
// }
// (j + 1) * (2 * d.full);
// }
// j := j + 1;
// u := u + 2 * d.full;
// }
// view.j_loop_inv_post_lemma(a', d, j);
// }
// method mulntt_ct(a: elems, p: elems)
// returns (a': elems)
// requires N == pow2_t_cons(512, 9);
// requires p == rev_mixed_powers_mont_table();
// ensures points_eval_inv(a', a, x_value, pow2(0));
// {
// var d := pow2(9);
// assert d == N by {
// Nth_root_lemma();
// }
// var t := pow2(0);
// ghost var coeffs := a;
// t_loop_inv_pre_lemma(a);
// a' := a;
// while (t.exp < 9)
// invariant 0 <= d.exp <= N.exp;
// invariant t == block_size(d);
// invariant t_loop_inv(a', d, coeffs);
// {
// d := pow2_half(d);
// a' := t_loop(a', p, t, d, coeffs);
// t := pow2_double(t);
// }
// t_loop_inv_post_lemma(a', d, coeffs);
// }
// }
| // include "ct_std2rev_model.dfy"
// abstract module ntt_impl {
// import opened Seq
// import opened Power
// import opened Power2
// import opened DivMod
// import opened Mul
// import opened pows_of_2
// import opened ntt_index
// import opened ntt_512_params
// import opened mq_polys
// import opened poly_view
// import opened nth_root
// import opened forward_ntt
// method j_loop(a: elems, p: elems, t: pow2_t, d: pow2_t, j: nat, u: nat, ghost view: loop_view)
// returns (a': elems)
// requires u == j * (2 * d.full);
// requires view.j_loop_inv(a, d, j);
// requires t == view.lsize();
// requires p == rev_mixed_powers_mont_table();
// requires j < view.lsize().full;
// ensures view.j_loop_inv(a', d, j + 1);
// {
// view.s_loop_inv_pre_lemma(a, d, j);
// assert (2 * j) * d.full == j * (2 * d.full) by {
// LemmaMulProperties();
// }
// rev_mixed_powers_mont_table_lemma(t, d, j);
// var w := p[t.full + j];
// // modmul(x_value(2 * j, d), R);
// var s := u;
// a' := a;
// ghost var bi := 0;
// while (s < u + d.full)
// invariant view.s_loop_inv(a', d, j, s-u);
// {
// var a :elems := a';
// var bi := s-u;
// var _ := view.higher_points_view_index_lemma(a, d, j, bi);
// var e := a[s];
// var o := a[s + d.full];
// var x := montmul(o, w);
// a' := a[s+d.full := mqsub(e, x)];
// a' := a'[s := mqadd(e, x)];
// s := s + 1;
// view.s_loop_inv_peri_lemma(a, a', d, j, bi);
// }
// assert s == u + d.full;
// view.s_loop_inv_post_lemma(a', d, j, d.full);
// }
// method t_loop(a: elems, p: elems, t: pow2_t, d: pow2_t, ghost coeffs: elems)
// returns (a': elems)
// requires 0 <= d.exp < N.exp;
// requires t_loop_inv(a, pow2_double(d), coeffs);
// requires p == rev_mixed_powers_mont_table();
// requires t == block_size(pow2_double(d));
// ensures t_loop_inv(a', d, coeffs);
// {
// ghost var view := build_loop_view(coeffs, d);
// view.j_loop_inv_pre_lemma(a, d);
// var j := 0;
// var u: nat := 0;
// a' := a;
// while (j < t.full)
// invariant t == view.lsize();
// invariant u == j * (2 * d.full);
// invariant view.j_loop_inv(a', d, j);
// {
// a' := j_loop(a', p, t, d, j, u, view);
// calc == {
// u + 2 * d.full;
// j * (2 * d.full) + 2 * d.full;
// {
// LemmaMulProperties();
// }
// (j + 1) * (2 * d.full);
// }
// j := j + 1;
// u := u + 2 * d.full;
// }
// view.j_loop_inv_post_lemma(a', d, j);
// }
// method mulntt_ct(a: elems, p: elems)
// returns (a': elems)
// requires N == pow2_t_cons(512, 9);
// requires p == rev_mixed_powers_mont_table();
// ensures points_eval_inv(a', a, x_value, pow2(0));
// {
// var d := pow2(9);
// assert d == N by {
// Nth_root_lemma();
// }
// var t := pow2(0);
// ghost var coeffs := a;
// t_loop_inv_pre_lemma(a);
// a' := a;
// while (t.exp < 9)
// invariant 0 <= d.exp <= N.exp;
// invariant t == block_size(d);
// invariant t_loop_inv(a', d, coeffs);
// {
// d := pow2_half(d);
// a' := t_loop(a', p, t, d, coeffs);
// t := pow2_double(t);
// }
// t_loop_inv_post_lemma(a', d, coeffs);
// }
// }
|
773 | veribetrkv-osdi2020_tmp_tmpra431m8q_docker-hdd_src_veribetrkv-linear_lib_Base_SetBijectivity.dfy | module SetBijectivity {
lemma BijectivityImpliesEqualCardinality<A, B>(setA: set<A>, setB: set<B>, relation: iset<(A, B)>)
requires forall a :: a in setA ==> exists b :: b in setB && (a, b) in relation
requires forall a1, a2, b :: a1 in setA && a2 in setA && b in setB && (a1, b) in relation && (a2, b) in relation ==> a1 == a2
requires forall b :: b in setB ==> exists a :: a in setA && (a, b) in relation
requires forall a, b1, b2 :: b1 in setB && b2 in setB && a in setA && (a, b1) in relation && (a, b2) in relation ==> b1 == b2
ensures |setA| == |setB|
{
if |setA| == 0 {
} else {
var a :| a in setA;
var b :| b in setB && (a, b) in relation;
var setA' := setA - {a};
var setB' := setB - {b};
BijectivityImpliesEqualCardinality(setA', setB', relation);
}
}
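// Proof idea: peel one element x off setA; the "line" {x} x setB is in bijection
// with setB (via the relation pairing (a, b) with b), so it has |setB| elements,
// and the rest follows from the induction hypothesis on setA - {x}.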
lemma CrossProductCardinality<A, B>(setA: set<A>, setB: set<B>, cp: set<(A,B)>)
requires cp == (set a, b | a in setA && b in setB :: (a,b))
ensures |cp| == |setA| * |setB|;
{
if |setA| == 0 {
assert setA == {};
assert cp == {};
} else {
var x :| x in setA;
var setA' := setA - {x};
var cp' := (set a, b | a in setA' && b in setB :: (a,b));
var line := (set a, b | a == x && b in setB :: (a,b));
assert |line| == |setB| by {
var relation := iset p : ((A, B), B) | p.0.1 == p.1;
forall b | b in setB
ensures exists p :: p in line && (p, b) in relation
{
var p := (x, b);
assert p in line && (p, b) in relation;
}
BijectivityImpliesEqualCardinality(line, setB, relation);
}
CrossProductCardinality(setA', setB, cp');
assert cp == cp' + line;
assert cp' !! line;
assert |cp'| == |setA'| * |setB|;
assert |setA'| == |setA| - 1;
assert |cp|
== |cp' + line|
== |cp'| + |line|
== (|setA| - 1) * |setB| + |setB|
== |setA| * |setB|;
}
}
}
| module SetBijectivity {
lemma BijectivityImpliesEqualCardinality<A, B>(setA: set<A>, setB: set<B>, relation: iset<(A, B)>)
requires forall a :: a in setA ==> exists b :: b in setB && (a, b) in relation
requires forall a1, a2, b :: a1 in setA && a2 in setA && b in setB && (a1, b) in relation && (a2, b) in relation ==> a1 == a2
requires forall b :: b in setB ==> exists a :: a in setA && (a, b) in relation
requires forall a, b1, b2 :: b1 in setB && b2 in setB && a in setA && (a, b1) in relation && (a, b2) in relation ==> b1 == b2
ensures |setA| == |setB|
{
if |setA| == 0 {
} else {
var a :| a in setA;
var b :| b in setB && (a, b) in relation;
var setA' := setA - {a};
var setB' := setB - {b};
BijectivityImpliesEqualCardinality(setA', setB', relation);
}
}
lemma CrossProductCardinality<A, B>(setA: set<A>, setB: set<B>, cp: set<(A,B)>)
requires cp == (set a, b | a in setA && b in setB :: (a,b))
ensures |cp| == |setA| * |setB|;
{
if |setA| == 0 {
} else {
var x :| x in setA;
var setA' := setA - {x};
var cp' := (set a, b | a in setA' && b in setB :: (a,b));
var line := (set a, b | a == x && b in setB :: (a,b));
var relation := iset p : ((A, B), B) | p.0.1 == p.1;
forall b | b in setB
ensures exists p :: p in line && (p, b) in relation
{
var p := (x, b);
}
BijectivityImpliesEqualCardinality(line, setB, relation);
CrossProductCardinality(setA', setB, cp');
}
}
}
|
774 | veribetrkv-osdi2020_tmp_tmpra431m8q_docker-hdd_src_veribetrkv-linear_lib_Base_Sets.dfy | module Sets {
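// Each cardinality proof below hints the verifier by splitting b into the disjoint
// pieces a and b - a, from which the stated relations between |a| and |b| follow.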
lemma {:opaque} ProperSubsetImpliesSmallerCardinality<T>(a: set<T>, b: set<T>)
requires a < b
ensures |a| < |b|
{
assert |b| == |a| + |b-a|;
}
lemma {:opaque} SetInclusionImpliesSmallerCardinality<T>(a: set<T>, b: set<T>)
requires a <= b
ensures |a| <= |b|
{
assert b == a + (b - a);
}
lemma {:opaque} SetInclusionImpliesStrictlySmallerCardinality<T>(a: set<T>, b: set<T>)
requires a < b
ensures |a| < |b|
{
assert b == a + (b - a);
}
lemma {:opaque} SetInclusionAndEqualCardinalityImpliesSetEquality<T>(a: set<T>, b: set<T>)
requires a <= b
requires |a| == |b|
ensures a == b
{
assert b == a + (b - a);
}
function SetRange(n: int) : set<int>
{
set i | 0 <= i < n
}
lemma CardinalitySetRange(n: int)
requires n >= 0
ensures |SetRange(n)| == n
{
if n == 0 {
} else {
CardinalitySetRange(n-1);
assert SetRange(n)
== SetRange(n-1) + {n-1};
}
}
}
| module Sets {
lemma {:opaque} ProperSubsetImpliesSmallerCardinality<T>(a: set<T>, b: set<T>)
requires a < b
ensures |a| < |b|
{
}
lemma {:opaque} SetInclusionImpliesSmallerCardinality<T>(a: set<T>, b: set<T>)
requires a <= b
ensures |a| <= |b|
{
}
lemma {:opaque} SetInclusionImpliesStrictlySmallerCardinality<T>(a: set<T>, b: set<T>)
requires a < b
ensures |a| < |b|
{
}
lemma {:opaque} SetInclusionAndEqualCardinalityImpliesSetEquality<T>(a: set<T>, b: set<T>)
requires a <= b
requires |a| == |b|
ensures a == b
{
}
function SetRange(n: int) : set<int>
{
set i | 0 <= i < n
}
lemma CardinalitySetRange(n: int)
requires n >= 0
ensures |SetRange(n)| == n
{
if n == 0 {
} else {
CardinalitySetRange(n-1);
}
}
}
|
775 | verification-class_tmp_tmpz9ik148s_2022_chapter05-distributed-state-machines_exercises_UtilitiesLibrary.dfy | module UtilitiesLibrary {
function DropLast<T>(theSeq: seq<T>) : seq<T>
requires 0 < |theSeq|
{
theSeq[..|theSeq|-1]
}
function Last<T>(theSeq: seq<T>) : T
requires 0 < |theSeq|
{
theSeq[|theSeq|-1]
}
function UnionSeqOfSets<T>(theSets: seq<set<T>>) : set<T>
{
if |theSets| == 0 then {} else
UnionSeqOfSets(DropLast(theSets)) + Last(theSets)
}
// As you can see, Dafny's recursion heuristics easily complete the recursion
// induction proofs, so these two statements could easily be ensures of
// UnionSeqOfSets. However, the quantifiers combine with native map axioms
// to be a bit trigger-happy, so we've pulled them into independent lemmas
// you can invoke only when needed.
// Suggestion: hide calls to this lemma in an
// assert P by { SetsAreSubsetsOfUnion(...) }
// construct so you can get your conclusion without "polluting" the rest of the
// lemma proof context with this enthusiastic forall.
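// For example (with hypothetical sets s1 and s2 of a common element type):
// assert s1 <= UnionSeqOfSets([s1, s2]) by { SetsAreSubsetsOfUnion([s1, s2]); }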
lemma SetsAreSubsetsOfUnion<T>(theSets: seq<set<T>>)
ensures forall idx | 0<=idx<|theSets| :: theSets[idx] <= UnionSeqOfSets(theSets)
{
}
lemma EachUnionMemberBelongsToASet<T>(theSets: seq<set<T>>)
ensures forall member | member in UnionSeqOfSets(theSets) ::
exists idx :: 0<=idx<|theSets| && member in theSets[idx]
{
}
// Convenience lemma for learning a particular index (invoking Hilbert's
// Choose on the exists in EachUnionMemberBelongsToASet).
lemma GetIndexForMember<T>(theSets: seq<set<T>>, member: T) returns (idx:int)
requires member in UnionSeqOfSets(theSets)
ensures 0<=idx<|theSets|
ensures member in theSets[idx]
{
EachUnionMemberBelongsToASet(theSets);
var chosenIdx :| 0<=chosenIdx<|theSets| && member in theSets[chosenIdx];
idx := chosenIdx;
}
datatype Option<T> = Some(value:T) | None
function {:opaque} MapRemoveOne<K,V>(m:map<K,V>, key:K) : (m':map<K,V>)
ensures forall k :: k in m && k != key ==> k in m'
ensures forall k :: k in m' ==> k in m && k != key
ensures forall j :: j in m' ==> m'[j] == m[j]
ensures |m'.Keys| <= |m.Keys|
ensures |m'| <= |m|
{
var m':= map j | j in m && j != key :: m[j];
assert m'.Keys == m.Keys - {key};
m'
}
////////////// Library code for exercises 12 and 14 /////////////////////
// This is tagged union, a "sum" datatype.
datatype Direction = North() | East() | South() | West()
function TurnRight(direction:Direction) : Direction
{
// This function introduces two new bits of syntax.
// First, the if-else expression: if <bool> then T else T
// Second, the element.Ctor? built-in predicate, which tests whether
// the datatype `element` was built by `Ctor`.
if direction.North?
then East
else if direction.East?
then South
else if direction.South?
then West
else // By elimination, West!
North
}
lemma Rotation()
{
assert TurnRight(North) == East;
}
function TurnLeft(direction:Direction) : Direction
{
// Another nice way to take apart a datatype element is with match-case
// construct. Each case argument is a constructor; each body must be of the
// same type, which is the type of the entire `match` expression.
match direction {
case North => West
case West => South
case South => East // Try changing "East" to 7.
case East => North
}
}
////////////// Library code for exercises 13 and 14 /////////////////////
// This whole product-sum idea gets clearer when we use both powers
// (struct/product, union/sum) at the same time.
datatype Meat = Salami | Ham
datatype Cheese = Provolone | Swiss | Cheddar | Jack
datatype Veggie = Olive | Onion | Pepper
datatype Order =
Sandwich(meat:Meat, cheese:Cheese)
| Pizza(meat:Meat, veggie:Veggie)
| Appetizer(cheese:Cheese)
// There are 2 Meats, 4 Cheeses, and 3 Veggies.
// Thus there are 8 Sandwiches, 6 Pizzas, and 4 Appetizers.
// Thus there are 8+6+4 = 18 Orders.
// This is why they're called "algebraic" datatypes.
}
| module UtilitiesLibrary {
function DropLast<T>(theSeq: seq<T>) : seq<T>
requires 0 < |theSeq|
{
theSeq[..|theSeq|-1]
}
function Last<T>(theSeq: seq<T>) : T
requires 0 < |theSeq|
{
theSeq[|theSeq|-1]
}
function UnionSeqOfSets<T>(theSets: seq<set<T>>) : set<T>
{
if |theSets| == 0 then {} else
UnionSeqOfSets(DropLast(theSets)) + Last(theSets)
}
// As you can see, Dafny's recursion heuristics easily complete the recursion
// induction proofs, so these two statements could easily be ensures of
// UnionSeqOfSets. However, the quantifiers combine with native map axioms
// to be a bit trigger-happy, so we've pulled them into independent lemmas
// you can invoke only when needed.
// Suggestion: hide calls to this lemma in an
// assert P by { SetsAreSubsetsOfUnion(...) }
// construct so you can get your conclusion without "polluting" the rest of the
// lemma proof context with this enthusiastic forall.
lemma SetsAreSubsetsOfUnion<T>(theSets: seq<set<T>>)
ensures forall idx | 0<=idx<|theSets| :: theSets[idx] <= UnionSeqOfSets(theSets)
{
}
lemma EachUnionMemberBelongsToASet<T>(theSets: seq<set<T>>)
ensures forall member | member in UnionSeqOfSets(theSets) ::
exists idx :: 0<=idx<|theSets| && member in theSets[idx]
{
}
// Convenience lemma for learning a particular index (invoking Hilbert's
// Choose on the exists in EachUnionMemberBelongsToASet).
lemma GetIndexForMember<T>(theSets: seq<set<T>>, member: T) returns (idx:int)
requires member in UnionSeqOfSets(theSets)
ensures 0<=idx<|theSets|
ensures member in theSets[idx]
{
EachUnionMemberBelongsToASet(theSets);
var chosenIdx :| 0<=chosenIdx<|theSets| && member in theSets[chosenIdx];
idx := chosenIdx;
}
datatype Option<T> = Some(value:T) | None
function {:opaque} MapRemoveOne<K,V>(m:map<K,V>, key:K) : (m':map<K,V>)
ensures forall k :: k in m && k != key ==> k in m'
ensures forall k :: k in m' ==> k in m && k != key
ensures forall j :: j in m' ==> m'[j] == m[j]
ensures |m'.Keys| <= |m.Keys|
ensures |m'| <= |m|
{
var m':= map j | j in m && j != key :: m[j];
m'
}
////////////// Library code for exercises 12 and 14 /////////////////////
// This is tagged union, a "sum" datatype.
datatype Direction = North() | East() | South() | West()
function TurnRight(direction:Direction) : Direction
{
// This function introduces two new bits of syntax.
// First, the if-else expression: if <bool> then T else T
// Second, the element.Ctor? built-in predicate, which tests whether
// the datatype `element` was built by `Ctor`.
if direction.North?
then East
else if direction.East?
then South
else if direction.South?
then West
else // By elimination, West!
North
}
lemma Rotation()
{
}
function TurnLeft(direction:Direction) : Direction
{
// Another nice way to take apart a datatype element is with match-case
// construct. Each case argument is a constructor; each body must be of the
// same type, which is the type of the entire `match` expression.
match direction {
case North => West
case West => South
case South => East // Try changing "East" to 7.
case East => North
}
}
////////////// Library code for exercises 13 and 14 /////////////////////
// This whole product-sum idea gets clearer when we use both powers
// (struct/product, union/sum) at the same time.
datatype Meat = Salami | Ham
datatype Cheese = Provolone | Swiss | Cheddar | Jack
datatype Veggie = Olive | Onion | Pepper
datatype Order =
Sandwich(meat:Meat, cheese:Cheese)
| Pizza(meat:Meat, veggie:Veggie)
| Appetizer(cheese:Cheese)
// There are 2 Meats, 4 Cheeses, and 3 Veggies.
// Thus there are 8 Sandwiches, 6 Pizzas, and 4 Appetizers.
// Thus there are 8+6+4 = 18 Orders.
// This is why they're called "algebraic" datatypes.
}
|
776 | verified-isort_tmp_tmp7hhb8ei__dafny_isort.dfy | // Dafny is designed to be familiar to those programming in an OOP language like
// Java, so, we have plain old ordinary mutable arrays rather than the functional
// list data structures that we saw in Coq. This means that unlike our Coq
// and Python examples, we can sort our array in-place and avoid needing a whole
// bunch of intermediary allocations.
// Just as before, we need a way of defining what it means for an array of nats
// to be sorted:
predicate sorted(a: seq<nat>)
{
true // TODO
}
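// A sketch of what the TODO above would usually say (not plugged in here, since the
// loops in Isort would then also need matching sortedness invariants):
// forall i, j :: 0 <= i < j < |a| ==> a[i] <= a[j]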
method Isort(a: array<nat>)
modifies a
ensures sorted(a[..])
{
if a.Length == 0 {
return;
}
// Here is the thing that we have to get Dafny to understand:
//
// We are going to iterate from left to right in the input array. As we
// progress, everything to the left of the current element is going to be
// in sorted order, so that when we finish iterating through the array all
// elements are going to be in their correct order.
//
// Let's look at some iteration of that main loop, where we're neither at the
// beginning nor at the end of the process:
//
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
// +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
// a | ✓ | ✓ | ✓ | ✓ | ✓ | = | | | | | | | | | | |
// +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
// \------------------/^
// These elements are |
// in sorted order n == 5: this element will be placed in its right place
// by the end of the current loop iteration...
//
// In particular, there is some k on [0..n) such that:
//
// 1. k is on [1..n) when a[k-1] <= a[n] and a[k] > a[n];
// 2. k == 0 when a[0] > a[n].
//
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
// +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
// a | <=| <=| <=| > | > | = | | | | | | | | | | |
// +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
// \----------/^\-----/
// <= a[n] | > a[n]
// |
// +--- k == 3: This is the index of where a[5] should go!
// So, we'll shift all the elements on [k, n) over by one, so they're now
// located on [k+1, n+1), and then place the old value of a[n] into a[k].
//
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
// +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
// a | <=| <=| <=| = | > | > | | | | | | | | | | |
// +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
// \----------/ \-----/
// <= a[n] > a[n]
//
// And now we have one more element in the correct place! We are now ready
// to begin the next iteration of the loop.
var n := 1;
while n < a.Length
{
var curr := a[n];
// 1. Find our pivot position k, the location where we should insert the
// current value.
var k := n;
while k > 0 && a[k-1] > curr
{
k := k-1;
}
a[n] := a[n-1]; // Hack to help the verifier with invariant sorted(a[k..n+1])
// 2. Shift all elements between k and n to the right by one.
var j := n-1;
while j >= k
{
a[j+1] := a[j];
j := j-1;
}
// 3. Put curr in its place!
a[k] := curr;
n := n + 1;
}
}
| // Dafny is designed to be familiar to those programming in an OOP language like
// Java, so, we have plain old ordinary mutable arrays rather than the functional
// list data structures that we saw in Coq. This means that unlike our Coq
// and Python examples, we can sort our array in-place and avoid needing a whole
// bunch of intermediary allocations.
// Just as before, we need a way of defining what it means for an array of nats
// to be sorted:
predicate sorted(a: seq<nat>)
{
true // TODO
}
method Isort(a: array<nat>)
modifies a
ensures sorted(a[..])
{
if a.Length == 0 {
return;
}
// Here is the thing that we have to get Dafny to understand:
//
// We are going to iterate from left to right in the input array. As we
// progress, everything to the left of the current element is going to be
// in sorted order, so that when we finish iterating through the array all
// elements are going to be in their correct order.
//
// Let's look at some iteration of that main loop, where we're neither at the
// beginning nor at the end of the process:
//
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
// +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
// a | ✓ | ✓ | ✓ | ✓ | ✓ | = | | | | | | | | | | |
// +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
// \------------------/^
// These elements are |
// in sorted order n == 5: this element will be placed in its right place
// by the end of the current loop iteration...
//
// In particular, there is some k on [0..n) such that:
//
// 1. k is on [1..n) when a[k-1] <= a[n] and a[k] > a[n];
// 2. k == 0 when a[0] > a[n].
//
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
// +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
// a | <=| <=| <=| > | > | = | | | | | | | | | | |
// +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
// \----------/^\-----/
// <= a[n] | > a[n]
// |
// +--- k == 3: This is the index of where a[5] should go!
    //    So, we'll shift all the elements on [k, n) over by one, so they're now
    //    located on [k+1, n+1), and then place the old value of a[n] into a[k].
//
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
// +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
// a | <=| <=| <=| = | > | > | | | | | | | | | | |
// +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
// \----------/ \-----/
// <= a[n] > a[n]
//
// And now we have one more element in the correct place! We are now ready
// to begin the next iteration of the loop.
var n := 1;
while n < a.Length
{
var curr := a[n];
// 1. Find our pivot position k, the location where we should insert the
// current value.
var k := n;
while k > 0 && a[k-1] > curr
{
k := k-1;
}
a[n] := a[n-1]; // Hack to help the verifier with invariant sorted(a[k..n+1])
// 2. Shift all elements between k and n to the right by one.
var j := n-1;
while j >= k
{
a[j+1] := a[j];
j := j-1;
}
// 3. Put curr in its place!
a[k] := curr;
n := n + 1;
}
}
|
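The sorted predicate in the entry above is left as true // TODO in both columns, so the ensures sorted(a[..]) postcondition is vacuous. One common definition, sketched here as an editorial assumption rather than as part of the recorded file:

predicate sorted(a: seq<nat>)
{
  // every earlier element is no larger than any later one
  forall i, j :: 0 <= i < j < |a| ==> a[i] <= a[j]
}

With a definition like this, Isort would also need loop invariants (for instance sorted(a[..n]) on the outer loop, plus bookkeeping for the shifted segment) before Dafny accepts the postcondition; the recorded version verifies only because the predicate is trivially true.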
777 | verified-using-dafny_tmp_tmp7jatpjyn_longestZero.dfy | function getSize(i: int, j:int) : int
{
j - i + 1
}
// For a given integer array, let's find the longest contiguous run of 0s.
// sz: size, pos: position. a[pos..(pos+sz)] will be all zeros
method longestZero(a: array<int>) returns (sz:int, pos:int)
requires 1 <= a.Length
ensures 0 <= sz <= a.Length
ensures 0 <= pos < a.Length
ensures pos + sz <= a.Length
ensures forall i:int :: pos <= i < pos + sz ==> a[i] == 0
ensures forall i,j :: (0 <= i < j < a.Length && getSize(i, j) > sz) ==> exists k :: i <= k <= j && a[k] != 0
{
    var b := new int[a.Length]; // if b[i] == n, then a[i], a[i-1], ..., a[i-n+1] are all zeros and (i-n == -1 or a[i-n] != 0)
if a[0] == 0
{b[0] := 1;}
else
{b[0] := 0;}
var idx:int := 0;
while idx < a.Length - 1 // idx <- 0 to a.Length - 2
invariant 0 <= idx <= a.Length - 1
invariant forall i:int :: 0 <= i <= idx ==> 0 <= b[i] <= a.Length
invariant forall i:int :: 0 <= i <= idx ==> -1 <= i - b[i]
invariant forall i:int :: 0 <= i <= idx ==> (forall j:int :: i-b[i] < j <= i ==> a[j] == 0)
invariant forall i:int :: 0 <= i <= idx ==> ( 0 <= i - b[i] ==> a[i - b[i]] != 0 )
{
if a[idx + 1] == 0
{ b[idx + 1] := b[idx] + 1; }
else
{ b[idx + 1] := 0;}
idx := idx + 1;
}
idx := 1;
sz := b[0];
pos := 0;
    // Let's find the maximum of array b. That is the desired sz.
while idx < a.Length
invariant 1 <= idx <= b.Length
invariant 0 <= sz <= a.Length
invariant 0 <= pos < a.Length
invariant pos + sz <= a.Length
invariant forall i:int :: 0 <= i < idx ==> b[i] <= sz
invariant forall i:int :: pos <= i < pos + sz ==> a[i] == 0
invariant forall i, j:int :: (0 <= i < j < idx && getSize(i,j) > sz) ==> a[j-b[j]] != 0
{
// find max
if b[idx] > sz
{
sz := b[idx];
pos := idx - b[idx] + 1;
}
idx := idx + 1;
}
}
method Main()
{
var a := new int[10];
forall i | 0 <= i < a.Length
{ a[i] := 0;}
a[3] := 1;
var sz, pos := longestZero(a);
print a[..], "\n";
print a[pos..(sz+pos)], "\n";
}
| function getSize(i: int, j:int) : int
{
j - i + 1
}
// For a given integer array, let's find the longest contiguous run of 0s.
// sz: size, pos: position. a[pos..(pos+sz)] will be all zeros
method longestZero(a: array<int>) returns (sz:int, pos:int)
requires 1 <= a.Length
ensures 0 <= sz <= a.Length
ensures 0 <= pos < a.Length
ensures pos + sz <= a.Length
ensures forall i:int :: pos <= i < pos + sz ==> a[i] == 0
ensures forall i,j :: (0 <= i < j < a.Length && getSize(i, j) > sz) ==> exists k :: i <= k <= j && a[k] != 0
{
    var b := new int[a.Length]; // if b[i] == n, then a[i], a[i-1], ..., a[i-n+1] are all zeros and (i-n == -1 or a[i-n] != 0)
if a[0] == 0
{b[0] := 1;}
else
{b[0] := 0;}
var idx:int := 0;
while idx < a.Length - 1 // idx <- 0 to a.Length - 2
{
if a[idx + 1] == 0
{ b[idx + 1] := b[idx] + 1; }
else
{ b[idx + 1] := 0;}
idx := idx + 1;
}
idx := 1;
sz := b[0];
pos := 0;
    // Let's find the maximum of array b. That is the desired sz.
while idx < a.Length
{
// find max
if b[idx] > sz
{
sz := b[idx];
pos := idx - b[idx] + 1;
}
idx := idx + 1;
}
}
method Main()
{
var a := new int[10];
forall i | 0 <= i < a.Length
{ a[i] := 0;}
a[3] := 1;
var sz, pos := longestZero(a);
print a[..], "\n";
print a[pos..(sz+pos)], "\n";
}
|
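To make the role of the helper array b concrete, here is a small harness in the style of the Main above. The method name longestZeroExample and the chosen input are illustrative assumptions, not part of the original file:

method longestZeroExample()
{
  var a := new int[6];
  forall i | 0 <= i < a.Length { a[i] := 0; }
  a[2] := 1;
  // The first loop in longestZero computes b = [1, 2, 0, 1, 2, 3]; its maximum,
  // 3, occurs at index 5, so the postconditions force sz == 3 and
  // pos == 5 - 3 + 1 == 3, i.e. the all-zero run a[3..6].
  var sz, pos := longestZero(a);
  print a[..], " sz=", sz, " pos=", pos, "\n";
}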
778 | vfag_tmp_tmpc29dxm1j_Verificacion_torneo.dfy | method torneo(Valores : array?<real>, i : int, j : int, k : int) returns (pos_padre : int, pos_madre : int)
requires Valores != null && Valores.Length >= 20 && Valores.Length < 50 && i >= 0 && j >= 0 && k >= 0
requires i < Valores.Length && j < Valores.Length && k < Valores.Length && i != j && j != k && k != i
ensures exists p, q, r | p in {i, j, k} && q in {i, j, k} && r in {i, j, k} && p != q && q != r && p != r :: Valores[p] >= Valores[q] >= Valores[r] && pos_padre == p && pos_madre == q // Q
{
assert (Valores[i] < Valores[j] && ((Valores[j] < Valores[k] && exists r | r in {i, j, k} && k != j && j != r && k != r :: Valores[k] >= Valores[j] >= Valores[r]) || (Valores[j] >= Valores[k] && ((Valores[i] < Valores[k] && exists r | r in {i, j, k} && j != k && k != r && j != r :: Valores[j] >= Valores[k] >= Valores[r]) || (Valores[i] >= Valores[k] && exists r | r in {i, j, k} && j != i && i != r && j != r :: Valores[j] >= Valores[i] >= Valores[r]))))) || (Valores[i] >= Valores[j] && ((Valores[j] >= Valores[k] && exists r | r in {i, j, k} && i != j && j != r && i != r :: Valores[i] >= Valores[j] >= Valores[r]) || (Valores[j] < Valores[k] && ((Valores[i] < Valores[k] && exists r | r in {i, j, k} && k != i && i != r && k != r :: Valores[k] >= Valores[i] >= Valores[r]) || (Valores[i] >= Valores[k] && exists r | r in {i, j, k} && i != k && k != r && i != r :: Valores[i] >= Valores[k] >= Valores[r]))))) ; // R : pmd(if..., Q)
if Valores[i] < Valores[j] {
assert (Valores[j] < Valores[k] && exists r | r in {i, j, k} && k != j && j != r && k != r :: Valores[k] >= Valores[j] >= Valores[r]) || (Valores[j] >= Valores[k] && ((Valores[i] < Valores[k] && exists r | r in {i, j, k} && j != k && k != r && j != r :: Valores[j] >= Valores[k] >= Valores[r]) || (Valores[i] >= Valores[k] && exists r | r in {i, j, k} && j != i && i != r && j != r :: Valores[j] >= Valores[i] >= Valores[r]))) ; // R1 : pmd(if..., Q)
if Valores[j] < Valores[k] {
assert exists r | r in {i, j, k} && k != j && j != r && k != r :: Valores[k] >= Valores[j] >= Valores[r] ; // R11 : pmd(pos_padre := k, R12)
pos_padre := k ;
assert exists p, r | p in {i, j, k} && r in {i, j, k} && p != j && j != r && p != r :: Valores[p] >= Valores[j] >= Valores[r] && pos_padre == p ; // R12 : pmd(pos_madre := j, Q)
pos_madre := j ;
assert exists p, q, r | p in {i, j, k} && q in {i, j, k} && r in {i, j, k} && p != q && q != r && p != r :: Valores[p] >= Valores[q] >= Valores[r] && pos_padre == p && pos_madre == q ; // Q
} else {
assert (Valores[i] < Valores[k] && exists r | r in {i, j, k} && j != k && k != r && j != r :: Valores[j] >= Valores[k] >= Valores[r]) || (Valores[i] >= Valores[k] && exists r | r in {i, j, k} && j != i && i != r && j != r :: Valores[j] >= Valores[i] >= Valores[r]) ; // R2 : pmd(if..., Q)
if Valores[i] < Valores[k] {
assert exists r | r in {i, j, k} && j != k && k != r && j != r :: Valores[j] >= Valores[k] >= Valores[r] ; // R13 : pmd(pos_padre := j, R14)
pos_padre := j ;
assert exists p, r | p in {i, j, k} && r in {i, j, k} && p != k && k != r && p != r :: Valores[p] >= Valores[k] >= Valores[r] && pos_padre == p ; // R14 : pmd(pos_madre := k, Q)
pos_madre := k ;
assert exists p, q, r | p in {i, j, k} && q in {i, j, k} && r in {i, j, k} && p != q && q != r && p != r :: Valores[p] >= Valores[q] >= Valores[r] && pos_padre == p && pos_madre == q ; // Q
} else {
assert exists r | r in {i, j, k} && j != i && i != r && j != r :: Valores[j] >= Valores[i] >= Valores[r] ; // R15 : pmd(pos_padre := j, R16)
pos_padre := j ;
assert exists p, r | p in {i, j, k} && r in {i, j, k} && p != i && i != r && p != r :: Valores[p] >= Valores[i] >= Valores[r] && pos_padre == p ; // R16 : pmd(pos_madre := i, Q)
pos_madre := i ;
assert exists p, q, r | p in {i, j, k} && q in {i, j, k} && r in {i, j, k} && p != q && q != r && p != r :: Valores[p] >= Valores[q] >= Valores[r] && pos_padre == p && pos_madre == q ; // Q
}
}
} else {
assert (Valores[j] >= Valores[k] && exists r | r in {i, j, k} && i != j && j != r && i != r :: Valores[i] >= Valores[j] >= Valores[r]) || (Valores[j] < Valores[k] && ((Valores[i] < Valores[k] && exists r | r in {i, j, k} && k != i && i != r && k != r :: Valores[k] >= Valores[i] >= Valores[r]) || (Valores[i] >= Valores[k] && exists r | r in {i, j, k} && i != k && k != r && i != r :: Valores[i] >= Valores[k] >= Valores[r]))) ; // R3 : pmd(if..., Q)
if Valores[j] >= Valores[k] {
assert exists r | r in {i, j, k} && i != j && j != r && i != r :: Valores[i] >= Valores[j] >= Valores[r] ; // R17 : pmd(pos_padre := i, R18)
pos_padre := i ;
assert exists p, r | p in {i, j, k} && r in {i, j, k} && p != j && j != r && p != r :: Valores[p] >= Valores[j] >= Valores[r] && pos_padre == p ; // R18 : pmd(pos_madre := j, Q)
pos_madre := j ;
assert exists p, q, r | p in {i, j, k} && q in {i, j, k} && r in {i, j, k} && p != q && q != r && p != r :: Valores[p] >= Valores[q] >= Valores[r] && pos_padre == p && pos_madre == q ; // Q
} else {
assert (Valores[i] < Valores[k] && exists r | r in {i, j, k} && k != i && i != r && k != r :: Valores[k] >= Valores[i] >= Valores[r]) || (Valores[i] >= Valores[k] && exists r | r in {i, j, k} && i != k && k != r && i != r :: Valores[i] >= Valores[k] >= Valores[r]) ; // R4 : pmd(if..., Q)
if Valores[i] < Valores[k] {
assert exists r | r in {i, j, k} && k != i && i != r && k != r :: Valores[k] >= Valores[i] >= Valores[r] ; // R19 : pmd(pos_padre := k, R110)
pos_padre := k ;
assert exists p, r | p in {i, j, k} && r in {i, j, k} && p != i && i != r && p != r :: Valores[p] >= Valores[i] >= Valores[r] && pos_padre == p ; // R110 : pmd(pos_madre := i, Q)
pos_madre := i ;
assert exists p, q, r | p in {i, j, k} && q in {i, j, k} && r in {i, j, k} && p != q && q != r && p != r :: Valores[p] >= Valores[q] >= Valores[r] && pos_padre == p && pos_madre == q ; // Q
} else {
assert exists r | r in {i, j, k} && i != k && k != r && i != r :: Valores[i] >= Valores[k] >= Valores[r] ; // R111 : pmd(pos_padre := i, R112)
pos_padre := i ;
assert exists p, r | p in {i, j, k} && r in {i, j, k} && p != k && k != r && p != r :: Valores[p] >= Valores[k] >= Valores[r] && pos_padre == p ; // R112 : pmd(pos_madre := k, Q)
pos_madre := k ;
assert exists p, q, r | p in {i, j, k} && q in {i, j, k} && r in {i, j, k} && p != q && q != r && p != r :: Valores[p] >= Valores[q] >= Valores[r] && pos_padre == p && pos_madre == q ; // Q
}
assert exists p, q, r | p in {i, j, k} && q in {i, j, k} && r in {i, j, k} && p != q && q != r && p != r :: Valores[p] >= Valores[q] >= Valores[r] && pos_padre == p && pos_madre == q ; // Q
}
assert exists p, q, r | p in {i, j, k} && q in {i, j, k} && r in {i, j, k} && p != q && q != r && p != r :: Valores[p] >= Valores[q] >= Valores[r] && pos_padre == p && pos_madre == q ; // Q
}
assert exists p, q, r | p in {i, j, k} && q in {i, j, k} && r in {i, j, k} && p != q && q != r && p != r :: Valores[p] >= Valores[q] >= Valores[r] && pos_padre == p && pos_madre == q ; // Q
}
| method torneo(Valores : array?<real>, i : int, j : int, k : int) returns (pos_padre : int, pos_madre : int)
requires Valores != null && Valores.Length >= 20 && Valores.Length < 50 && i >= 0 && j >= 0 && k >= 0
requires i < Valores.Length && j < Valores.Length && k < Valores.Length && i != j && j != k && k != i
ensures exists p, q, r | p in {i, j, k} && q in {i, j, k} && r in {i, j, k} && p != q && q != r && p != r :: Valores[p] >= Valores[q] >= Valores[r] && pos_padre == p && pos_madre == q // Q
{
if Valores[i] < Valores[j] {
if Valores[j] < Valores[k] {
pos_padre := k ;
pos_madre := j ;
} else {
if Valores[i] < Valores[k] {
pos_padre := j ;
pos_madre := k ;
} else {
pos_padre := j ;
pos_madre := i ;
}
}
} else {
if Valores[j] >= Valores[k] {
pos_padre := i ;
pos_madre := j ;
} else {
if Valores[i] < Valores[k] {
pos_padre := k ;
pos_madre := i ;
} else {
pos_padre := i ;
pos_madre := k ;
}
}
}
}
|
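To see how the nested branches pick the two largest of the three values, here is a small trace harness. The method name torneoExample and the concrete values are illustrative assumptions, not part of the original file:

method torneoExample()
{
  var v := new real[20];
  forall n | 0 <= n < v.Length { v[n] := 0.0; }
  v[1] := 3.0; v[5] := 5.0; v[9] := 4.0;
  // With Valores[i] == 3.0, Valores[j] == 5.0 and Valores[k] == 4.0: the test
  // Valores[i] < Valores[j] holds, Valores[j] < Valores[k] fails, and
  // Valores[i] < Valores[k] holds, so pos_padre == j == 5 (the largest value)
  // and pos_madre == k == 9 (the runner-up).
  var padre, madre := torneo(v, 1, 5, 9);
  print padre, " ", madre, "\n"; // 5 9
}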
779 | vfag_tmp_tmpc29dxm1j_mergesort.dfy | method ordenar_mergesort(V : array?<int>)
requires V != null
modifies V
{
mergesort(V, 0, V.Length - 1) ;
}
method mergesort(V : array?<int>, c : int, f : int)
requires V != null
requires c >= 0 && f <= V.Length
decreases f - c
modifies V
{
if c < f {
var m : int ;
m := c + (f - c) / 2 ;
mergesort(V, c, m) ;
mergesort(V, m + 1, f) ;
mezclar(V, c, m, f) ;
}
}
method mezclar(V: array?<int>, c : int, m : int, f : int)
requires V != null
requires c <= m <= f
requires 0 <= c <= V.Length
requires 0 <= m <= V.Length
requires 0 <= f <= V.Length
modifies V
{
var V1 : array?<int> ;
var j : nat ;
V1 := new int[m - c + 1] ;
j := 0 ;
while j < V1.Length && c + j < V.Length
invariant 0 <= j <= V1.Length
invariant 0 <= c + j <= V.Length
decreases V1.Length - j
{
V1[j] := V[c + j] ;
j := j + 1 ;
}
var V2 : array?<int> ;
var k : nat ;
V2 := new int[f - m] ;
k := 0 ;
while k < V2.Length && m + k + 1 < V.Length
invariant 0 <= k <= V2.Length
invariant 0 <= m + k <= V.Length
decreases V2.Length - k
{
V2[k] := V[m + k + 1] ;
k := k + 1 ;
}
var i : nat ;
i := 0 ;
j := 0 ;
k := 0 ;
while i < f - c + 1 &&
j <= V1.Length &&
k <= V2.Length &&
c + i < V.Length
invariant 0 <= i <= f - c + 1
decreases f - c - i
{
if j >= V1.Length && k >= V2.Length {
break ;
}
else if j >= V1.Length {
V[c + i] := V2[k] ;
k := k + 1 ;
}
else if k >= V2.Length {
V[c + i] := V1[j] ;
j := j + 1 ;
}
else {
if V1[j] <= V2[k] {
V[c + i] := V1[j] ;
j := j + 1 ;
}
else if V1[j] > V2[k] {
V[c + i] := V2[k] ;
k := k + 1 ;
}
}
i := i + 1 ;
}
}
| method ordenar_mergesort(V : array?<int>)
requires V != null
modifies V
{
mergesort(V, 0, V.Length - 1) ;
}
method mergesort(V : array?<int>, c : int, f : int)
requires V != null
requires c >= 0 && f <= V.Length
modifies V
{
if c < f {
var m : int ;
m := c + (f - c) / 2 ;
mergesort(V, c, m) ;
mergesort(V, m + 1, f) ;
mezclar(V, c, m, f) ;
}
}
method mezclar(V: array?<int>, c : int, m : int, f : int)
requires V != null
requires c <= m <= f
requires 0 <= c <= V.Length
requires 0 <= m <= V.Length
requires 0 <= f <= V.Length
modifies V
{
var V1 : array?<int> ;
var j : nat ;
V1 := new int[m - c + 1] ;
j := 0 ;
while j < V1.Length && c + j < V.Length
{
V1[j] := V[c + j] ;
j := j + 1 ;
}
var V2 : array?<int> ;
var k : nat ;
V2 := new int[f - m] ;
k := 0 ;
while k < V2.Length && m + k + 1 < V.Length
{
V2[k] := V[m + k + 1] ;
k := k + 1 ;
}
var i : nat ;
i := 0 ;
j := 0 ;
k := 0 ;
while i < f - c + 1 &&
j <= V1.Length &&
k <= V2.Length &&
c + i < V.Length
{
if j >= V1.Length && k >= V2.Length {
break ;
}
else if j >= V1.Length {
V[c + i] := V2[k] ;
k := k + 1 ;
}
else if k >= V2.Length {
V[c + i] := V1[j] ;
j := j + 1 ;
}
else {
if V1[j] <= V2[k] {
V[c + i] := V1[j] ;
j := j + 1 ;
}
else if V1[j] > V2[k] {
V[c + i] := V2[k] ;
k := k + 1 ;
}
}
i := i + 1 ;
}
}
|
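A small harness exercising the c/m/f index arithmetic in mergesort and mezclar. The method name ejemplo_mergesort and the input are illustrative assumptions, not part of the original file:

method ejemplo_mergesort()
{
  var v := new int[4];
  v[0] := 3; v[1] := 1; v[2] := 2; v[3] := 0;
  // ordenar_mergesort calls mergesort(v, 0, 3); the first split is
  // m == 0 + (3 - 0) / 2 == 1, so mezclar(v, 0, 1, 3) copies
  // m - c + 1 == 2 elements into V1 and f - m == 2 elements into V2
  // before merging them back into v[0..3].
  ordenar_mergesort(v);
  print v[..], "\n"; // expected: [0, 1, 2, 3]
}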
780 | vfag_tmp_tmpc29dxm1j_sumar_componentes.dfy | method suma_componentes(V : array?<int>) returns (suma : int)
requires V != null
    ensures suma == suma_aux(V, 0) // suma = V[0] + V[1] + ... + V[N - 1]
{
var n : int ;
assert V != null ; // P
    assert 0 <= V.Length <= V.Length && 0 == suma_aux(V, V.Length) ; // P ==> pmd(n := V.Length ; suma := 0, I)
n := V.Length ; // n := N
suma := 0 ;
assert 0 <= n <= V.Length && suma == suma_aux(V, n) ; // I
while n != 0
invariant 0 <= n <= V.Length && suma == suma_aux(V, n) // I
decreases n // C
{
assert 0 <= n <= V.Length && suma == suma_aux(V, n) && n != 0 ; // I /\ B ==> R
assert 0 <= n - 1 <= V.Length ;
        assert suma + V[n - 1] == suma_aux(V, n - 1) ; // R : pmd(suma := suma + V[n - 1], R1)
suma := suma + V[n - 1] ;
assert 0 <= n - 1 <= V.Length && suma == suma_aux(V, n - 1) ; // R1 : pmd(n := n - 1, I)
n := n - 1 ;
assert 0 <= n <= V.Length && suma == suma_aux(V, n) ; // I
}
assert 0 <= n <= V.Length && suma == suma_aux(V, n) && n == 0 ; // I /\ ¬B ==> Q
assert suma == suma_aux(V, 0) ; // Q
}
function suma_aux(V : array?<int>, n : int) : int
// suma_aux(V, n) = V[n] + V[n + 1] + ... + V[N - 1]
requires V != null // P_0
requires 0 <= n <= V.Length // Q_0
decreases V.Length - n // C_0
reads V
{
    if (n == V.Length) then 0 // Base case: n = N
    else V[n] + suma_aux(V, n + 1) // Recursive case: n < N
}
| method suma_componentes(V : array?<int>) returns (suma : int)
requires V != null
    ensures suma == suma_aux(V, 0) // suma = V[0] + V[1] + ... + V[N - 1]
{
var n : int ;
n := V.Length ; // n := N
suma := 0 ;
while n != 0
{
suma := suma + V[n - 1] ;
n := n - 1 ;
}
}
function suma_aux(V : array?<int>, n : int) : int
// suma_aux(V, n) = V[n] + V[n + 1] + ... + V[N - 1]
requires V != null // P_0
requires 0 <= n <= V.Length // Q_0
reads V
{
    if (n == V.Length) then 0 // Base case: n = N
    else V[n] + suma_aux(V, n + 1) // Recursive case: n < N
}
|
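A small harness for the summation above. The method name ejemplo_suma and the input are illustrative assumptions, not part of the original file:

method ejemplo_suma()
{
  var v := new int[3];
  v[0] := 2; v[1] := 5; v[2] := 7;
  // The loop starts with n == 3 and suma == 0 == suma_aux(v, 3), then adds
  // v[2], v[1], v[0] in that order: suma becomes 7, 12, 14 == suma_aux(v, 0).
  var s := suma_componentes(v);
  assert s == suma_aux(v, 0); // immediate from the postcondition
  print s, "\n"; // 14
}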
781 | vmware-verification-2023_tmp_tmpoou5u54i_demos_leader_election.dfy | // Each node's identifier (address)
datatype Constants = Constants(ids: seq<nat>) {
predicate ValidIdx(i: int) {
0<=i<|ids|
}
ghost predicate UniqueIds() {
(forall i, j | ValidIdx(i) && ValidIdx(j) && ids[i]==ids[j] :: i == j)
}
ghost predicate WF() {
&& 0 < |ids|
&& UniqueIds()
}
}
// The highest other identifier this node has heard about.
datatype Variables = Variables(highest_heard: seq<int>) {
ghost predicate WF(c: Constants)
{
&& c.WF()
&& |highest_heard| == |c.ids|
}
}
ghost predicate Init(c: Constants, v: Variables)
{
&& v.WF(c)
&& c.UniqueIds()
// Everyone begins having heard about nobody, not even themselves.
&& (forall i | c.ValidIdx(i) :: v.highest_heard[i] == -1)
}
function max(a: int, b: int) : int {
if a > b then a else b
}
function NextIdx(c: Constants, idx: nat) : nat
requires c.WF()
requires c.ValidIdx(idx)
{
if idx + 1 == |c.ids| then 0 else idx + 1
}
ghost predicate Transmission(c: Constants, v: Variables, v': Variables, srcidx: nat)
{
&& v.WF(c)
&& v'.WF(c)
&& c.ValidIdx(srcidx)
// Neighbor address in ring.
&& var dstidx := NextIdx(c, srcidx);
// srcidx sends the max of its highest_heard value and its own id.
&& var message := max(v.highest_heard[srcidx], c.ids[srcidx]);
// dstidx only overwrites its highest_heard if the message is higher.
&& var dst_new_max := max(v.highest_heard[dstidx], message);
// XXX Manos: How could this have been a bug!? How could a srcidx, having sent message X, ever send message Y < X!?
&& v' == v.(highest_heard := v.highest_heard[dstidx := dst_new_max])
}
datatype Step = TransmissionStep(srcidx: nat)
ghost predicate NextStep(c: Constants, v: Variables, v': Variables, step: Step)
{
match step {
case TransmissionStep(srcidx) => Transmission(c, v, v', srcidx)
}
}
ghost predicate Next(c: Constants, v: Variables, v': Variables)
{
exists step :: NextStep(c, v, v', step)
}
//////////////////////////////////////////////////////////////////////////////
// Spec (proof goal)
//////////////////////////////////////////////////////////////////////////////
ghost predicate IsLeader(c: Constants, v: Variables, i: int)
requires v.WF(c)
{
&& c.ValidIdx(i)
&& v.highest_heard[i] == c.ids[i]
}
ghost predicate Safety(c: Constants, v: Variables)
requires v.WF(c)
{
forall i, j | IsLeader(c, v, i) && IsLeader(c, v, j) :: i == j
}
//////////////////////////////////////////////////////////////////////////////
// Proof
//////////////////////////////////////////////////////////////////////////////
ghost predicate IsChord(c: Constants, v: Variables, start: int, end: int)
{
&& v.WF(c)
&& c.ValidIdx(start)
&& c.ValidIdx(end)
&& c.ids[start] == v.highest_heard[end]
}
ghost predicate Between(start: int, node: int, end: int)
{
if start < end
then start < node < end // not wrapped
else node < end || start < node // wrapped
}
ghost predicate OnChordHeardDominatesId(c: Constants, v: Variables, start: int, end: int)
requires v.WF(c)
{
forall node | Between(start, node, end) && c.ValidIdx(node)
:: v.highest_heard[node] > c.ids[node]
}
ghost predicate OnChordHeardDominatesIdInv(c: Constants, v: Variables)
{
&& v.WF(c)
&& (forall start, end
| IsChord(c, v, start, end)
:: OnChordHeardDominatesId(c, v, start, end)
)
}
ghost predicate Inv(c: Constants, v: Variables)
{
&& v.WF(c)
&& OnChordHeardDominatesIdInv(c, v)
&& Safety(c, v)
}
lemma InitImpliesInv(c: Constants, v: Variables)
requires Init(c, v)
ensures Inv(c, v)
{
}
lemma NextPreservesInv(c: Constants, v: Variables, v': Variables)
requires Inv(c, v)
requires Next(c, v, v')
ensures Inv(c, v')
{
var step :| NextStep(c, v, v', step);
var srcidx := step.srcidx;
var dstidx := NextIdx(c, srcidx);
var message := max(v.highest_heard[srcidx], c.ids[srcidx]);
var dst_new_max := max(v.highest_heard[dstidx], message);
forall start, end
| IsChord(c, v', start, end)
ensures OnChordHeardDominatesId(c, v', start, end)
{
forall node | Between(start, node, end) && c.ValidIdx(node)
ensures v'.highest_heard[node] > c.ids[node]
{
if dstidx == end {
// maybe this chord just sprung into existence
if v'.highest_heard[end] == v.highest_heard[end] {
// no change --
assert v' == v; // trigger
} else if v'.highest_heard[end] == c.ids[srcidx] {
          assert false; // proof by contradiction
} else if v'.highest_heard[end] == v.highest_heard[srcidx] {
assert IsChord(c, v, start, srcidx); // trigger
}
} else {
// this chord was already here
assert IsChord(c, v, start, end); // trigger
}
}
}
assert OnChordHeardDominatesIdInv(c, v');
forall i, j | IsLeader(c, v', i) && IsLeader(c, v', j) ensures i == j {
assert IsChord(c, v', i, i); // trigger
assert IsChord(c, v', j, j); // trigger
}
assert Safety(c, v');
}
lemma InvImpliesSafety(c: Constants, v: Variables)
requires Inv(c, v)
ensures Safety(c, v)
{
}
| // Each node's identifier (address)
datatype Constants = Constants(ids: seq<nat>) {
predicate ValidIdx(i: int) {
0<=i<|ids|
}
ghost predicate UniqueIds() {
(forall i, j | ValidIdx(i) && ValidIdx(j) && ids[i]==ids[j] :: i == j)
}
ghost predicate WF() {
&& 0 < |ids|
&& UniqueIds()
}
}
// The highest other identifier this node has heard about.
datatype Variables = Variables(highest_heard: seq<int>) {
ghost predicate WF(c: Constants)
{
&& c.WF()
&& |highest_heard| == |c.ids|
}
}
ghost predicate Init(c: Constants, v: Variables)
{
&& v.WF(c)
&& c.UniqueIds()
// Everyone begins having heard about nobody, not even themselves.
&& (forall i | c.ValidIdx(i) :: v.highest_heard[i] == -1)
}
function max(a: int, b: int) : int {
if a > b then a else b
}
function NextIdx(c: Constants, idx: nat) : nat
requires c.WF()
requires c.ValidIdx(idx)
{
if idx + 1 == |c.ids| then 0 else idx + 1
}
ghost predicate Transmission(c: Constants, v: Variables, v': Variables, srcidx: nat)
{
&& v.WF(c)
&& v'.WF(c)
&& c.ValidIdx(srcidx)
// Neighbor address in ring.
&& var dstidx := NextIdx(c, srcidx);
// srcidx sends the max of its highest_heard value and its own id.
&& var message := max(v.highest_heard[srcidx], c.ids[srcidx]);
// dstidx only overwrites its highest_heard if the message is higher.
&& var dst_new_max := max(v.highest_heard[dstidx], message);
// XXX Manos: How could this have been a bug!? How could a srcidx, having sent message X, ever send message Y < X!?
&& v' == v.(highest_heard := v.highest_heard[dstidx := dst_new_max])
}
datatype Step = TransmissionStep(srcidx: nat)
ghost predicate NextStep(c: Constants, v: Variables, v': Variables, step: Step)
{
match step {
case TransmissionStep(srcidx) => Transmission(c, v, v', srcidx)
}
}
ghost predicate Next(c: Constants, v: Variables, v': Variables)
{
exists step :: NextStep(c, v, v', step)
}
//////////////////////////////////////////////////////////////////////////////
// Spec (proof goal)
//////////////////////////////////////////////////////////////////////////////
ghost predicate IsLeader(c: Constants, v: Variables, i: int)
requires v.WF(c)
{
&& c.ValidIdx(i)
&& v.highest_heard[i] == c.ids[i]
}
ghost predicate Safety(c: Constants, v: Variables)
requires v.WF(c)
{
forall i, j | IsLeader(c, v, i) && IsLeader(c, v, j) :: i == j
}
//////////////////////////////////////////////////////////////////////////////
// Proof
//////////////////////////////////////////////////////////////////////////////
ghost predicate IsChord(c: Constants, v: Variables, start: int, end: int)
{
&& v.WF(c)
&& c.ValidIdx(start)
&& c.ValidIdx(end)
&& c.ids[start] == v.highest_heard[end]
}
ghost predicate Between(start: int, node: int, end: int)
{
if start < end
then start < node < end // not wrapped
else node < end || start < node // wrapped
}
ghost predicate OnChordHeardDominatesId(c: Constants, v: Variables, start: int, end: int)
requires v.WF(c)
{
forall node | Between(start, node, end) && c.ValidIdx(node)
:: v.highest_heard[node] > c.ids[node]
}
ghost predicate OnChordHeardDominatesIdInv(c: Constants, v: Variables)
{
&& v.WF(c)
&& (forall start, end
| IsChord(c, v, start, end)
:: OnChordHeardDominatesId(c, v, start, end)
)
}
ghost predicate Inv(c: Constants, v: Variables)
{
&& v.WF(c)
&& OnChordHeardDominatesIdInv(c, v)
&& Safety(c, v)
}
lemma InitImpliesInv(c: Constants, v: Variables)
requires Init(c, v)
ensures Inv(c, v)
{
}
lemma NextPreservesInv(c: Constants, v: Variables, v': Variables)
requires Inv(c, v)
requires Next(c, v, v')
ensures Inv(c, v')
{
var step :| NextStep(c, v, v', step);
var srcidx := step.srcidx;
var dstidx := NextIdx(c, srcidx);
var message := max(v.highest_heard[srcidx], c.ids[srcidx]);
var dst_new_max := max(v.highest_heard[dstidx], message);
forall start, end
| IsChord(c, v', start, end)
ensures OnChordHeardDominatesId(c, v', start, end)
{
forall node | Between(start, node, end) && c.ValidIdx(node)
ensures v'.highest_heard[node] > c.ids[node]
{
if dstidx == end {
// maybe this chord just sprung into existence
if v'.highest_heard[end] == v.highest_heard[end] {
// no change --
} else if v'.highest_heard[end] == c.ids[srcidx] {
} else if v'.highest_heard[end] == v.highest_heard[srcidx] {
}
} else {
// this chord was already here
}
}
}
forall i, j | IsLeader(c, v', i) && IsLeader(c, v', j) ensures i == j {
}
}
lemma InvImpliesSafety(c: Constants, v: Variables)
requires Inv(c, v)
ensures Safety(c, v)
{
}
|
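The wrap-around case of Between is the subtle part of the chord argument above. A small illustrative lemma over concrete ring indices (the name BetweenExample is an assumption, not part of the original file):

lemma BetweenExample()
  // In a ring indexed 0..4, the chord from 3 to 1 wraps past the end, so node 4
  // lies strictly between them; going from 1 to 3 it does not.
  ensures Between(3, 4, 1)
  ensures !Between(1, 4, 3)
{
}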