test_ID (string, 3 chars) | test_file (string, 14–119 chars) | ground_truth (string, 70–28.7k chars) | hints_removed (string, 58–28.7k chars)
---|---|---|---|
300 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_basic examples_add_by_one.dfy | method add_by_one (x:int, y:int) returns (r:int)
requires y >= 0;
ensures r == x + y;
{
var i:int := 0;
r := x;
while (i < y)
invariant i <= y;
invariant r == x + i;
decreases y-i;
{
r := r + 1;
i := i + 1;
}
return r;
}
/*
* Illustrates de-sugaring of the while loop.
*/
method bar (x:int, y:int) returns (r:int)
requires y >= 0;
ensures r == x + y;
{
var i := 0;
r := x;
// the invariant is true before the loop
assert (i <= y && r == x + i);
// the ranking function is positive before the loop
assert (y-i >= 0);
// havoc variables assigned by the loop
i, r := *, *;
// assume the invariant holds
assume (i <= y && r == x + i);
// assume the ranking function is positive
assume (y-i >= 0);
// store the value of ranking function to compare against later
ghost var rank_before := y-i;
// one body of the loop
if (i < y)
{
r := r + 1;
i := i + 1;
// invariant is true at the end of the loop
assert (i <= y && r == x + i);
// ranking function is positive at the end of the loop
assert (y-i >= 0);
// ranking function has decreased
assert (rank_before - (y-i) > 0);
// if we got here, stop verification of this branch
assume (false);
}
// at this point only know the invariant of the loop + negation of
// the loop condition
return r;
}
| method add_by_one (x:int, y:int) returns (r:int)
requires y >= 0;
ensures r == x + y;
{
var i:int := 0;
r := x;
while (i < y)
{
r := r + 1;
i := i + 1;
}
return r;
}
/*
* Illustrates de-sugaring of the while loop.
*/
method bar (x:int, y:int) returns (r:int)
requires y >= 0;
ensures r == x + y;
{
var i := 0;
r := x;
// the invariant is true before the loop
// the ranking function is positive before the loop
// havoc variables assigned by the loop
i, r := *, *;
// assume the invariant holds
assume (i <= y && r == x + i);
// assume the ranking function is positive
assume (y-i >= 0);
// store the value of ranking function to compare against later
ghost var rank_before := y-i;
// one body of the loop
if (i < y)
{
r := r + 1;
i := i + 1;
// invariant is true at the end of the loop
// ranking function is positive at the end of the loop
// ranking function has decreased
// if we got here, stop verification of this branch
assume (false);
}
// at this point only know the invariant of the loop + negation of
// the loop condition
return r;
}
|
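// A minimal illustrative sketch (not from the dataset row above) applying the same
// assert / havoc / assume recipe to another toy loop; the method name
// double_by_adding is made up for this example.
method double_by_adding (x: nat) returns (r: nat)
  ensures r == 2 * x
{
  var i: nat := x;
  r := 0;
  // the invariant (0 <= i <= x && r == 2 * (x - i)) holds before the loop
  assert 0 <= i <= x && r == 2 * (x - i);
  // havoc the variables assigned by the loop, then assume the invariant
  i, r := *, *;
  assume 0 <= i <= x && r == 2 * (x - i);
  // remember the ranking function i to show it decreases
  ghost var rank_before := i;
  if (i > 0)
  {
    r := r + 2;
    i := i - 1;
    assert 0 <= i <= x && r == 2 * (x - i);
    assert i < rank_before;
    assume (false);
  }
  // here only the invariant and the negated guard (i == 0) are known,
  // which is enough to establish the postcondition
  return r;
}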
301 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_basic examples_add_by_one_details.dfy | method plus_one (x: int) returns (r:int)
requires x >= 0;
ensures r == x + 1;
{return x+1;}
method add_by_one (x:int, y:int) returns (r:int)
{
assume (y >= 0);
var i:int := 0;
r := x;
assert (i <= y);
assert (r == x + i);
r := *;
i := *;
assume (i <= y);
assume (r == x + i);
if (i < y)
// decreases y-i;
{
// assert (i >= -2);
assume (i < -2);
var t := y - i;
r := r + 1;
i := i + 1;
assert (i <= y);
assert (r == x + i);
assert (y-i >= 0);
assert (y-i < t);
assume (false);
}
assert (r == x + y);
return r;
}
| method plus_one (x: int) returns (r:int)
requires x >= 0;
ensures r == x + 1;
{return x+1;}
method add_by_one (x:int, y:int) returns (r:int)
{
assume (y >= 0);
var i:int := 0;
r := x;
r := *;
i := *;
assume (i <= y);
assume (r == x + i);
if (i < y)
// decreases y-i;
{
// assert (i >= -2);
assume (i < -2);
var t := y - i;
r := r + 1;
i := i + 1;
assume (false);
}
return r;
}
|
302 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_basic examples_find_max.dfy | method FindMax(a: array<int>) returns (max: int)
requires a != null && a.Length > 0;
ensures 0 <= max < a.Length;
ensures forall x :: 0 <= x < a.Length ==> a[max] >= a[x];
{
var i := 0;
max := 0;
while (i < a.Length)
invariant i <= a.Length;
invariant 0 <= max;
invariant max == 0 || 0 < max < i;
invariant forall k :: 0 <= k < i ==> a[max] >= a[k]
{
if (a[i] > a[max]) { max := i; }
i := i + 1;
}
return max;
}
| method FindMax(a: array<int>) returns (max: int)
requires a != null && a.Length > 0;
ensures 0 <= max < a.Length;
ensures forall x :: 0 <= x < a.Length ==> a[max] >= a[x];
{
var i := 0;
max := 0;
while (i < a.Length)
{
if (a[i] > a[max]) { max := i; }
i := i + 1;
}
return max;
}
|
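// A hypothetical client sketch relying only on FindMax's specification above;
// UseFindMax and the concrete array contents are made up for illustration.
method UseFindMax()
{
  var a := new int[3];
  a[0], a[1], a[2] := 2, 7, 5;
  var m := FindMax(a);
  assert 0 <= m < a.Length;            // from the first postcondition
  assert a[m] >= a[0] && a[m] >= a[2]; // instances of the second postcondition
}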
303 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_basic examples_product_details.dfy | method CalcProduct(m: nat, n: nat) returns (res: nat)
ensures res == m*n;
{
var m1: nat := m;
res := 0;
assert res == (m-m1)*n;
m1, res := *, *;
assume res == (m-m1)*n;
if (m1!=0)
{
var n1: nat := n;
assert (res == (m-m1)*n + (n-n1));
// havoc res, n1;
res, n1 := *, *;
assume res == (m-m1)*n + (n-n1);
if (n1 != 0)
{
ghost var old_n1 := n1;
res := res+1;
n1 := n1-1;
assert (res == (m-m1)*n + (n-n1));
assert n1 < old_n1;
assert n1 >= 0;
assume (false);
}
m1 := m1-1;
assert res == (m-m1)*n;
assume false;
}
}
| method CalcProduct(m: nat, n: nat) returns (res: nat)
ensures res == m*n;
{
var m1: nat := m;
res := 0;
m1, res := *, *;
assume res == (m-m1)*n;
if (m1!=0)
{
var n1: nat := n;
// havoc res, n1;
res, n1 := *, *;
assume res == (m-m1)*n + (n-n1);
if (n1 != 0)
{
ghost var old_n1 := n1;
res := res+1;
n1 := n1-1;
assume (false);
}
m1 := m1-1;
assume false;
}
}
|
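// A sketch (not from the dataset row above) of the fully sugared loop version
// that the havoc/assume encoding of CalcProduct corresponds to; the name
// CalcProductLoop is made up.
method CalcProductLoop(m: nat, n: nat) returns (res: nat)
  ensures res == m * n
{
  var m1: nat := m;
  res := 0;
  while m1 != 0
    invariant res == (m - m1) * n
    decreases m1
  {
    var n1: nat := n;
    while n1 != 0
      invariant res == (m - m1) * n + (n - n1)
      decreases n1
    {
      res := res + 1;
      n1 := n1 - 1;
    }
    m1 := m1 - 1;
  }
}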
304 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_basic examples_sumto_sol.dfy | function sum_up_to (n: nat): nat
{
if (n == 0) then 0
else sum_up_to (n-1) + 1
}
method SumUpTo (n: nat) returns (r: nat)
ensures r == sum_up_to (n);
{
var i := 0;
r := 0;
while (i < n)
invariant 0 <= i <= n;
invariant r == sum_up_to (i);
{
r := r + 1;
i := i + 1;
}
}
function total (a: seq<nat>) : nat
{
if |a| == 0 then 0
else total (a[0..|a|-1]) + a[|a|-1]
}
lemma total_lemma (a: seq<nat>, i:nat)
requires |a| > 0;
requires 0 <= i < |a|;
ensures total (a[0..i]) + a[i] == total (a[0..i+1]);
{
ghost var b := a[0..i+1];
calc
{
total (a[0..i+1]);
total (b);
total (b[0..|b|-1]) + b[|b|-1];
total (b[0..|b|-1]) + a[i];
{assert (b[0..|b|-1] == a[0..i]);}
total (a[0..i]) + a[i];
}
}
method Total (a: seq<nat>) returns (r:nat)
ensures r == total (a[0..|a|]);
{
var i := 0;
r := 0;
while i < |a|
invariant 0 <= i <= |a|;
invariant r == total (a[0..i]);
{
total_lemma (a, i);
r := r + a[i];
i := i + 1;
}
}
| function sum_up_to (n: nat): nat
{
if (n == 0) then 0
else sum_up_to (n-1) + 1
}
method SumUpTo (n: nat) returns (r: nat)
ensures r == sum_up_to (n);
{
var i := 0;
r := 0;
while (i < n)
{
r := r + 1;
i := i + 1;
}
}
function total (a: seq<nat>) : nat
{
if |a| == 0 then 0
else total (a[0..|a|-1]) + a[|a|-1]
}
lemma total_lemma (a: seq<nat>, i:nat)
requires |a| > 0;
requires 0 <= i < |a|;
ensures total (a[0..i]) + a[i] == total (a[0..i+1]);
{
ghost var b := a[0..i+1];
calc
{
total (a[0..i+1]);
total (b);
total (b[0..|b|-1]) + b[|b|-1];
total (b[0..|b|-1]) + a[i];
{assert (b[0..|b|-1] == a[0..i]);}
total (a[0..i]) + a[i];
}
}
method Total (a: seq<nat>) returns (r:nat)
ensures r == total (a[0..|a|]);
{
var i := 0;
r := 0;
while i < |a|
{
total_lemma (a, i);
r := r + a[i];
i := i + 1;
}
}
|
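// A small illustrative calc proof (not from the dataset row above) in the same
// style as total_lemma, showing that sum_up_to is the identity on nat; the
// lemma name sum_up_to_is_identity is made up.
lemma sum_up_to_is_identity (n: nat)
  ensures sum_up_to (n) == n
{
  if (n != 0)
  {
    calc
    {
      sum_up_to (n);
      sum_up_to (n-1) + 1;
      {sum_up_to_is_identity (n-1);}
      (n-1) + 1;
      n;
    }
  }
}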
305 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_from dafny main repo_dafny0_GhostITECompilation.dfy | // RUN: %testDafnyForEachCompiler --refresh-exit-code=0 "%s" -- --function-syntax:4 --relax-definite-assignment
function F(x: nat, ghost y: nat): nat
{
if x == 0 then
0
else if y != 0 then
F(x, y - 1) // this branch is not compiled (which even makes F auto-accumulator tail recursive)
else
F(x - 1, 60) + 13
}
lemma AboutF(x: nat, y: nat)
ensures F(x, y) == 13 * x
{
}
function G(x: nat, ghost y: nat): nat
{
if x == 0 then
0
else if y != 0 then
var z := x + x;
var a, b, c := 100, if x < z then G(x, y - 1) else G(x, y - 1), 200;
assert a + b + c == G(x, y - 1) + 300;
b // this branch is not compiled (which even makes G auto-accumulator tail recursive)
else
G(x - 1, 60) + 13
}
// Ostensibly, the following function is tail recursive. But the ghost-ITE optimization
// removes the tail call. This test ensures that the unused setup for the tail optimization
// does not cause problems.
function H(x: int, ghost y: nat): int {
if y == 0 then
x
else
H(x, y - 1)
}
// Like function H, function J looks like it's tail recursive. The compiler probably will
// emit the tail-call label, even though the tail-call is never taken.
function J(x: int): int {
if true then
x
else
J(x)
}
// The following function would never verify, and its execution wouldn't terminate.
// Nevertheless, we'll test here that it compiles into legal target code.
function {:verify false} K(x: int, ghost y: nat): int {
K(x, y - 1)
}
method Main() {
print F(5, 3), "\n"; // 65
print G(5, 3), "\n"; // 65
print H(65, 3), "\n"; // 65
print J(65), "\n"; // 65
}
| // RUN: %testDafnyForEachCompiler --refresh-exit-code=0 "%s" -- --function-syntax:4 --relax-definite-assignment
function F(x: nat, ghost y: nat): nat
{
if x == 0 then
0
else if y != 0 then
F(x, y - 1) // this branch is not compiled (which even makes F auto-accumulator tail recursive)
else
F(x - 1, 60) + 13
}
lemma AboutF(x: nat, y: nat)
ensures F(x, y) == 13 * x
{
}
function G(x: nat, ghost y: nat): nat
{
if x == 0 then
0
else if y != 0 then
var z := x + x;
var a, b, c := 100, if x < z then G(x, y - 1) else G(x, y - 1), 200;
b // this branch is not compiled (which even makes G auto-accumulator tail recursive)
else
G(x - 1, 60) + 13
}
// Ostensibly, the following function is tail recursive. But the ghost-ITE optimization
// removes the tail call. This test ensures that the unused setup for the tail optimization
// does not cause problems.
function H(x: int, ghost y: nat): int {
if y == 0 then
x
else
H(x, y - 1)
}
// Like function H, function J looks like it's tail recursive. The compiler probably will
// emit the tail-call label, even though the tail-call is never taken.
function J(x: int): int {
if true then
x
else
J(x)
}
// The following function would never verify, and its execution wouldn't terminate.
// Nevertheless, we'll test here that it compiles into legal target code.
function {:verify false} K(x: int, ghost y: nat): int {
K(x, y - 1)
}
method Main() {
print F(5, 3), "\n"; // 65
print G(5, 3), "\n"; // 65
print H(65, 3), "\n"; // 65
print J(65), "\n"; // 65
}
|
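// An illustrative sketch (not part of the test above) of what F computes once the
// ghost branch is erased: the ghost argument y never influences the result.
// F_compiled and F_compiled_value are made-up names; the lemma mirrors AboutF.
function F_compiled(x: nat): nat
{
  if x == 0 then 0 else F_compiled(x - 1) + 13
}
lemma F_compiled_value(x: nat)
  ensures F_compiled(x) == 13 * x
{
}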
306 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_from dafny main repo_dafny0_ModulePrint.dfy | // NONUNIFORM: Tests printing much more than compilation
// RUN: %dafny /dafnyVerify:0 /compile:0 /env:0 /dprint:"%t.dfy" "%s" > "%t"
// RUN: %dafny /dafnyVerify:0 /compile:0 /env:0 /printMode:DllEmbed /dprint:"%t1.dfy" "%t.dfy" >> "%t"
// RUN: %dafny /env:0 /compile:3 /printMode:DllEmbed /dprint:"%t2.dfy" "%t1.dfy" >> "%t"
// RUN: %diff "%t1.dfy" "%t2.dfy" >> "%t"
// RUN: %diff "%s.expect" "%t"
abstract module S {
class C {
var f: int
ghost var g: int
var h: int
method m()
modifies this
}
}
module T refines S {
class C ... {
ghost var h: int // change from non-ghost to ghost
ghost var j: int
var k: int
constructor () { }
method m()
ensures h == h
ensures j == j
{
assert k == k;
}
}
}
method Main() {
var c := new T.C();
c.m();
}
| // NONUNIFORM: Tests printing much more than compilation
// RUN: %dafny /dafnyVerify:0 /compile:0 /env:0 /dprint:"%t.dfy" "%s" > "%t"
// RUN: %dafny /dafnyVerify:0 /compile:0 /env:0 /printMode:DllEmbed /dprint:"%t1.dfy" "%t.dfy" >> "%t"
// RUN: %dafny /env:0 /compile:3 /printMode:DllEmbed /dprint:"%t2.dfy" "%t1.dfy" >> "%t"
// RUN: %diff "%t1.dfy" "%t2.dfy" >> "%t"
// RUN: %diff "%s.expect" "%t"
abstract module S {
class C {
var f: int
ghost var g: int
var h: int
method m()
modifies this
}
}
module T refines S {
class C ... {
ghost var h: int // change from non-ghost to ghost
ghost var j: int
var k: int
constructor () { }
method m()
ensures h == h
ensures j == j
{
}
}
}
method Main() {
var c := new T.C();
c.m();
}
|
307 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_from dafny main repo_dafny1_BDD.dfy | // RUN: %testDafnyForEachResolver "%s"
module SimpleBDD
{
class BDDNode
{
static ghost predicate bitfunc(f: map<seq<bool>, bool>, n: nat)
{
forall i:seq<bool> :: i in f <==> |i| == n
}
ghost var Contents: map<seq<bool>, bool>
ghost var Repr: set<object>
ghost var n: nat
var f: BDDNode?, t: BDDNode?
var b: bool
ghost predicate valid()
reads this, Repr
{
bitfunc(Contents,n) &&
(0 == n ==> (b <==> Contents[[]])) &&
(0 < n ==>
this in Repr &&
f != null && t != null && t in Repr && f in Repr &&
t.Repr <= Repr && f.Repr <= Repr &&
this !in f.Repr && this !in t.Repr &&
t.valid() && f.valid() &&
t.n == f.n == n-1 &&
(forall s | s in t.Contents :: Contents[[true] + s] <==> t.Contents[s]) &&
(forall s | s in f.Contents :: Contents[[false] + s] <==> f.Contents[s]))
}
}
class BDD
{
var root: BDDNode
ghost predicate valid()
reads this, Repr
{
root in Repr && root.Repr <= Repr && root.valid() &&
n == root.n && Contents == root.Contents
}
constructor () {
root := new BDDNode;
}
ghost var Contents: map<seq<bool>, bool>
var n: nat
ghost var Repr: set<object>
method Eval(s: seq<bool>) returns(b: bool)
requires valid() && |s| == n
ensures b == Contents[s]
{
var node: BDDNode := root;
var i := n;
assert s[n-i..] == s;
while i > 0
invariant node.valid()
invariant 0 <= i == node.n <= n
invariant Contents[s] == node.Contents[s[n-i..]]
{
assert s[n-i..] == [s[n-i]] + s[n-i+1..];
node := if s[n-i] then node.t else node.f;
i := i - 1;
}
assert s[n-i..] == [];
b := node.b;
}
}
}
| // RUN: %testDafnyForEachResolver "%s"
module SimpleBDD
{
class BDDNode
{
static ghost predicate bitfunc(f: map<seq<bool>, bool>, n: nat)
{
forall i:seq<bool> :: i in f <==> |i| == n
}
ghost var Contents: map<seq<bool>, bool>
ghost var Repr: set<object>
ghost var n: nat
var f: BDDNode?, t: BDDNode?
var b: bool
ghost predicate valid()
reads this, Repr
{
bitfunc(Contents,n) &&
(0 == n ==> (b <==> Contents[[]])) &&
(0 < n ==>
this in Repr &&
f != null && t != null && t in Repr && f in Repr &&
t.Repr <= Repr && f.Repr <= Repr &&
this !in f.Repr && this !in t.Repr &&
t.valid() && f.valid() &&
t.n == f.n == n-1 &&
(forall s | s in t.Contents :: Contents[[true] + s] <==> t.Contents[s]) &&
(forall s | s in f.Contents :: Contents[[false] + s] <==> f.Contents[s]))
}
}
class BDD
{
var root: BDDNode
ghost predicate valid()
reads this, Repr
{
root in Repr && root.Repr <= Repr && root.valid() &&
n == root.n && Contents == root.Contents
}
constructor () {
root := new BDDNode;
}
ghost var Contents: map<seq<bool>, bool>
var n: nat
ghost var Repr: set<object>
method Eval(s: seq<bool>) returns(b: bool)
requires valid() && |s| == n
ensures b == Contents[s]
{
var node: BDDNode := root;
var i := n;
while i > 0
{
node := if s[n-i] then node.t else node.f;
i := i - 1;
}
b := node.b;
}
}
}
|
308 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_from dafny main repo_dafny1_ListContents.dfy | // RUN: %testDafnyForEachResolver "%s"
class Node<T> {
ghost var List: seq<T>
ghost var Repr: set<Node<T>>
var data: T
var next: Node?<T>
ghost predicate Valid()
reads this, Repr
{
this in Repr &&
(next == null ==> List == [data]) &&
(next != null ==>
next in Repr && next.Repr <= Repr &&
this !in next.Repr &&
List == [data] + next.List &&
next.Valid())
}
constructor (d: T)
ensures Valid() && fresh(Repr)
ensures List == [d]
{
data, next := d, null;
List, Repr := [d], {this};
}
constructor InitAsPredecessor(d: T, succ: Node<T>)
requires succ.Valid()
ensures Valid() && fresh(Repr - succ.Repr)
ensures List == [d] + succ.List
{
data, next := d, succ;
List := [d] + succ.List;
Repr := {this} + succ.Repr;
}
method Prepend(d: T) returns (r: Node<T>)
requires Valid()
ensures r.Valid() && fresh(r.Repr - old(Repr))
ensures r.List == [d] + List
{
r := new Node.InitAsPredecessor(d, this);
}
method SkipHead() returns (r: Node?<T>)
requires Valid()
ensures r == null ==> |List| == 1
ensures r != null ==> r.Valid() && r.List == List[1..] && r.Repr <= Repr
{
r := next;
}
method ReverseInPlace() returns (reverse: Node<T>)
requires Valid()
modifies Repr
ensures reverse.Valid() && reverse.Repr <= old(Repr)
ensures |reverse.List| == |old(List)|
ensures forall i :: 0 <= i < |reverse.List| ==> reverse.List[i] == old(List)[|old(List)|-1-i]
{
var current := next;
reverse := this;
reverse.next := null;
reverse.Repr := {reverse};
reverse.List := [data];
while current != null
invariant reverse.Valid() && reverse.Repr <= old(Repr)
invariant current == null ==> |old(List)| == |reverse.List|
invariant current != null ==>
current.Valid() &&
current in old(Repr) && current.Repr <= old(Repr) &&
current.Repr !! reverse.Repr
invariant current != null ==>
|old(List)| == |reverse.List| + |current.List| &&
current.List == old(List)[|reverse.List|..]
invariant forall i :: 0 <= i < |reverse.List| ==> reverse.List[i] == old(List)[|reverse.List|-1-i]
decreases if current != null then |current.List| else -1
{
var nx := current.next;
// ..., reverse, current, nx, ...
current.next := reverse;
current.Repr := {current} + reverse.Repr;
current.List := [current.data] + reverse.List;
reverse := current;
current := nx;
}
}
}
| // RUN: %testDafnyForEachResolver "%s"
class Node<T> {
ghost var List: seq<T>
ghost var Repr: set<Node<T>>
var data: T
var next: Node?<T>
ghost predicate Valid()
reads this, Repr
{
this in Repr &&
(next == null ==> List == [data]) &&
(next != null ==>
next in Repr && next.Repr <= Repr &&
this !in next.Repr &&
List == [data] + next.List &&
next.Valid())
}
constructor (d: T)
ensures Valid() && fresh(Repr)
ensures List == [d]
{
data, next := d, null;
List, Repr := [d], {this};
}
constructor InitAsPredecessor(d: T, succ: Node<T>)
requires succ.Valid()
ensures Valid() && fresh(Repr - succ.Repr)
ensures List == [d] + succ.List
{
data, next := d, succ;
List := [d] + succ.List;
Repr := {this} + succ.Repr;
}
method Prepend(d: T) returns (r: Node<T>)
requires Valid()
ensures r.Valid() && fresh(r.Repr - old(Repr))
ensures r.List == [d] + List
{
r := new Node.InitAsPredecessor(d, this);
}
method SkipHead() returns (r: Node?<T>)
requires Valid()
ensures r == null ==> |List| == 1
ensures r != null ==> r.Valid() && r.List == List[1..] && r.Repr <= Repr
{
r := next;
}
method ReverseInPlace() returns (reverse: Node<T>)
requires Valid()
modifies Repr
ensures reverse.Valid() && reverse.Repr <= old(Repr)
ensures |reverse.List| == |old(List)|
ensures forall i :: 0 <= i < |reverse.List| ==> reverse.List[i] == old(List)[|old(List)|-1-i]
{
var current := next;
reverse := this;
reverse.next := null;
reverse.Repr := {reverse};
reverse.List := [data];
while current != null
current.Valid() &&
current in old(Repr) && current.Repr <= old(Repr) &&
current.Repr !! reverse.Repr
|old(List)| == |reverse.List| + |current.List| &&
current.List == old(List)[|reverse.List|..]
{
var nx := current.next;
// ..., reverse, current, nx, ...
current.next := reverse;
current.Repr := {current} + reverse.Repr;
current.List := [current.data] + reverse.List;
reverse := current;
current := nx;
}
}
}
|
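// A hypothetical client sketch that relies only on the constructor and Prepend
// specifications above; ClientList and the concrete values are made up.
method ClientList()
{
  var tail := new Node<int>(3);  // tail.List == [3]
  var xs := tail.Prepend(2);     // xs.List == [2] + [3]
  assert xs.List == [2, 3];
}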
309 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_from dafny main repo_dafny1_Queue.dfy | // RUN: %testDafnyForEachResolver "%s"
// Queue.dfy
// Dafny version of Queue.bpl
// Rustan Leino, 2008
class Queue<T(0)> {
var head: Node<T>
var tail: Node<T>
ghost var contents: seq<T>
ghost var footprint: set<object>
ghost var spine: set<Node<T>>
ghost predicate Valid()
reads this, footprint
{
this in footprint && spine <= footprint &&
head in spine &&
tail in spine &&
tail.next == null &&
(forall n ::
n in spine ==>
n.footprint <= footprint && this !in n.footprint &&
n.Valid() &&
(n.next == null ==> n == tail)) &&
(forall n ::
n in spine ==>
n.next != null ==> n.next in spine) &&
contents == head.tailContents
}
constructor Init()
ensures Valid() && fresh(footprint - {this})
ensures |contents| == 0
{
var n: Node<T> := new Node<T>.Init();
head := n;
tail := n;
contents := n.tailContents;
footprint := {this} + n.footprint;
spine := {n};
}
method Rotate()
requires Valid()
requires 0 < |contents|
modifies footprint
ensures Valid() && fresh(footprint - old(footprint))
ensures contents == old(contents)[1..] + old(contents)[..1]
{
var t := Front();
Dequeue();
Enqueue(t);
}
method RotateAny()
requires Valid()
requires 0 < |contents|
modifies footprint
ensures Valid() && fresh(footprint - old(footprint))
ensures |contents| == |old(contents)|
ensures exists i :: 0 <= i && i <= |contents| &&
contents == old(contents)[i..] + old(contents)[..i]
{
var t := Front();
Dequeue();
Enqueue(t);
}
method IsEmpty() returns (isEmpty: bool)
requires Valid()
ensures isEmpty <==> |contents| == 0
{
isEmpty := head == tail;
}
method Enqueue(t: T)
requires Valid()
modifies footprint
ensures Valid() && fresh(footprint - old(footprint))
ensures contents == old(contents) + [t]
{
var n := new Node<T>.Init();
n.data := t;
tail.next := n;
tail := n;
forall m | m in spine {
m.tailContents := m.tailContents + [t];
}
contents := head.tailContents;
forall m | m in spine {
m.footprint := m.footprint + n.footprint;
}
footprint := footprint + n.footprint;
spine := spine + {n};
}
method Front() returns (t: T)
requires Valid()
requires 0 < |contents|
ensures t == contents[0]
{
t := head.next.data;
}
method Dequeue()
requires Valid()
requires 0 < |contents|
modifies footprint
ensures Valid() && fresh(footprint - old(footprint))
ensures contents == old(contents)[1..]
{
var n := head.next;
head := n;
contents := n.tailContents;
}
}
class Node<T(0)> {
var data: T
var next: Node?<T>
ghost var tailContents: seq<T>
ghost var footprint: set<object>
ghost predicate Valid()
reads this, footprint
{
this in footprint &&
(next != null ==> next in footprint && next.footprint <= footprint) &&
(next == null ==> tailContents == []) &&
(next != null ==> tailContents == [next.data] + next.tailContents)
}
constructor Init()
ensures Valid() && fresh(footprint - {this})
ensures next == null
{
next := null;
tailContents := [];
footprint := {this};
}
}
class Main<U(0)> {
method A<T(0)>(t: T, u: T, v: T)
{
var q0 := new Queue<T>.Init();
var q1 := new Queue<T>.Init();
q0.Enqueue(t);
q0.Enqueue(u);
q1.Enqueue(v);
assert |q0.contents| == 2;
var w := q0.Front();
assert w == t;
q0.Dequeue();
w := q0.Front();
assert w == u;
assert |q0.contents| == 1;
assert |q1.contents| == 1;
}
method Main2(t: U, u: U, v: U, q0: Queue<U>, q1: Queue<U>)
requires q0.Valid()
requires q1.Valid()
requires q0.footprint !! q1.footprint
requires |q0.contents| == 0
modifies q0.footprint, q1.footprint
ensures fresh(q0.footprint - old(q0.footprint))
ensures fresh(q1.footprint - old(q1.footprint))
{
q0.Enqueue(t);
q0.Enqueue(u);
q1.Enqueue(v);
assert |q0.contents| == 2;
var w := q0.Front();
assert w == t;
q0.Dequeue();
w := q0.Front();
assert w == u;
assert |q0.contents| == 1;
assert |q1.contents| == old(|q1.contents|) + 1;
}
}
| // RUN: %testDafnyForEachResolver "%s"
// Queue.dfy
// Dafny version of Queue.bpl
// Rustan Leino, 2008
class Queue<T(0)> {
var head: Node<T>
var tail: Node<T>
ghost var contents: seq<T>
ghost var footprint: set<object>
ghost var spine: set<Node<T>>
ghost predicate Valid()
reads this, footprint
{
this in footprint && spine <= footprint &&
head in spine &&
tail in spine &&
tail.next == null &&
(forall n ::
n in spine ==>
n.footprint <= footprint && this !in n.footprint &&
n.Valid() &&
(n.next == null ==> n == tail)) &&
(forall n ::
n in spine ==>
n.next != null ==> n.next in spine) &&
contents == head.tailContents
}
constructor Init()
ensures Valid() && fresh(footprint - {this})
ensures |contents| == 0
{
var n: Node<T> := new Node<T>.Init();
head := n;
tail := n;
contents := n.tailContents;
footprint := {this} + n.footprint;
spine := {n};
}
method Rotate()
requires Valid()
requires 0 < |contents|
modifies footprint
ensures Valid() && fresh(footprint - old(footprint))
ensures contents == old(contents)[1..] + old(contents)[..1]
{
var t := Front();
Dequeue();
Enqueue(t);
}
method RotateAny()
requires Valid()
requires 0 < |contents|
modifies footprint
ensures Valid() && fresh(footprint - old(footprint))
ensures |contents| == |old(contents)|
ensures exists i :: 0 <= i && i <= |contents| &&
contents == old(contents)[i..] + old(contents)[..i]
{
var t := Front();
Dequeue();
Enqueue(t);
}
method IsEmpty() returns (isEmpty: bool)
requires Valid()
ensures isEmpty <==> |contents| == 0
{
isEmpty := head == tail;
}
method Enqueue(t: T)
requires Valid()
modifies footprint
ensures Valid() && fresh(footprint - old(footprint))
ensures contents == old(contents) + [t]
{
var n := new Node<T>.Init();
n.data := t;
tail.next := n;
tail := n;
forall m | m in spine {
m.tailContents := m.tailContents + [t];
}
contents := head.tailContents;
forall m | m in spine {
m.footprint := m.footprint + n.footprint;
}
footprint := footprint + n.footprint;
spine := spine + {n};
}
method Front() returns (t: T)
requires Valid()
requires 0 < |contents|
ensures t == contents[0]
{
t := head.next.data;
}
method Dequeue()
requires Valid()
requires 0 < |contents|
modifies footprint
ensures Valid() && fresh(footprint - old(footprint))
ensures contents == old(contents)[1..]
{
var n := head.next;
head := n;
contents := n.tailContents;
}
}
class Node<T(0)> {
var data: T
var next: Node?<T>
ghost var tailContents: seq<T>
ghost var footprint: set<object>
ghost predicate Valid()
reads this, footprint
{
this in footprint &&
(next != null ==> next in footprint && next.footprint <= footprint) &&
(next == null ==> tailContents == []) &&
(next != null ==> tailContents == [next.data] + next.tailContents)
}
constructor Init()
ensures Valid() && fresh(footprint - {this})
ensures next == null
{
next := null;
tailContents := [];
footprint := {this};
}
}
class Main<U(0)> {
method A<T(0)>(t: T, u: T, v: T)
{
var q0 := new Queue<T>.Init();
var q1 := new Queue<T>.Init();
q0.Enqueue(t);
q0.Enqueue(u);
q1.Enqueue(v);
var w := q0.Front();
q0.Dequeue();
w := q0.Front();
}
method Main2(t: U, u: U, v: U, q0: Queue<U>, q1: Queue<U>)
requires q0.Valid()
requires q1.Valid()
requires q0.footprint !! q1.footprint
requires |q0.contents| == 0
modifies q0.footprint, q1.footprint
ensures fresh(q0.footprint - old(q0.footprint))
ensures fresh(q1.footprint - old(q1.footprint))
{
q0.Enqueue(t);
q0.Enqueue(u);
q1.Enqueue(v);
var w := q0.Front();
q0.Dequeue();
w := q0.Front();
}
}
|
310 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_from dafny main repo_dafny2_COST-verif-comp-2011-2-MaxTree-class.dfy | // RUN: %testDafnyForEachResolver "%s" -- --warn-deprecation:false
/*
Rustan Leino, 5 Oct 2011
COST Verification Competition, Challenge 2: Maximum in a tree
http://foveoos2011.cost-ic0701.org/verification-competition
Given: A non-empty binary tree, where every node carries an integer.
Implement and verify a program that computes the maximum of the values
in the tree.
Please base your program on the following data structure signature:
public class Tree {
int value;
Tree left;
Tree right;
}
You may represent empty trees as null references or as you consider
appropriate.
*/
// Remarks:
// The specification of this program uses the common dynamic-frames idiom in Dafny: the
// ghost field 'Contents' stores the abstract value of an object, the ghost field 'Repr'
// stores the set of (references to) objects that make up the representation of the object
// (which in this case is the Tree itself plus the 'Repr' sets of the left and right
// subtrees), and a function 'Valid()' that returns 'true' when an object is in a
// consistent state (that is, when an object satisfies the "class invariant").
// The design I used was to represent an empty tree as a Tree object whose left and
// right pointers point to the object itself. This is convenient, because it lets
// clients of Tree and the implementation of Tree always use non-null pointers to
// Tree objects.
// What needs to be human-trusted about this program is that the 'requires' and
// 'ensures' clauses (that is, the pre- and postconditions, respectively) of
// 'ComputeMax' are correct. And, since the specification talks about the ghost
// variable 'Contents', one also needs to trust that the 'Valid()' function
// constrains 'Contents' in a way that a human thinks matches the intuitive
// definition of what the contents of a tree is.
// To give a taste that the 'Valid()' function does not over-constrain the
// object, I have included two instance constructors, 'Empty()' and 'Node(...)'.
// To take this a step further, one could also write a 'Main' method that
// builds some tree and then calls 'ComputeMax', but I didn't do that here.
// About Dafny:
// As always (when it is successful), Dafny verifies that the program does not
// cause any run-time errors (like array index bounds errors), that the program
// terminates, that expressions and functions are well defined, and that all
// specifications are satisfied. The language prevents type errors by being type
// safe, prevents dangling pointers by not having an "address-of" or "deallocate"
// operation (which is accommodated at run time by a garbage collector), and
// prevents arithmetic overflow errors by using mathematical integers (which
// is accommodated at run time by using BigNum's). By proving that programs
// terminate, Dafny proves that a program's time usage is finite, which implies
// that the program's space usage is finite too. However, executing the
// program may fall short of your hopes if you don't have enough time or
// space; that is, the program may run out of space or may fail to terminate in
// your lifetime, because Dafny does not prove that the time or space needed by
// the program matches your execution environment. The only input fed to
// the Dafny verifier/compiler is the program text below; Dafny then automatically
// verifies and compiles the program (for this program in less than 2.5 seconds)
// without further human intervention.
class Tree {
// an empty tree is represented by a Tree object with left==this==right
var value: int
var left: Tree?
var right: Tree?
ghost var Contents: seq<int>
ghost var Repr: set<object>
ghost predicate Valid()
reads this, Repr
ensures Valid() ==> this in Repr
{
this in Repr &&
left != null && right != null &&
((left == this == right && Contents == []) ||
(left in Repr && left.Repr <= Repr && this !in left.Repr &&
right in Repr && right.Repr <= Repr && this !in right.Repr &&
left.Valid() && right.Valid() &&
Contents == left.Contents + [value] + right.Contents))
}
function IsEmpty(): bool
requires Valid();
reads this, Repr;
ensures IsEmpty() <==> Contents == [];
{
left == this
}
constructor Empty()
ensures Valid() && Contents == [];
{
left, right := this, this;
Contents := [];
Repr := {this};
}
constructor Node(lft: Tree, val: int, rgt: Tree)
requires lft.Valid() && rgt.Valid();
ensures Valid() && Contents == lft.Contents + [val] + rgt.Contents;
{
left, value, right := lft, val, rgt;
Contents := lft.Contents + [val] + rgt.Contents;
Repr := lft.Repr + {this} + rgt.Repr;
}
lemma exists_intro<T>(P: T ~> bool, x: T)
requires P.requires(x)
requires P(x)
ensures exists y :: P.requires(y) && P(y)
{
}
method ComputeMax() returns (mx: int)
requires Valid() && !IsEmpty();
ensures forall x :: x in Contents ==> x <= mx;
ensures exists x :: x in Contents && x == mx;
decreases Repr;
{
mx := value;
if (!left.IsEmpty()) {
var m := left.ComputeMax();
mx := if mx < m then m else mx;
}
if (!right.IsEmpty()) {
var m := right.ComputeMax();
mx := if mx < m then m else mx;
}
exists_intro(x reads this => x in Contents && x == mx, mx);
}
}
| // RUN: %testDafnyForEachResolver "%s" -- --warn-deprecation:false
/*
Rustan Leino, 5 Oct 2011
COST Verification Competition, Challenge 2: Maximum in a tree
http://foveoos2011.cost-ic0701.org/verification-competition
Given: A non-empty binary tree, where every node carries an integer.
Implement and verify a program that computes the maximum of the values
in the tree.
Please base your program on the following data structure signature:
public class Tree {
int value;
Tree left;
Tree right;
}
You may represent empty trees as null references or as you consider
appropriate.
*/
// Remarks:
// The specification of this program uses the common dynamic-frames idiom in Dafny: the
// ghost field 'Contents' stores the abstract value of an object, the ghost field 'Repr'
// stores the set of (references to) objects that make up the representation of the object
// (which in this case is the Tree itself plus the 'Repr' sets of the left and right
// subtrees), and a function 'Valid()' that returns 'true' when an object is in a
// consistent state (that is, when an object satisfies the "class invariant").
// The design I used was to represent an empty tree as a Tree object whose left and
// right pointers point to the object itself. This is convenient, because it lets
// clients of Tree and the implementation of Tree always use non-null pointers to
// Tree objects.
// What needs to be human-trusted about this program is that the 'requires' and
// 'ensures' clauses (that is, the pre- and postconditions, respectively) of
// 'ComputeMax' are correct. And, since the specification talks about the ghost
// variable 'Contents', one also needs to trust that the 'Valid()' function
// constrains 'Contents' in a way that a human thinks matches the intuitive
// definition of what the contents of a tree is.
// To give a taste that the 'Valid()' function does not over-constrain the
// object, I have included two instance constructors, 'Empty()' and 'Node(...)'.
// To take this a step further, one could also write a 'Main' method that
// builds some tree and then calls 'ComputeMax', but I didn't do that here.
// About Dafny:
// As always (when it is successful), Dafny verifies that the program does not
// cause any run-time errors (like array index bounds errors), that the program
// terminates, that expressions and functions are well defined, and that all
// specifications are satisfied. The language prevents type errors by being type
// safe, prevents dangling pointers by not having an "address-of" or "deallocate"
// operation (which is accommodated at run time by a garbage collector), and
// prevents arithmetic overflow errors by using mathematical integers (which
// is accommodated at run time by using BigNum's). By proving that programs
// terminate, Dafny proves that a program's time usage is finite, which implies
// that the program's space usage is finite too. However, executing the
// program may fall short of your hopes if you don't have enough time or
// space; that is, the program may run out of space or may fail to terminate in
// your lifetime, because Dafny does not prove that the time or space needed by
// the program matches your execution environment. The only input fed to
// the Dafny verifier/compiler is the program text below; Dafny then automatically
// verifies and compiles the program (for this program in less than 2.5 seconds)
// without further human intervention.
class Tree {
// an empty tree is represented by a Tree object with left==this==right
var value: int
var left: Tree?
var right: Tree?
ghost var Contents: seq<int>
ghost var Repr: set<object>
ghost predicate Valid()
reads this, Repr
ensures Valid() ==> this in Repr
{
this in Repr &&
left != null && right != null &&
((left == this == right && Contents == []) ||
(left in Repr && left.Repr <= Repr && this !in left.Repr &&
right in Repr && right.Repr <= Repr && this !in right.Repr &&
left.Valid() && right.Valid() &&
Contents == left.Contents + [value] + right.Contents))
}
function IsEmpty(): bool
requires Valid();
reads this, Repr;
ensures IsEmpty() <==> Contents == [];
{
left == this
}
constructor Empty()
ensures Valid() && Contents == [];
{
left, right := this, this;
Contents := [];
Repr := {this};
}
constructor Node(lft: Tree, val: int, rgt: Tree)
requires lft.Valid() && rgt.Valid();
ensures Valid() && Contents == lft.Contents + [val] + rgt.Contents;
{
left, value, right := lft, val, rgt;
Contents := lft.Contents + [val] + rgt.Contents;
Repr := lft.Repr + {this} + rgt.Repr;
}
lemma exists_intro<T>(P: T ~> bool, x: T)
requires P.requires(x)
requires P(x)
ensures exists y :: P.requires(y) && P(y)
{
}
method ComputeMax() returns (mx: int)
requires Valid() && !IsEmpty();
ensures forall x :: x in Contents ==> x <= mx;
ensures exists x :: x in Contents && x == mx;
{
mx := value;
if (!left.IsEmpty()) {
var m := left.ComputeMax();
mx := if mx < m then m else mx;
}
if (!right.IsEmpty()) {
var m := right.ComputeMax();
mx := if mx < m then m else mx;
}
exists_intro(x reads this => x in Contents && x == mx, mx);
}
}
|
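// A hypothetical client sketch built only from the constructors' and
// ComputeMax's specifications above; ClientMax and the literal 57 are made up.
method ClientMax()
{
  var l := new Tree.Empty();
  var r := new Tree.Empty();
  var t := new Tree.Node(l, 57, r);  // t.Contents == [57]
  var mx := t.ComputeMax();
  assert mx in t.Contents;
}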
311 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_from dafny main repo_dafny2_COST-verif-comp-2011-3-TwoDuplicates.dfy | // RUN: %testDafnyForEachResolver "%s" -- --warn-deprecation:false
/*
Rustan Leino, 5 Oct 2011
COST Verification Competition, Challenge 3: Two equal elements
http://foveoos2011.cost-ic0701.org/verification-competition
Given: An integer array a of length n+2 with n>=2. It is known that at
least two values stored in the array appear twice (i.e., there are at
least two duplets).
Implement and verify a program finding such two values.
You may assume that the array contains values between 0 and n-1.
*/
// Remarks:
// The implementation of method 'Search' takes one pass through the elements of
// the given array. To keep track of what it has seen, it allocates an array as
// temporary storage--I imagine that this is what the competition designers
// had in mind, since the problem description says one can assume the values
// of the given array to lie in the range 0..n.
// To keep track of whether it already has found one duplicate, the method
// sets the output variables p and q as follows:
// p != q - no duplicates found yet
// p == q - one duplicate found so far, namely the value stored in p and q
// Note, the loop invariant does not need to say anything about the state
// of two duplicates having been found, because when the second duplicate is
// found, the method returns.
// What needs to be human-trusted about this program is the specification of
// 'Search'. The specification straightforwardly lists the assumptions stated
// in the problem description, including the given fact that the array contains
// (at least) two distinct elements that each occurs (at least) twice. To
// trust the specification of 'Search', a human also needs to trust the definition
// of 'IsDuplicate' and its auxiliary function 'IsPrefixDuplicate'.
// About Dafny:
// As always (when it is successful), Dafny verifies that the program does not
// cause any run-time errors (like array index bounds errors), that the program
// terminates, that expressions and functions are well defined, and that all
// specifications are satisfied. The language prevents type errors by being type
// safe, prevents dangling pointers by not having an "address-of" or "deallocate"
// operation (which is accommodated at run time by a garbage collector), and
// prevents arithmetic overflow errors by using mathematical integers (which
// is accommodated at run time by using BigNum's). By proving that programs
// terminate, Dafny proves that a program's time usage is finite, which implies
// that the program's space usage is finite too. However, executing the
// program may fall short of your hopes if you don't have enough time or
// space; that is, the program may run out of space or may fail to terminate in
// your lifetime, because Dafny does not prove that the time or space needed by
// the program matches your execution environment. The only input fed to
// the Dafny verifier/compiler is the program text below; Dafny then automatically
// verifies and compiles the program (for this program in less than 11 seconds)
// without further human intervention.
ghost predicate IsDuplicate(a: array<int>, p: int)
reads a
{
IsPrefixDuplicate(a, a.Length, p)
}
ghost predicate IsPrefixDuplicate(a: array<int>, k: int, p: int)
requires 0 <= k <= a.Length;
reads a;
{
exists i,j :: 0 <= i < j < k && a[i] == a[j] == p
}
method Search(a: array<int>) returns (p: int, q: int)
requires 4 <= a.Length;
requires exists p,q :: p != q && IsDuplicate(a, p) && IsDuplicate(a, q); // two distinct duplicates exist
requires forall i :: 0 <= i < a.Length ==> 0 <= a[i] < a.Length - 2; // the elements of "a" in the range [0.. a.Length-2]
ensures p != q && IsDuplicate(a, p) && IsDuplicate(a, q);
{
// allocate an array "d" and initialize its elements to -1.
var d := new int[a.Length-2];
var i := 0;
while (i < d.Length)
invariant 0 <= i <= d.Length && forall j :: 0 <= j < i ==> d[j] == -1;
{
d[i], i := -1, i+1;
}
i, p, q := 0, 0, 1;
while (true)
invariant 0 <= i < a.Length;
invariant forall j :: 0 <= j < d.Length ==>
(d[j] == -1 && forall k :: 0 <= k < i ==> a[k] != j) ||
(0 <= d[j] < i && a[d[j]] == j);
invariant p == q ==> IsDuplicate(a, p); //WISH remove the trigger on the next line
invariant forall k {:trigger old(a[k])} :: 0 <= k < i && IsPrefixDuplicate(a, i, a[k]) ==> p == q == a[k];
decreases a.Length - i;
{
var k := d[a[i]];
assert k < i; // note, this assertion is really for human consumption; it is not needed by the verifier, and it does not change the performance of the verifier
if (k == -1) {
// a[i] does not exist in a[..i]
d[a[i]] := i;
} else {
// we have encountered a duplicate
assert a[i] == a[k] && IsDuplicate(a, a[i]); // note, this assertion is really for human consumption; it is not needed by the verifier, and it does not change the performance of the verifier
if (p != q) {
// this is the first duplicate encountered
p, q := a[i], a[i];
} else if (p == a[i]) {
// this is another copy of the same duplicate we have seen before
} else {
// this is the second duplicate
q := a[i];
return;
}
}
i := i + 1;
}
}
| // RUN: %testDafnyForEachResolver "%s" -- --warn-deprecation:false
/*
Rustan Leino, 5 Oct 2011
COST Verification Competition, Challenge 3: Two equal elements
http://foveoos2011.cost-ic0701.org/verification-competition
Given: An integer array a of length n+2 with n>=2. It is known that at
least two values stored in the array appear twice (i.e., there are at
least two duplets).
Implement and verify a program finding such two values.
You may assume that the array contains values between 0 and n-1.
*/
// Remarks:
// The implementation of method 'Search' takes one pass through the elements of
// the given array. To keep track of what it has seen, it allocates an array as
// temporary storage--I imagine that this is what the competition designers
// had in mind, since the problem description says one can assume the values
// of the given array to lie in the range 0..n.
// To keep track of whether it already has found one duplicate, the method
// sets the output variables p and q as follows:
// p != q - no duplicates found yet
// p == q - one duplicate found so far, namely the value stored in p and q
// Note, the loop invariant does not need to say anything about the state
// of two duplicates having been found, because when the second duplicate is
// found, the method returns.
// What needs to be human-trusted about this program is the specification of
// 'Search'. The specification straightforwardly lists the assumptions stated
// in the problem description, including the given fact that the array contains
// (at least) two distinct elements that each occurs (at least) twice. To
// trust the specification of 'Search', a human also needs to trust the definition
// of 'IsDuplicate' and its auxiliary function 'IsPrefixDuplicate'.
// About Dafny:
// As always (when it is successful), Dafny verifies that the program does not
// cause any run-time errors (like array index bounds errors), that the program
// terminates, that expressions and functions are well defined, and that all
// specifications are satisfied. The language prevents type errors by being type
// safe, prevents dangling pointers by not having an "address-of" or "deallocate"
// operation (which is accommodated at run time by a garbage collector), and
// prevents arithmetic overflow errors by using mathematical integers (which
// is accommodated at run time by using BigNum's). By proving that programs
// terminate, Dafny proves that a program's time usage is finite, which implies
// that the program's space usage is finite too. However, executing the
// program may fall short of your hopes if you don't have enough time or
// space; that is, the program may run out of space or may fail to terminate in
// your lifetime, because Dafny does not prove that the time or space needed by
// the program matches your execution environment. The only input fed to
// the Dafny verifier/compiler is the program text below; Dafny then automatically
// verifies and compiles the program (for this program in less than 11 seconds)
// without further human intervention.
ghost predicate IsDuplicate(a: array<int>, p: int)
reads a
{
IsPrefixDuplicate(a, a.Length, p)
}
ghost predicate IsPrefixDuplicate(a: array<int>, k: int, p: int)
requires 0 <= k <= a.Length;
reads a;
{
exists i,j :: 0 <= i < j < k && a[i] == a[j] == p
}
method Search(a: array<int>) returns (p: int, q: int)
requires 4 <= a.Length;
requires exists p,q :: p != q && IsDuplicate(a, p) && IsDuplicate(a, q); // two distinct duplicates exist
requires forall i :: 0 <= i < a.Length ==> 0 <= a[i] < a.Length - 2; // the elements of "a" in the range [0.. a.Length-2]
ensures p != q && IsDuplicate(a, p) && IsDuplicate(a, q);
{
// allocate an array "d" and initialize its elements to -1.
var d := new int[a.Length-2];
var i := 0;
while (i < d.Length)
{
d[i], i := -1, i+1;
}
i, p, q := 0, 0, 1;
while (true)
(d[j] == -1 && forall k :: 0 <= k < i ==> a[k] != j) ||
(0 <= d[j] < i && a[d[j]] == j);
{
var k := d[a[i]];
if (k == -1) {
// a[i] does not exist in a[..i]
d[a[i]] := i;
} else {
// we have encountered a duplicate
if (p != q) {
// this is the first duplicate encountered
p, q := a[i], a[i];
} else if (p == a[i]) {
// this is another copy of the same duplicate we have seen before
} else {
// this is the second duplicate
q := a[i];
return;
}
}
i := i + 1;
}
}
|
312 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_from dafny main repo_dafny2_Classics.dfy | // RUN: %testDafnyForEachResolver "%s" -- --warn-deprecation:false
// A version of Turing's additive factorial program [Dr. A. Turing, "Checking a large routine",
// In "Report of a Conference of High Speed Automatic Calculating Machines", pp. 67-69, 1949].
ghost function Factorial(n: nat): nat
{
if n == 0 then 1 else n * Factorial(n-1)
}
method AdditiveFactorial(n: nat) returns (u: nat)
ensures u == Factorial(n);
{
u := 1;
var r := 0;
while (r < n)
invariant 0 <= r <= n;
invariant u == Factorial(r);
{
var v := u;
var s := 1;
while (s <= r)
invariant 1 <= s <= r+1;
invariant u == s * Factorial(r);
{
u := u + v;
s := s + 1;
}
r := r + 1;
}
}
// Hoare's FIND program [C.A.R. Hoare, "Proof of a program: FIND", CACM 14(1): 39-45, 1971].
// The proof annotations here are not the same as in Hoare's article.
// In Hoare's words:
// This program operates on an array A[1:N], and a value of f (1 <= f <= N).
// Its effect is to rearrange the elements of A in such a way that:
// forall p,q (1 <= p <= f <= q <= N ==> A[p] <= A[f] <= A[q]).
//
// Here, we use 0-based indices, so we would say:
// This method operates on an array A[0..N], and a value of f (0 <= f < N).
// Its effect is to rearrange the elements of A in such a way that:
// forall p,q :: 0 <= p <= f <= q < N ==> A[p] <= A[f] <= A[q]).
method FIND(A: array<int>, N: int, f: int)
requires A.Length == N;
requires 0 <= f < N;
modifies A;
ensures forall p,q :: 0 <= p <= f <= q < N ==> A[p] <= A[q];
{
var m, n := 0, N-1;
while (m < n)
invariant 0 <= m <= f <= n < N;
invariant forall p,q :: 0 <= p < m <= q < N ==> A[p] <= A[q];
invariant forall p,q :: 0 <= p <= n < q < N ==> A[p] <= A[q];
{
var r, i, j := A[f], m, n;
while (i <= j)
invariant m <= i && j <= n;
invariant -1 <= j && i <= N;
invariant i <= j ==> exists g :: i <= g < N && r <= A[g];
invariant i <= j ==> exists g :: 0 <= g <= j && A[g] <= r;
invariant forall p :: 0 <= p < i ==> A[p] <= r;
invariant forall q :: j < q < N ==> r <= A[q];
// the following two invariants capture (and follow from) the fact that the array is not modified outside the [m:n] range
invariant forall p,q :: 0 <= p < m <= q < N ==> A[p] <= A[q];
invariant forall p,q :: 0 <= p <= n < q < N ==> A[p] <= A[q];
// the following invariant is used to prove progress of the outer loop
invariant (i==m && j==n && r==A[f]) || (m<i && j<n);
{
ghost var firstIteration := i==m && j==n;
while (A[i] < r)
invariant m <= i <= N && (firstIteration ==> i <= f);
invariant exists g :: i <= g < N && r <= A[g];
invariant exists g :: 0 <= g <= j && A[g] <= r;
invariant forall p :: 0 <= p < i ==> A[p] <= r;
decreases j - i;
{ i := i + 1; }
while (r < A[j])
invariant 0 <= j <= n && (firstIteration ==> f <= j);
invariant exists g :: i <= g < N && r <= A[g];
invariant exists g :: 0 <= g <= j && A[g] <= r;
invariant forall q :: j < q < N ==> r <= A[q];
decreases j;
{ j := j - 1; }
assert A[j] <= r <= A[i];
if (i <= j) {
var w := A[i]; A[i] := A[j]; A[j] := w; // swap A[i] and A[j] (which may be referring to the same location)
assert A[i] <= r <= A[j];
i, j := i + 1, j - 1;
}
}
if (f <= j) {
n := j;
} else if (i <= f) {
m := i;
} else {
break; // Hoare used a goto
}
}
}
| // RUN: %testDafnyForEachResolver "%s" -- --warn-deprecation:false
// A version of Turing's additive factorial program [Dr. A. Turing, "Checking a large routine",
// In "Report of a Conference of High Speed Automatic Calculating Machines", pp. 67-69, 1949].
ghost function Factorial(n: nat): nat
{
if n == 0 then 1 else n * Factorial(n-1)
}
method AdditiveFactorial(n: nat) returns (u: nat)
ensures u == Factorial(n);
{
u := 1;
var r := 0;
while (r < n)
{
var v := u;
var s := 1;
while (s <= r)
{
u := u + v;
s := s + 1;
}
r := r + 1;
}
}
// Hoare's FIND program [C.A.R. Hoare, "Proof of a program: FIND", CACM 14(1): 39-45, 1971].
// The proof annotations here are not the same as in Hoare's article.
// In Hoare's words:
// This program operates on an array A[1:N], and a value of f (1 <= f <= N).
// Its effect is to rearrange the elements of A in such a way that:
// forall p,q (1 <= p <= f <= q <= N ==> A[p] <= A[f] <= A[q]).
//
// Here, we use 0-based indices, so we would say:
// This method operates on an array A[0..N], and a value of f (0 <= f < N).
// Its effect is to rearrange the elements of A in such a way that:
// forall p,q :: 0 <= p <= f <= q < N ==> A[p] <= A[f] <= A[q]).
method FIND(A: array<int>, N: int, f: int)
requires A.Length == N;
requires 0 <= f < N;
modifies A;
ensures forall p,q :: 0 <= p <= f <= q < N ==> A[p] <= A[q];
{
var m, n := 0, N-1;
while (m < n)
{
var r, i, j := A[f], m, n;
while (i <= j)
// the following two invariants capture (and follow from) the fact that the array is not modified outside the [m:n] range
// the following invariant is used to prove progress of the outer loop
{
ghost var firstIteration := i==m && j==n;
while (A[i] < r)
{ i := i + 1; }
while (r < A[j])
{ j := j - 1; }
if (i <= j) {
var w := A[i]; A[i] := A[j]; A[j] := w; // swap A[i] and A[j] (which may be referring to the same location)
i, j := i + 1, j - 1;
}
}
if (f <= j) {
n := j;
} else if (i <= f) {
m := i;
} else {
break; // Hoare used a goto
}
}
}
|
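// A hypothetical client sketch of FIND; UseFIND and the concrete values are made
// up, and the final assert restates FIND's postcondition for f == 2.
method UseFIND()
{
  var A := new int[5];
  A[0], A[1], A[2], A[3], A[4] := 3, 1, 4, 1, 5;
  FIND(A, 5, 2);
  // A[2] now partitions the array
  assert forall p, q :: 0 <= p <= 2 <= q < 5 ==> A[p] <= A[q];
}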
313 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_from dafny main repo_dafny2_MajorityVote.dfy | // RUN: %testDafnyForEachResolver "%s"
// Rustan Leino, June 2012.
// This file verifies an algorithm, due to Boyer and Moore, that finds the majority choice
// among a sequence of votes, see http://www.cs.utexas.edu/~moore/best-ideas/mjrty/.
// Actually, this algorithm is a slight variation on theirs, but the general idea for why
// it is correct is the same. In the Boyer and Moore algorithm, the loop counter is advanced
// by exactly 1 each iteration, which means that there may or may not be a "current leader".
// In my program below, I had instead written the loop invariant to say there is always a
// "current leader", which requires the loop index sometimes to skip a value.
//
// This file has two versions of the algorithm. In the first version, the given sequence
// of votes is assumed to have a (strict) majority choice, meaning that strictly more than
// 50% of the votes are for one candidate. It is convenient to have a name for the majority
// choice, in order to talk about it in specifications. The easiest way to do this in
// Dafny is probably to introduce a ghost parameter with the given properties. That's what
// the algorithm does, see parameter K. The postcondition is thus to output the value of
// K, which is done in the non-ghost out-parameter k.
// The proof of the algorithm requires two lemmas. These lemmas are proved automatically
// by Dafny's induction tactic.
//
// In the second version of the program, the main method does not assume there is a majority
// choice. Rather, it essentially uses the first algorithm and then checks if what it
// returns really is a majority choice. To do this, the specification of the first algorithm
// needs to be changed slightly to accommodate the possibility that there is no majority
// choice. That change in specification is also reflected in the loop invariant. Moreover,
// the algorithm itself now needs two extra 'if' statements to see if the entire sequence
// has been searched through. (This extra 'if' is essentially already handled by Boyer and
// Moore's algorithm, because it increments the loop index by 1 each iteration and therefore
// already has a special case for the case of running out of sequence elements without a
// current leader.)
// The calling harness, DetermineElection, somewhat existentially comes up with the majority
// choice, if there is such a choice, and then passes in that choice as the ghost parameter K
// to the main algorithm. Neat, huh?
// Language comment:
// The "(==)" that sits after some type parameters in this program says that the actual
// type argument must support equality.
// Advanced remark:
// There is a subtle situation in the verification of DetermineElection. Suppose the type
// parameter Candidate denotes some type whose instances depend on which objects are
// allocated. For example, if Candidate is some class type, then more candidates can come
// into being by object allocations (using "new"). What does the quantification of
// candidates "c" in the postcondition of DetermineElection now mean--all candidates that
// existed in the pre-state or (the possibly larger set of) all candidates that exist in the
// post-state? (It means the latter.) And if there does not exist a candidate in majority
// in the pre-state, could there be a (newly created) candidate in majority in the post-state?
// This will require some proof. The simplest argument seems to be that even if more candidates
// are created during the course of DetermineElection, such candidates cannot possibly
// be in majority in the sequence "a", since "a" can only contain candidates that were already
// created in the pre-state. This property is easily specified by adding a postcondition
// to the Count function. Alternatively, one could have added the antecedent "c in a" or
// "old(allocated(c))" to the "forall c" quantification in the postcondition of DetermineElection.
// About reading the proofs:
// Dafny proves the FindWinner algorithm from the given loop invariants and the two lemmas
// Lemma_Unique and Lemma_Split. In showing this proof to some colleagues, they found they
// were not as quick as Dafny in constructing the proof from these ingredients. For a human
// to understand the situation better, it helps to take smaller (and more) steps in the proof.
// At the end of this file, Nadia Polikarpova has written two versions of FindWinner that do
// that, using Dafny's support for calculational proofs.
function Count<T(==)>(a: seq<T>, s: int, t: int, x: T): int
requires 0 <= s <= t <= |a|
{
if s == t then 0 else
Count(a, s, t-1, x) + if a[t-1] == x then 1 else 0
}
ghost predicate HasMajority<T>(a: seq<T>, s: int, t: int, x: T)
requires 0 <= s <= t <= |a|
{
2 * Count(a, s, t, x) > t - s
}
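// A small illustrative check (not part of the original file): Count and
// HasMajority evaluated on a concrete vote sequence; CountExample is a made-up name.
lemma CountExample()
  ensures Count([1, 2, 1], 0, 3, 1) == 2
  ensures HasMajority([1, 2, 1], 0, 3, 1)
{
  assert Count([1, 2, 1], 0, 1, 1) == 1;
  assert Count([1, 2, 1], 0, 2, 1) == 1;
}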
// Here is the first version of the algorithm, the one that assumes there is a majority choice.
method FindWinner<Candidate(==)>(a: seq<Candidate>, ghost K: Candidate) returns (k: Candidate)
requires HasMajority(a, 0, |a|, K) // K has a (strict) majority of the votes
ensures k == K // find K
{
k := a[0];
var n, c, s := 1, 1, 0;
while n < |a|
invariant 0 <= s <= n <= |a|
invariant 2 * Count(a, s, |a|, K) > |a| - s // K has majority among a[s..]
invariant 2 * Count(a, s, n, k) > n - s // k has majority among a[s..n]
invariant c == Count(a, s, n, k)
{
if a[n] == k {
n, c := n + 1, c + 1;
} else if 2 * c > n + 1 - s {
n := n + 1;
} else {
n := n + 1;
// We have 2*Count(a, s, n, k) == n-s, and thus the following lemma
// lets us conclude 2*Count(a, s, n, K) <= n-s.
Lemma_Unique(a, s, n, K, k);
// We also have 2*Count(a, s, |a|, K) > |a|-s, and the following lemma
// tells us Count(a, s, |a|, K) == Count(a, s, n, K) + Count(a, n, |a|, K),
// and thus we can conclude 2*Count(a, n, |a|, K) > |a|-n.
Lemma_Split(a, s, n, |a|, K);
k, n, c, s := a[n], n + 1, 1, n;
}
}
Lemma_Unique(a, s, |a|, K, k); // both k and K have a majority, so K == k
}
// ------------------------------------------------------------------------------
// Here is the second version of the program, the one that also computes whether or not
// there is a majority choice.
datatype Result<Candidate> = NoWinner | Winner(cand: Candidate)
method DetermineElection<Candidate(==,0,!new)>(a: seq<Candidate>) returns (result: Result<Candidate>)
ensures result.Winner? ==> 2 * Count(a, 0, |a|, result.cand) > |a|
ensures result.NoWinner? ==> forall c :: 2 * Count(a, 0, |a|, c) <= |a|
{
if |a| == 0 { return NoWinner; }
ghost var b := exists c :: 2 * Count(a, 0, |a|, c) > |a|;
ghost var w :| b ==> 2 * Count(a, 0, |a|, w) > |a|;
var cand := SearchForWinner(a, b, w);
return if 2 * Count(a, 0, |a|, cand) > |a| then Winner(cand) else NoWinner;
}
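// A hedged usage sketch (not part of the original program): calling DetermineElection
// on a made-up sequence of int votes and branching on the Result it returns.
method ElectionDemo()
{
  var r := DetermineElection([1, 1, 2]);
  if r.Winner? {
    print "winner: ", r.cand, "\n";
  } else {
    print "no winner\n";
  }
}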
// The differences between SearchForWinner and FindWinner above are the occurrences of the
// antecedent "hasWinner ==>" and the two checks for no-more-votes that may result in a "return"
// statement.
method SearchForWinner<Candidate(==)>(a: seq<Candidate>, ghost hasWinner: bool, ghost K: Candidate) returns (k: Candidate)
requires |a| != 0
requires hasWinner ==> 2 * Count(a, 0, |a|, K) > |a| // K has a (strict) majority of the votes
ensures hasWinner ==> k == K // find K
{
k := a[0];
var n, c, s := 1, 1, 0;
while n < |a|
invariant 0 <= s <= n <= |a|
invariant hasWinner ==> 2 * Count(a, s, |a|, K) > |a| - s // K has majority among a[s..]
invariant 2 * Count(a, s, n, k) > n - s // k has majority among a[s..n]
invariant c == Count(a, s, n, k)
{
if a[n] == k {
n, c := n + 1, c + 1;
} else if 2 * c > n + 1 - s {
n := n + 1;
} else {
n := n + 1;
// We have 2*Count(a, s, n, k) == n-s, and thus the following lemma
// lets us conclude 2*Count(a, s, n, K) <= n-s.
Lemma_Unique(a, s, n, K, k);
// We also have 2*Count(a, s, |a|, K) > |a|-s, and the following lemma
// tells us Count(a, s, |a|, K) == Count(a, s, n, K) + Count(a, n, |a|, K),
// and thus we can conclude 2*Count(a, n, |a|, K) > |a|-n.
Lemma_Split(a, s, n, |a|, K);
if |a| == n { return; }
k, n, c, s := a[n], n + 1, 1, n;
}
}
Lemma_Unique(a, s, |a|, K, k); // both k and K have a majority, so K == k
}
// ------------------------------------------------------------------------------
// Here are two lemmas about Count that are used in the methods above.
lemma Lemma_Split<T>(a: seq<T>, s: int, t: int, u: int, x: T)
requires 0 <= s <= t <= u <= |a|
ensures Count(a, s, t, x) + Count(a, t, u, x) == Count(a, s, u, x)
{
/* The postcondition of this method is proved automatically via Dafny's
induction tactic. But if a manual proof had to be provided, it would
look like this:
if s != t {
Lemma_Split(a, s, t-1, u, x);
}
*/
}
lemma Lemma_Unique<T>(a: seq<T>, s: int, t: int, x: T, y: T)
requires 0 <= s <= t <= |a|
ensures x != y ==> Count(a, s, t, x) + Count(a, s, t, y) <= t - s
{
/* The postcondition of this method is proved automatically via Dafny's
induction tactic. But if a manual proof had to be provided, it would
look like this:
if s != t {
Lemma_Unique(a, s, t-1, x, y);
}
*/
}
// ------------------------------------------------------------------------------
// This version uses more calculations with integer formulas
method FindWinner'<Candidate(==)>(a: seq<Candidate>, ghost K: Candidate) returns (k: Candidate)
requires HasMajority(a, 0, |a|, K) // K has a (strict) majority of the votes
ensures k == K // find K
{
k := a[0]; // Current candidate: the first element
var lo, up, c := 0, 1, 1; // Window: [0..1], number of occurrences of k in the window: 1
while up < |a|
invariant 0 <= lo < up <= |a| // (0)
invariant HasMajority(a, lo, |a|, K) // (1) K has majority among a[lo..]
invariant HasMajority(a, lo, up, k) // (2) k has majority among a[lo..up] (in the current window)
invariant c == Count(a, lo, up, k) // (3)
{
if a[up] == k {
// One more occurrence of k
up, c := up + 1, c + 1;
} else if 2 * c > up + 1 - lo {
// An occurrence of another value, but k still has the majority
up := up + 1;
} else {
// An occurrence of another value and k just lost the majority.
// Prove that k has exactly 50% in the future window a[lo..up + 1]:
calc /* k has 50% among a[lo..up + 1] */ {
true;
== // negation of the previous branch condition;
2 * c <= up + 1 - lo;
== // loop invariant (3)
2 * Count(a, lo, up, k) <= up + 1 - lo;
== calc {
true;
== // loop invariant (2)
HasMajority(a, lo, up, k);
== // def. HasMajority
2 * Count(a, lo, up, k) > up - lo;
==
2 * Count(a, lo, up, k) >= up + 1 - lo;
}
2 * Count(a, lo, up, k) == up + 1 - lo;
}
up := up + 1;
assert 2 * Count(a, lo, up, k) == up - lo; // k has exactly 50% in the current window a[lo..up]
// We are going to start a new window a[up..up + 1] and choose a new candidate,
// so invariants (2) and (3) will be easy to re-establish.
// To re-establish (1) we have to prove that K has majority among a[up..], as up will become the new lo.
// The main idea is that we had enough K's in a[lo..], and there cannot be too many in a[lo..up].
calc /* K has majority among a[up..] */ {
2 * Count(a, up, |a|, K);
== { Lemma_Split(a, lo, up, |a|, K); }
2 * Count(a, lo, |a|, K) - 2 * Count(a, lo, up, K);
> { assert HasMajority(a, lo, |a|, K); } // loop invariant (1)
|a| - lo - 2 * Count(a, lo, up, K);
>= { if k == K {
calc {
2 * Count(a, lo, up, K);
==
2 * Count(a, lo, up, k);
== { assert 2 * Count(a, lo, up, k) == up - lo; } // k has 50% among a[lo..up]
up - lo;
}
} else {
calc {
2 * Count(a, lo, up, K);
<= { Lemma_Unique(a, lo, up, k, K); }
2 * ((up - lo) - Count(a, lo, up, k));
== { assert 2 * Count(a, lo, up, k) == up - lo; } // k has 50% among a[lo..up]
up - lo;
}
}
assert 2 * Count(a, lo, up, K) <= up - lo;
}
|a| - lo - (up - lo);
==
|a| - up;
}
assert HasMajority(a, up, |a|, K);
k, lo, up, c := a[up], up, up + 1, 1;
assert HasMajority(a, lo, |a|, K);
}
}
Lemma_Unique(a, lo, |a|, K, k); // both k and K have a majority among a[lo..], so K == k
}
// This version uses more calculations with boolean formulas
method FindWinner''<Candidate(==)>(a: seq<Candidate>, ghost K: Candidate) returns (k: Candidate)
requires HasMajority(a, 0, |a|, K) // K has a (strict) majority of the votes
ensures k == K // find K
{
k := a[0]; // Current candidate: the first element
var lo, up, c := 0, 1, 1; // Window: [0..1], number of occurrences of k in the window: 1
while up < |a|
invariant 0 <= lo < up <= |a| // (0)
invariant HasMajority(a, lo, |a|, K) // (1) K has majority among a[lo..]
invariant HasMajority(a, lo, up, k) // (2) k has majority among a[lo..up] (in the current window)
invariant c == Count(a, lo, up, k) // (3)
{
if a[up] == k {
// One more occurrence of k
up, c := up + 1, c + 1;
} else if 2 * c > up + 1 - lo {
// An occurrence of another value, but k still has the majority
up := up + 1;
} else {
// An occurrence of another value and k just lost the majority.
// Prove that k has exactly 50% in the future window a[lo..up + 1]:
calc /* k has 50% among a[lo..up + 1] */ {
true;
== // negation of the previous branch condition
2 * c <= up + 1 - lo;
== // loop invariant (3)
2 * Count(a, lo, up, k) <= up + 1 - lo;
== calc {
true;
== // loop invariant (2)
HasMajority(a, lo, up, k);
== // def. HasMajority
2 * Count(a, lo, up, k) > up - lo;
==
2 * Count(a, lo, up, k) >= up + 1 - lo;
}
2 * Count(a, lo, up, k) == up + 1 - lo;
}
up := up + 1;
assert 2 * Count(a, lo, up, k) == up - lo; // k has exactly 50% in the current window a[lo..up]
// We are going to start a new window a[up..up + 1] and choose a new candidate,
// so invariants (2) and (3) will be easy to re-establish.
// To re-establish (1) we have to prove that K has majority among a[up..], as up will become the new lo.
// The main idea is that we had enough K's in a[lo..], and there cannot be too many in a[lo..up].
calc /* K has majority among a[up..] */ {
true;
== // loop invariant (1)
HasMajority(a, lo, |a|, K);
==
2 * Count(a, lo, |a|, K) > |a| - lo;
== { Lemma_Split(a, lo, up, |a|, K); }
2 * Count(a, lo, up, K) + 2 * Count(a, up, |a|, K) > |a| - lo;
==>
{ if k == K {
calc {
2 * Count(a, lo, up, K);
==
2 * Count(a, lo, up, k);
== { assert 2 * Count(a, lo, up, k) == up - lo; } // k has 50% among a[lo..up]
up - lo;
}
} else {
calc {
true;
== { Lemma_Unique(a, lo, up, k, K); }
Count(a, lo, up, K) + Count(a, lo, up, k) <= up - lo;
==
2 * Count(a, lo, up, K) + 2 * Count(a, lo, up, k) <= 2 * (up - lo);
== { assert 2 * Count(a, lo, up, k) == up - lo; } // k has 50% among a[lo..up]
2 * Count(a, lo, up, K) <= up - lo;
}
}
assert 2 * Count(a, lo, up, K) <= up - lo;
}
// subtract off Count(a, lo, up, K) from the LHS and subtract off the larger amount up - lo from the RHS
2 * Count(a, up, |a|, K) > (|a| - lo) - (up - lo);
==
2 * Count(a, up, |a|, K) > |a| - up;
==
HasMajority(a, up, |a|, K);
}
k, lo, up, c := a[up], up, up + 1, 1;
assert HasMajority(a, lo, |a|, K);
}
}
Lemma_Unique(a, lo, |a|, K, k); // both k and K have a majority among a[lo..], so K == k
}
| // RUN: %testDafnyForEachResolver "%s"
// Rustan Leino, June 2012.
// This file verifies an algorithm, due to Boyer and Moore, that finds the majority choice
// among a sequence of votes, see http://www.cs.utexas.edu/~moore/best-ideas/mjrty/.
// Actually, this algorithm is a slight variation on theirs, but the general idea for why
// it is correct is the same. In the Boyer and Moore algorithm, the loop counter is advanced
// by exactly 1 each iteration, which means that there may or may not be a "current leader".
// In my program below, I had instead written the loop invariant to say there is always a
// "current leader", which requires the loop index sometimes to skip a value.
//
// This file has two versions of the algorithm. In the first version, the given sequence
// of votes is assumed to have a (strict) majority choice, meaning that strictly more than
// 50% of the votes are for one candidate. It is convenient to have a name for the majority
// choice, in order to talk about it in specifications. The easiest way to do this in
// Dafny is probably to introduce a ghost parameter with the given properties. That's what
// the algorithm does, see parameter K. The postcondition is thus to output the value of
// K, which is done in the non-ghost out-parameter k.
// The proof of the algorithm requires two lemmas. These lemmas are proved automatically
// by Dafny's induction tactic.
//
// In the second version of the program, the main method does not assume there is a majority
// choice. Rather, it essentially uses the first algorithm and then checks if what it
// returns really is a majority choice. To do this, the specification of the first algorithm
// needs to be changed slightly to accommodate the possibility that there is no majority
// choice. That change in specification is also reflected in the loop invariant. Moreover,
// the algorithm itself now needs two extra 'if' statements to see if the entire sequence
// has been searched through. (This extra 'if' is essentially already handled by Boyer and
// Moore's algorithm, because it increments the loop index by 1 each iteration and therefore
// already has a special case for the case of running out of sequence elements without a
// current leader.)
// The calling harness, DetermineElection, somewhat existentially comes up with the majority
// choice, if there is such a choice, and then passes in that choice as the ghost parameter K
// to the main algorithm. Neat, huh?
// Language comment:
// The "(==)" that sits after some type parameters in this program says that the actual
// type argument must support equality.
// Advanced remark:
// There is a subtle situation in the verification of DetermineElection. Suppose the type
// parameter Candidate denotes some type whose instances depend on which objects are
// allocated. For example, if Candidate is some class type, then more candidates can come
// into being by object allocations (using "new"). What does the quantification of
// candidates "c" in the postcondition of DetermineElection now mean--all candidates that
// existed in the pre-state or (the possibly larger set of) all candidates that exist in the
// post-state? (It means the latter.) And if there does not exist a candidate in majority
// in the pre-state, could there be a (newly created) candidate in majority in the post-state?
// This will require some proof. The simplest argument seems to be that even if more candidates
// are created during the course of DetermineElection, such candidates cannot possibly
// be in majority in the sequence "a", since "a" can only contain candidates that were already
// created in the pre-state. This property is easily specified by adding a postcondition
// to the Count function. Alternatively, one could have added the antecedent "c in a" or
// "old(allocated(c))" to the "forall c" quantification in the postcondition of DetermineElection.
// About reading the proofs:
// Dafny proves the FindWinner algorithm from the given loop invariants and the two lemmas
// Lemma_Unique and Lemma_Split. When this proof was shown to some colleagues, they found they
// were not as quick as Dafny in constructing the proof from these ingredients. For a human
// to understand the situation better, it helps to take smaller (and more) steps in the proof.
// At the end of this file, Nadia Polikarpova has written two versions of FindWinner that do
// that, using Dafny's support for calculational proofs.
function Count<T(==)>(a: seq<T>, s: int, t: int, x: T): int
requires 0 <= s <= t <= |a|
{
if s == t then 0 else
Count(a, s, t-1, x) + if a[t-1] == x then 1 else 0
}
ghost predicate HasMajority<T>(a: seq<T>, s: int, t: int, x: T)
requires 0 <= s <= t <= |a|
{
2 * Count(a, s, t, x) > t - s
}
// Here is the first version of the algorithm, the one that assumes there is a majority choice.
method FindWinner<Candidate(==)>(a: seq<Candidate>, ghost K: Candidate) returns (k: Candidate)
requires HasMajority(a, 0, |a|, K) // K has a (strict) majority of the votes
ensures k == K // find K
{
k := a[0];
var n, c, s := 1, 1, 0;
while n < |a|
{
if a[n] == k {
n, c := n + 1, c + 1;
} else if 2 * c > n + 1 - s {
n := n + 1;
} else {
n := n + 1;
// We have 2*Count(a, s, n, k) == n-s, and thus the following lemma
// lets us conclude 2*Count(a, s, n, K) <= n-s.
Lemma_Unique(a, s, n, K, k);
// We also have 2*Count(a, s, |a|, K) > |a|-s, and the following lemma
// tells us Count(a, s, |a|, K) == Count(a, s, n, K) + Count(a, n, |a|, K),
// and thus we can conclude 2*Count(a, n, |a|, K) > |a|-n.
Lemma_Split(a, s, n, |a|, K);
k, n, c, s := a[n], n + 1, 1, n;
}
}
Lemma_Unique(a, s, |a|, K, k); // both k and K have a majority, so K == k
}
// ------------------------------------------------------------------------------
// Here is the second version of the program, the one that also computes whether or not
// there is a majority choice.
datatype Result<Candidate> = NoWinner | Winner(cand: Candidate)
method DetermineElection<Candidate(==,0,!new)>(a: seq<Candidate>) returns (result: Result<Candidate>)
ensures result.Winner? ==> 2 * Count(a, 0, |a|, result.cand) > |a|
ensures result.NoWinner? ==> forall c :: 2 * Count(a, 0, |a|, c) <= |a|
{
if |a| == 0 { return NoWinner; }
ghost var b := exists c :: 2 * Count(a, 0, |a|, c) > |a|;
ghost var w :| b ==> 2 * Count(a, 0, |a|, w) > |a|;
var cand := SearchForWinner(a, b, w);
return if 2 * Count(a, 0, |a|, cand) > |a| then Winner(cand) else NoWinner;
}
// The differences between SearchForWinner and FindWinner above are the occurrences of the
// antecedent "hasWinner ==>" and the two checks for no-more-votes that may result in a "return"
// statement.
method SearchForWinner<Candidate(==)>(a: seq<Candidate>, ghost hasWinner: bool, ghost K: Candidate) returns (k: Candidate)
requires |a| != 0
requires hasWinner ==> 2 * Count(a, 0, |a|, K) > |a| // K has a (strict) majority of the votes
ensures hasWinner ==> k == K // find K
{
k := a[0];
var n, c, s := 1, 1, 0;
while n < |a|
{
if a[n] == k {
n, c := n + 1, c + 1;
} else if 2 * c > n + 1 - s {
n := n + 1;
} else {
n := n + 1;
// We have 2*Count(a, s, n, k) == n-s, and thus the following lemma
// lets us conclude 2*Count(a, s, n, K) <= n-s.
Lemma_Unique(a, s, n, K, k);
// We also have 2*Count(a, s, |a|, K) > |a|-s, and the following lemma
// tells us Count(a, s, |a|, K) == Count(a, s, n, K) + Count(a, n, |a|, K),
// and thus we can conclude 2*Count(a, n, |a|, K) > |a|-n.
Lemma_Split(a, s, n, |a|, K);
if |a| == n { return; }
k, n, c, s := a[n], n + 1, 1, n;
}
}
Lemma_Unique(a, s, |a|, K, k); // both k and K have a majority, so K == k
}
// ------------------------------------------------------------------------------
// Here are two lemmas about Count that are used in the methods above.
lemma Lemma_Split<T>(a: seq<T>, s: int, t: int, u: int, x: T)
requires 0 <= s <= t <= u <= |a|
ensures Count(a, s, t, x) + Count(a, t, u, x) == Count(a, s, u, x)
{
/* The postcondition of this method is proved automatically via Dafny's
induction tactic. But if a manual proof had to be provided, it would
look like this:
if s != t {
Lemma_Split(a, s, t-1, u, x);
}
*/
}
lemma Lemma_Unique<T>(a: seq<T>, s: int, t: int, x: T, y: T)
requires 0 <= s <= t <= |a|
ensures x != y ==> Count(a, s, t, x) + Count(a, s, t, y) <= t - s
{
/* The postcondition of this method is proved automatically via Dafny's
induction tactic. But if a manual proof had to be provided, it would
look like this:
if s != t {
Lemma_Unique(a, s, t-1, x, y);
}
*/
}
// ------------------------------------------------------------------------------
// This version uses more calculations with integer formulas
method FindWinner'<Candidate(==)>(a: seq<Candidate>, ghost K: Candidate) returns (k: Candidate)
requires HasMajority(a, 0, |a|, K) // K has a (strict) majority of the votes
ensures k == K // find K
{
k := a[0]; // Current candidate: the first element
var lo, up, c := 0, 1, 1; // Window: [0..1], number of occurrences of k in the window: 1
while up < |a|
{
if a[up] == k {
// One more occurrence of k
up, c := up + 1, c + 1;
} else if 2 * c > up + 1 - lo {
// An occurrence of another value, but k still has the majority
up := up + 1;
} else {
// An occurrence of another value and k just lost the majority.
// Prove that k has exactly 50% in the future window a[lo..up + 1]:
calc /* k has 50% among a[lo..up + 1] */ {
true;
== // negation of the previous branch condition;
2 * c <= up + 1 - lo;
== // loop invariant (3)
2 * Count(a, lo, up, k) <= up + 1 - lo;
== calc {
true;
== // loop invariant (2)
HasMajority(a, lo, up, k);
== // def. HasMajority
2 * Count(a, lo, up, k) > up - lo;
==
2 * Count(a, lo, up, k) >= up + 1 - lo;
}
2 * Count(a, lo, up, k) == up + 1 - lo;
}
up := up + 1;
// We are going to start a new window a[up..up + 1] and choose a new candidate,
// so invariants (2) and (3) will be easy to re-establish.
// To re-establish (1) we have to prove that K has majority among a[up..], as up will become the new lo.
// The main idea is that we had enough K's in a[lo..], and there cannot be too many in a[lo..up].
calc /* K has majority among a[up..] */ {
2 * Count(a, up, |a|, K);
== { Lemma_Split(a, lo, up, |a|, K); }
2 * Count(a, lo, |a|, K) - 2 * Count(a, lo, up, K);
> { assert HasMajority(a, lo, |a|, K); } // loop invariant (1)
|a| - lo - 2 * Count(a, lo, up, K);
>= { if k == K {
calc {
2 * Count(a, lo, up, K);
==
2 * Count(a, lo, up, k);
== { assert 2 * Count(a, lo, up, k) == up - lo; } // k has 50% among a[lo..up]
up - lo;
}
} else {
calc {
2 * Count(a, lo, up, K);
<= { Lemma_Unique(a, lo, up, k, K); }
2 * ((up - lo) - Count(a, lo, up, k));
== { assert 2 * Count(a, lo, up, k) == up - lo; } // k has 50% among a[lo..up]
up - lo;
}
}
}
|a| - lo - (up - lo);
==
|a| - up;
}
k, lo, up, c := a[up], up, up + 1, 1;
}
}
Lemma_Unique(a, lo, |a|, K, k); // both k and K have a majority among a[lo..], so K == k
}
// This version uses more calculations with boolean formulas
method FindWinner''<Candidate(==)>(a: seq<Candidate>, ghost K: Candidate) returns (k: Candidate)
requires HasMajority(a, 0, |a|, K) // K has a (strict) majority of the votes
ensures k == K // find K
{
k := a[0]; // Current candidate: the first element
var lo, up, c := 0, 1, 1; // Window: [0..1], number of occurrences of k in the window: 1
while up < |a|
{
if a[up] == k {
// One more occurrence of k
up, c := up + 1, c + 1;
} else if 2 * c > up + 1 - lo {
// An occurrence of another value, but k still has the majority
up := up + 1;
} else {
// An occurrence of another value and k just lost the majority.
// Prove that k has exactly 50% in the future window a[lo..up + 1]:
calc /* k has 50% among a[lo..up + 1] */ {
true;
== // negation of the previous branch condition
2 * c <= up + 1 - lo;
== // loop invariant (3)
2 * Count(a, lo, up, k) <= up + 1 - lo;
== calc {
true;
== // loop invariant (2)
HasMajority(a, lo, up, k);
== // def. HasMajority
2 * Count(a, lo, up, k) > up - lo;
==
2 * Count(a, lo, up, k) >= up + 1 - lo;
}
2 * Count(a, lo, up, k) == up + 1 - lo;
}
up := up + 1;
// We are going to start a new window a[up..up + 1] and choose a new candidate,
// so invariants (2) and (3) will be easy to re-establish.
// To re-establish (1) we have to prove that K has majority among a[up..], as up will become the new lo.
// The main idea is that we had enough K's in a[lo..], and there cannot be too many in a[lo..up].
calc /* K has majority among a[up..] */ {
true;
== // loop invariant (1)
HasMajority(a, lo, |a|, K);
==
2 * Count(a, lo, |a|, K) > |a| - lo;
== { Lemma_Split(a, lo, up, |a|, K); }
2 * Count(a, lo, up, K) + 2 * Count(a, up, |a|, K) > |a| - lo;
==>
{ if k == K {
calc {
2 * Count(a, lo, up, K);
==
2 * Count(a, lo, up, k);
== { assert 2 * Count(a, lo, up, k) == up - lo; } // k has 50% among a[lo..up]
up - lo;
}
} else {
calc {
true;
== { Lemma_Unique(a, lo, up, k, K); }
Count(a, lo, up, K) + Count(a, lo, up, k) <= up - lo;
==
2 * Count(a, lo, up, K) + 2 * Count(a, lo, up, k) <= 2 * (up - lo);
== { assert 2 * Count(a, lo, up, k) == up - lo; } // k has 50% among a[lo..up]
2 * Count(a, lo, up, K) <= up - lo;
}
}
}
// subtract off Count(a, lo, up, K) from the LHS and subtract off the larger amount up - lo from the RHS
2 * Count(a, up, |a|, K) > (|a| - lo) - (up - lo);
==
2 * Count(a, up, |a|, K) > |a| - up;
==
HasMajority(a, up, |a|, K);
}
k, lo, up, c := a[up], up, up + 1, 1;
}
}
Lemma_Unique(a, lo, |a|, K, k); // both k and K have a majority among a[lo..], so K == k
}
|
314 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_from dafny main repo_dafny2_StoreAndRetrieve.dfy | // RUN: %testDafnyForEachCompiler --refresh-exit-code=0 "%s" -- --relax-definite-assignment
// This file shows an example program that uses both refinement and :autocontracts
// to specify a class that stores a set of things that can be retrieved using a query.
//
// (For another example that uses these features, see Test/dafny3/CachedContainer.dfy.)
abstract module AbstractInterface {
class {:autocontracts} StoreAndRetrieve<Thing(==)> {
ghost var Contents: set<Thing>
ghost predicate Valid() {
Valid'()
}
ghost predicate {:autocontracts false} Valid'()
reads this, Repr
constructor Init()
ensures Contents == {}
method Store(t: Thing)
ensures Contents == old(Contents) + {t}
method Retrieve(matchCriterion: Thing -> bool) returns (thing: Thing)
requires exists t :: t in Contents && matchCriterion(t)
ensures Contents == old(Contents)
ensures thing in Contents && matchCriterion(thing)
}
}
abstract module A refines AbstractInterface {
class StoreAndRetrieve<Thing(==)> ... {
constructor Init...
{
Contents := {};
Repr := {this};
new;
assume Valid'(); // to be checked in module B
}
method Store...
{
Contents := Contents + {t};
assume Valid'(); // to be checked in module B
}
method Retrieve...
{
var k :| assume k in Contents && matchCriterion(k);
thing := k;
}
}
}
abstract module B refines A {
class StoreAndRetrieve<Thing(==)> ... {
var arr: seq<Thing>
ghost predicate Valid'...
{
Contents == set x | x in arr
}
constructor Init...
{
arr := [];
new;
assert ...;
}
method Store...
{
arr := arr + [t];
...;
assert ...;
}
method Retrieve...
{
var i := 0;
while (i < |arr|)
invariant i < |arr|
invariant forall j :: 0 <= j < i ==> !matchCriterion(arr[j])
{
if matchCriterion(arr[i]) {
break;
}
i := i + 1;
}
var k := arr[i];
...;
var a: seq<Thing> :| assume Contents == set x | x in a;
arr := a;
}
}
}
module abC refines B { // TODO module C causes Go to fail
class StoreAndRetrieve<Thing(==)> ... {
method Retrieve...
{
...;
var a := [thing] + arr[..i] + arr[i+1..]; // LRU behavior
}
}
}
abstract module AbstractClient {
import S : AbstractInterface
method Test() {
var s := new S.StoreAndRetrieve<real>.Init();
s.Store(20.3);
var fn := r => true;
var r := s.Retrieve(fn);
print r, "\n"; // 20.3
}
}
module Client refines AbstractClient {
import S = abC
method Main() {
Test();
}
}
| // RUN: %testDafnyForEachCompiler --refresh-exit-code=0 "%s" -- --relax-definite-assignment
// This file shows an example program that uses both refinement and :autocontracts
// to specify a class that stores a set of things that can be retrieved using a query.
//
// (For another example that uses these features, see Test/dafny3/CachedContainer.dfy.)
abstract module AbstractInterface {
class {:autocontracts} StoreAndRetrieve<Thing(==)> {
ghost var Contents: set<Thing>
ghost predicate Valid() {
Valid'()
}
ghost predicate {:autocontracts false} Valid'()
reads this, Repr
constructor Init()
ensures Contents == {}
method Store(t: Thing)
ensures Contents == old(Contents) + {t}
method Retrieve(matchCriterion: Thing -> bool) returns (thing: Thing)
requires exists t :: t in Contents && matchCriterion(t)
ensures Contents == old(Contents)
ensures thing in Contents && matchCriterion(thing)
}
}
abstract module A refines AbstractInterface {
class StoreAndRetrieve<Thing(==)> ... {
constructor Init...
{
Contents := {};
Repr := {this};
new;
assume Valid'(); // to be checked in module B
}
method Store...
{
Contents := Contents + {t};
assume Valid'(); // to be checked in module B
}
method Retrieve...
{
var k :| assume k in Contents && matchCriterion(k);
thing := k;
}
}
}
abstract module B refines A {
class StoreAndRetrieve<Thing(==)> ... {
var arr: seq<Thing>
ghost predicate Valid'...
{
Contents == set x | x in arr
}
constructor Init...
{
arr := [];
new;
}
method Store...
{
arr := arr + [t];
...;
}
method Retrieve...
{
var i := 0;
while (i < |arr|)
{
if matchCriterion(arr[i]) {
break;
}
i := i + 1;
}
var k := arr[i];
...;
var a: seq<Thing> :| assume Contents == set x | x in a;
arr := a;
}
}
}
module abC refines B { // TODO module C causes Go to fail
class StoreAndRetrieve<Thing(==)> ... {
method Retrieve...
{
...;
var a := [thing] + arr[..i] + arr[i+1..]; // LRU behavior
}
}
}
abstract module AbstractClient {
import S : AbstractInterface
method Test() {
var s := new S.StoreAndRetrieve<real>.Init();
s.Store(20.3);
var fn := r => true;
var r := s.Retrieve(fn);
print r, "\n"; // 20.3
}
}
module Client refines AbstractClient {
import S = abC
method Main() {
Test();
}
}
|
315 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_from dafny main repo_dafny3_CachedContainer.dfy | // RUN: %testDafnyForEachCompiler --refresh-exit-code=0 "%s" -- --relax-definite-assignment
// This file contains an example chain of module refinements, starting from a
// simple interface M0 to an implementation M3. Module Client.Test() is
// verified against the original M0 module. Module CachedClient instantiates
// the abstract import of M0 with the concrete module M3, and then gets to
// reuse the proof done in Client.
//
// At a sufficiently abstract level, the concepts used are all standard.
// However, it can be tricky to set these things up in Dafny, if you want
// the final program to be a composition of smaller refinement steps.
//
// Textually, refinement modules in Dafny are written with "...", rather
// than by repeating the program text from the module being refined.
// This can be difficult to both author and read, so this file can be
// used as a guide for what to aim for. Undoubtedly, use of the /rprint:-
// option on the command line will be useful, since it lets you see what
// all the ...'s expand to.
//
// As a convenience, this program also uses a second experimental feature,
// namely the preprocessing requested by :autocontracts, which supplies
// much of the boilerplate specifications that one uses with the
// dynamic-frames idiom in Dafny. This feature was designed to reduce clutter
// in the program text, but can increase the mystery behind what's really
// going on. Here, too, using the /rprint:- option will be useful, since
// it shows the automatically generated specifications and code.
//
// (For another example that uses these features, see Test/dafny2/StoreAndRetrieve.dfy.)
// give the method signatures and specs
abstract module M0 {
class {:autocontracts} Container<T(==)> {
ghost var Contents: set<T>
ghost predicate Valid() {
Valid'()
}
ghost predicate {:autocontracts false} Valid'()
reads this, Repr
constructor ()
ensures Contents == {}
method Add(t: T)
ensures Contents == old(Contents) + {t}
method Remove(t: T)
ensures Contents == old(Contents) - {t}
method Contains(t: T) returns (b: bool)
ensures Contents == old(Contents)
ensures b <==> t in Contents
}
}
// provide bodies for the methods
abstract module M1 refines M0 {
class Container<T(==)> ... {
constructor... {
Contents := {};
Repr := {this};
new;
label CheckPost:
assume Valid'(); // to be checked in further refinements
}
method Add... {
Contents := Contents + {t};
label CheckPost:
assume Valid'(); // to be checked in further refinements
}
method Remove... {
Contents := Contents - {t};
label CheckPost:
assume Valid'(); // to be checked in further refinements
}
method Contains... {
// b := t in Contents;
b :| assume b <==> t in Contents;
}
}
}
// implement the set in terms of a sequence
abstract module M2 refines M1 {
class Container<T(==)> ... {
var elems: seq<T>
ghost predicate Valid'...
{
Contents == (set x | x in elems) &&
(forall i,j :: 0 <= i < j < |elems| ==> elems[i] != elems[j]) &&
Valid''()
}
ghost predicate {:autocontracts false} Valid''()
reads this, Repr
method FindIndex(t: T) returns (j: nat)
ensures j <= |elems|
ensures if j < |elems| then elems[j] == t else t !in elems
{
j := 0;
while (j < |elems|)
invariant j <= |elems|
invariant forall i :: 0 <= i < j ==> elems[i] != t
{
if (elems[j] == t) {
return;
}
j := j + 1;
}
}
constructor... {
elems := [];
new;
label CheckPost:
assume Valid''(); // to be checked in further refinements
assert ...;
}
method Add... {
var j := FindIndex(t);
if j == |elems| {
elems := elems + [t];
}
...;
label CheckPost:
assume Valid''(); // to be checked in further refinements
assert ...;
}
method Remove... {
var j := FindIndex(t);
if j < |elems| {
elems := elems[..j] + elems[j+1..];
}
...;
label CheckPost:
assume Valid''(); // to be checked in further refinements
assert ...;
}
method Contains... {
var j := FindIndex(t);
b := j < |elems|;
}
}
}
// implement a cache
module M3 refines M2 {
datatype Cache<T> = None | Some(index: nat, value: T)
class Container<T(==)> ... {
var cache: Cache<T>
ghost predicate Valid''... {
cache.Some? ==> cache.index < |elems| && elems[cache.index] == cache.value
}
constructor... {
cache := None;
new;
...;
assert ...;
}
method FindIndex... {
if cache.Some? && cache.value == t {
return cache.index;
}
}
method Add... {
...;
assert ...;
}
method Remove... {
...;
if ... {
if cache.Some? {
if cache.index == j {
// clear the cache
cache := None;
} else if j < cache.index {
// adjust for the shifting down
cache := cache.(index := cache.index - 1);
}
}
}
...;
assert ...;
}
}
}
// here is a client of the Container
abstract module Client {
import M : M0
method Test() {
var c := new M.Container();
c.Add(56);
c.Add(12);
var b := c.Contains(17);
assert !b;
print b, " "; // false (does not contain 17)
b := c.Contains(12);
assert b;
print b, " "; // true (contains 12)
c.Remove(12);
b := c.Contains(12);
assert !b;
print b, " "; // false (no longer contains 12)
assert c.Contents == {56};
b := c.Contains(56);
assert b;
print b, "\n"; // true (still contains 56)
}
}
module CachedClient refines Client {
import M = M3
method Main() {
Test();
}
}
| // RUN: %testDafnyForEachCompiler --refresh-exit-code=0 "%s" -- --relax-definite-assignment
// This file contains an example chain of module refinements, starting from a
// simple interface M0 to an implementation M3. Module Client.Test() is
// verified against the original M0 module. Module CachedClient instantiates
// the abstract import of M0 with the concrete module M3, and then gets to
// reuse the proof done in Client.
//
// At a sufficiently abstract level, the concepts used are all standard.
// However, it can be tricky to set these things up in Dafny, if you want
// the final program to be a composition of smaller refinement steps.
//
// Textually, refinement modules in Dafny are written with "...", rather
// than by repeating the program text from the module being refined.
// This can be difficult to both author and read, so this file can be
// used as a guide for what to aim for. Undoubtedly, use of the /rprint:-
// option on the command line will be useful, since it lets you see what
// all the ...'s expand to.
//
// As a convenience, this program also uses a second experimental feature,
// namely the preprocessing requested by :autocontracts, which supplies
// much of the boilerplate specifications that one uses with the
// dynamic-frames idiom in Dafny. This feature was designed to reduce clutter
// in the program text, but can increase the mystery behind what's really
// going on. Here, too, using the /rprint:- option will be useful, since
// it shows the automatically generated specifications and code.
//
// (For another example that uses these features, see Test/dafny2/StoreAndRetrieve.dfy.)
// give the method signatures and specs
abstract module M0 {
class {:autocontracts} Container<T(==)> {
ghost var Contents: set<T>
ghost predicate Valid() {
Valid'()
}
ghost predicate {:autocontracts false} Valid'()
reads this, Repr
constructor ()
ensures Contents == {}
method Add(t: T)
ensures Contents == old(Contents) + {t}
method Remove(t: T)
ensures Contents == old(Contents) - {t}
method Contains(t: T) returns (b: bool)
ensures Contents == old(Contents)
ensures b <==> t in Contents
}
}
// provide bodies for the methods
abstract module M1 refines M0 {
class Container<T(==)> ... {
constructor... {
Contents := {};
Repr := {this};
new;
label CheckPost:
assume Valid'(); // to be checked in further refinements
}
method Add... {
Contents := Contents + {t};
label CheckPost:
assume Valid'(); // to be checked in further refinements
}
method Remove... {
Contents := Contents - {t};
label CheckPost:
assume Valid'(); // to be checked in further refinements
}
method Contains... {
// b := t in Contents;
b :| assume b <==> t in Contents;
}
}
}
// implement the set in terms of a sequence
abstract module M2 refines M1 {
class Container<T(==)> ... {
var elems: seq<T>
ghost predicate Valid'...
{
Contents == (set x | x in elems) &&
(forall i,j :: 0 <= i < j < |elems| ==> elems[i] != elems[j]) &&
Valid''()
}
ghost predicate {:autocontracts false} Valid''()
reads this, Repr
method FindIndex(t: T) returns (j: nat)
ensures j <= |elems|
ensures if j < |elems| then elems[j] == t else t !in elems
{
j := 0;
while (j < |elems|)
{
if (elems[j] == t) {
return;
}
j := j + 1;
}
}
constructor... {
elems := [];
new;
label CheckPost:
assume Valid''(); // to be checked in further refinements
}
method Add... {
var j := FindIndex(t);
if j == |elems| {
elems := elems + [t];
}
...;
label CheckPost:
assume Valid''(); // to be checked in further refinements
}
method Remove... {
var j := FindIndex(t);
if j < |elems| {
elems := elems[..j] + elems[j+1..];
}
...;
label CheckPost:
assume Valid''(); // to be checked in further refinements
}
method Contains... {
var j := FindIndex(t);
b := j < |elems|;
}
}
}
// implement a cache
module M3 refines M2 {
datatype Cache<T> = None | Some(index: nat, value: T)
class Container<T(==)> ... {
var cache: Cache<T>
ghost predicate Valid''... {
cache.Some? ==> cache.index < |elems| && elems[cache.index] == cache.value
}
constructor... {
cache := None;
new;
...;
}
method FindIndex... {
if cache.Some? && cache.value == t {
return cache.index;
}
}
method Add... {
...;
}
method Remove... {
...;
if ... {
if cache.Some? {
if cache.index == j {
// clear the cache
cache := None;
} else if j < cache.index {
// adjust for the shifting down
cache := cache.(index := cache.index - 1);
}
}
}
...;
}
}
}
// here is a client of the Container
abstract module Client {
import M : M0
method Test() {
var c := new M.Container();
c.Add(56);
c.Add(12);
var b := c.Contains(17);
print b, " "; // false (does not contain 17)
b := c.Contains(12);
print b, " "; // true (contains 12)
c.Remove(12);
b := c.Contains(12);
print b, " "; // false (no longer contains 12)
b := c.Contains(56);
print b, "\n"; // true (still contains 56)
}
}
module CachedClient refines Client {
import M = M3
method Main() {
Test();
}
}
|
316 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_from dafny main repo_dafny3_CalcExample.dfy | // RUN: %testDafnyForEachResolver "%s"
// Here is a function "f" and three axioms (that is, unproved lemmas) about "f":
ghost function f(x: int, y: int): int
lemma Associativity(x: int, y: int, z: int)
ensures f(x, f(y, z)) == f(f(x, y), z)
lemma Monotonicity(y: int, z: int)
requires y <= z
ensures forall x :: f(x, y) <= f(x, z)
lemma DiagonalIdentity(x: int)
ensures f(x, x) == x
// From these axioms, we can prove a lemma about "f":
method CalculationalStyleProof(a: int, b: int, c: int, x: int)
requires c <= x == f(a, b)
ensures f(a, f(b, c)) <= x
{
calc {
f(a, f(b, c));
== { Associativity(a, b, c); }
f(f(a, b), c);
== { assert f(a, b) == x; }
f(x, c);
<= { assert c <= x; Monotonicity(c, x); }
f(x, x);
== { DiagonalIdentity(x); }
x;
}
}
// Here's the same lemma, but with a proof written in a different style.
// (An explanation of the constructs in this lemma is found below.)
method DifferentStyleProof(a: int, b: int, c: int, x: int)
requires A: c <= x
requires B: x == f(a, b)
ensures f(a, f(b, c)) <= x
{
assert 0: f(a, f(b, c)) == f(f(a, b), c) by {
Associativity(a, b, c);
}
assert 1: f(f(a, b), c) == f(x, c) by {
reveal B;
}
assert 2: f(x, c) <= f(x, x) by {
assert c <= x by { reveal A; }
Monotonicity(c, x);
}
assert 3: f(x, x) == x by {
DiagonalIdentity(x);
}
assert 4: f(a, f(b, c)) == f(x, c) by {
reveal 0, 1;
}
assert 5: f(x, c) <= x by {
reveal 2, 3;
}
assert f(a, f(b, c)) <= x by {
reveal 4, 5;
}
}
// To understand the lemma above, here's what you need to know (and then some):
//
// * An ordinary "assert P;" statement instructs the verifier to verify
// the boolean condition "P" and then to assume "P" from here on (that
// is, in the control flow that continues from here).
//
// * An assert with a proof is written "assert P by { S }" where "S" is
// a list of statements (typically other assertions and lemma calls).
// This statement instructs the verifier to do "S" and then prove "P".
// Once this is done, the verifier assumes "P" from here on, but it
// "forgets" anything it learnt or was able to assume on account of
// doing "S". In other words, an assertion like this is like a local
// lemma--the proof "S" is used only to establish "P" and is then
// forgotten, and after the statement, only "P" remains. Note, the
// body of the "by" clause does "S" and then stops; that is, there are
// no control paths out of the body of the "by" clause.
//
// * An assertion (either an ordinary assertion or an assertion with a
// proof) can start with a label, as in:
//
// assert L: P;
//
// or:
//
// assert L: P by { S }
//
// This instructs the verifier to prove the assertion as described in the
// previous two bullets, but then to forget about "P". In other words, the
// difference between a labeled assertion and an unlabeled assertion
// is that an unlabeled assertion ends by assuming "P" whereas the labeled
// assertion does not assume anything.
//
// * Syntactically, the label "L" in a labeled assertion is the same as in
// a statement prefix "label L:", namely, "L" is either an identifier or
// a (decimal) numeric literal.
//
// * The condition "P" proved by a labeled assertion can later be recalled
// using a "reveal" statement. The "reveal" statement takes a list of
// arguments, each of which can be a label occurring in a previous
// assertion.
//
// * A precondition (or think of it as an antecedent of a lemma) is given by
// a "requires" clause. Ordinarily, the precondition is assumed on entry
// to the body of a method or lemma. Like an assert statement, a precondition
// can also be labeled. Such a precondition is not automatically assumed on
// entry to the body, but can be recalled by a "reveal" statement.
//
// * Fine points: Some exclusions apply. For example, labeled preconditions are
// not supported for functions and cannot be used to hide/reveal conditions
// while checking the well-formedness of a specification. Labeled assertions are
// not supported in expression contexts. The "reveal" described is the "reveal"
// statement. A labeled assertion can be revealed only at those program points
// that are dominated by the assertion, that is, in places that are reached
// only after definitely first having reached the assertion.
//
// * Fine point: The label "L" introduced by an assertion can also be used in
// "old@L(E)" expressions, where "E" is an expression. However, note that
// "old@L(E)" differs from "E" only in how the heap is dereferenced. That is,
// "old@L" has no effect on local variables. In contrast, a labeled assertion
// speaks about the values of the heap and locals at the time the assertion is
// mentioned. So, even if the heap or locals mentioned in a labeled assertion
// change after the assertion is mentioned, recalling the assertion condition
// with a "reveal" statement always recalls the condition with the heap and locals
// as they were when the assert was stated. For example, suppose "P" is an
// expression that mentions a local variable "x". Then, the second assertion in
//
// assert L: P by { ... }
// x := x + 1;
// ...make changes to the heap...
// reveal L;
// assert old@L(P);
//
// does not necessarily hold. The first assertion uses the initial value of the
// heap and the initial value of "x". Consequently, "reveal L;" recalls the
// asserted condition, with that initial heap and that initial value of "x",
// despite the fact that the code changes both "x" and the heap between the
// assert and the reveal. The expression "old@L(P)" essentially rolls
// back to the initial heap, but it uses the current value of "x".
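// A minimal illustration (not part of the original file) of the labeled-assertion
// syntax described above: the label L is proved but not assumed, and is later
// recalled with a "reveal" statement. The method and its precondition are made up
// purely for this sketch.
method LabelDemo(x: int)
  requires x == 3
{
  assert L: 0 <= x;
  // other proof steps could go here; the labeled condition is not assumed automatically
  reveal L;
  assert 0 <= x;
}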
| // RUN: %testDafnyForEachResolver "%s"
// Here is a function "f" and three axioms (that is, unproved lemmas) about "f":
ghost function f(x: int, y: int): int
lemma Associativity(x: int, y: int, z: int)
ensures f(x, f(y, z)) == f(f(x, y), z)
lemma Monotonicity(y: int, z: int)
requires y <= z
ensures forall x :: f(x, y) <= f(x, z)
lemma DiagonalIdentity(x: int)
ensures f(x, x) == x
// From these axioms, we can prove a lemma about "f":
method CalculationalStyleProof(a: int, b: int, c: int, x: int)
requires c <= x == f(a, b)
ensures f(a, f(b, c)) <= x
{
calc {
f(a, f(b, c));
== { Associativity(a, b, c); }
f(f(a, b), c);
== { assert f(a, b) == x; }
f(x, c);
<= { assert c <= x; Monotonicity(c, x); }
f(x, x);
== { DiagonalIdentity(x); }
x;
}
}
// Here's the same lemma, but with a proof written in a different style.
// (An explanation of the constructs in this lemma is found below.)
method DifferentStyleProof(a: int, b: int, c: int, x: int)
requires A: c <= x
requires B: x == f(a, b)
ensures f(a, f(b, c)) <= x
{
Associativity(a, b, c);
}
reveal B;
}
Monotonicity(c, x);
}
DiagonalIdentity(x);
}
reveal 0, 1;
}
reveal 2, 3;
}
reveal 4, 5;
}
}
// To understand the lemma above, here's what you need to know (and then some):
//
// * An ordinary "assert P;" statement instructs the verifier to verify
// the boolean condition "P" and then to assume "P" from here on (that
// is, in the control flow that continues from here).
//
// * An assert with a proof is written "assert P by { S }" where "S" is
// a list of statements (typically other assertions and lemma calls).
// This statement instructs the verifier to do "S" and then prove "P".
// Once this is done, the verifier assumes "P" from here on, but it
// "forgets" anything it learnt or was able to assume on account of
// doing "S". In other words, an assertion like this is like a local
// lemma--the proof "S" is used only to establish "P" and is then
// forgotten, and after the statement, only "P" remains. Note, the
// body of the "by" clause does "S" and then stops; that is, there are
// no control paths out of the body of the "by" clause.
//
// * An assertion (either an ordinary assertion or an assertion with a
// proof) can start with a label, as in:
//
// assert L: P;
//
// or:
//
// assert L: P by { S }
//
// This instructs the verifier to prove the assertion as described in the
// previous two bullets, but then to forget about "P". In other words, the
// difference between a labeled assertion and an unlabeled assertion
// is that an unlabeled assertion ends by assuming "P" whereas the labeled
// assertion does not assume anything.
//
// * Syntactically, the label "L" in a labeled assertion is the same as in
// a statement prefix "label L:", namely, "L" is either an identifier or
// a (decimal) numeric literal.
//
// * The condition "P" proved by a labeled assertion can later be recalled
// using a "reveal" statement. The "reveal" statement takes a list of
// arguments, each of which can be a label occurring in a previous
// assertion.
//
// * A precondition (or think of it as an antecedent of a lemma) is given by
// a "requires" clause. Ordinarily, the precondition is assumed on entry
// to the body of a method or lemma. Like an assert statement, a precondition
// can also be labeled. Such a precondition is not automatically assumed on
// entry to the body, but can be recalled by a "reveal" statement.
//
// * Fine points: Some exclusions apply. For example, labeled preconditions are
// not supported for functions and cannot be used to hide/reveal conditions
// while checking the well-formedness of a specification. Labeled assertions are
// not supported in expression contexts. The "reveal" described is the "reveal"
// statement. A labeled assertion can be revealed only at those program points
// that are dominated by the assertion, that is, in places that are reached
// only after definitely first having reached the assertion.
//
// * Fine point: The label "L" introduced by an assertion can also be used in
// "old@L(E)" expressions, where "E" is an expression. However, note that
// "old@L(E)" differs from "E" only in how the heap is dereferenced. That is,
// "old@L" has no effect on local variables. In contrast, a labeled assertion
// speaks about the values of the heap and locals at the time the assertion is
// mentioned. So, even if the heap or locals mentioned in a labeled assertion
// change after the assertion is mentioned, recalling the assertion condition
// with a "reveal" statement always recalls the condition with the heap and locals
// as they were when the assert was stated. For example, suppose "P" is an
// expression that mentions a local variable "x". Then, the second assertion in
//
// assert L: P by { ... }
// x := x + 1;
// ...make changes to the heap...
// reveal L;
// assert old@L(P);
//
// does not necessarily hold. The first assertion uses the initial value of the
// heap and the initial value of "x". Consequently, "reveal L;" recalls the
// asserted condition, with that initial heap and that initial value of "x",
// despite the fact that the code changes both "x" and the heap between the
// assert and the reveal. The expression "old@L(P)" essentially rolls
// back to the initial heap, but it uses the current value of "x".
|
317 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_from dafny main repo_dafny3_InfiniteTrees.dfy | // RUN: %dafny /compile:0 /deprecation:0 /dprint:"%t.dprint" "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
// Here is the usual definition of possibly infinite lists, along with a function Tail(s, n), which drops
// n heads from s, and a few lemmas that prove properties of Tail.
codatatype Stream<T> = Nil | Cons(head: T, tail: Stream)
ghost function Tail(s: Stream, n: nat): Stream
{
if n == 0 then s else
var t := Tail(s, n-1);
if t == Nil then t else t.tail
}
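// A small, hedged sanity check (not part of the original file): dropping one head
// from the two-element stream Cons(1, Cons(2, Nil)) leaves Cons(2, Nil). The assert
// in the body spells out the n == 0 base case of Tail.
lemma TailExample()
  ensures Tail(Cons(1, Cons(2, Nil)), 1) == Cons(2, Nil)
{
  assert Tail(Cons(1, Cons(2, Nil)), 0) == Cons(1, Cons(2, Nil));
}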
lemma Tail_Lemma0(s: Stream, n: nat)
requires s.Cons? && Tail(s, n).Cons?;
ensures Tail(s, n).tail == Tail(s.tail, n);
{
}
lemma Tail_Lemma1(s: Stream, k: nat, n: nat)
requires k <= n;
ensures Tail(s, n).Cons? ==> Tail(s, k).Cons?;
// Note, the contrapositive of this lemma says: Tail(s, k) == Nil ==> Tail(s, n) == Nil
{
if k < n && Tail(s, n).Cons? {
assert Tail(s, n) == Tail(s, n-1).tail;
}
}
lemma Tail_Lemma2(s: Stream, n: nat)
requires s.Cons? && Tail(s.tail, n).Cons?;
ensures Tail(s, n).Cons?;
{
if n != 0 {
Tail_Lemma0(s, n-1);
}
}
// Co-predicate IsNeverEndingStream(s) answers whether or not s ever contains Nil.
greatest predicate IsNeverEndingStream<S>(s: Stream<S>)
{
match s
case Nil => false
case Cons(_, tail) => IsNeverEndingStream(tail)
}
// Here is an example of an infinite stream.
ghost function AnInfiniteStream(): Stream<int>
{
Cons(0, AnInfiniteStream())
}
greatest lemma Proposition0()
ensures IsNeverEndingStream(AnInfiniteStream());
{
}
// Now, consider a Tree definition, where each node can have a possibly infinite number of children.
datatype Tree = Node(children: Stream<Tree>)
// Such a tree might have not just infinite width but also infinite height. The following predicate
// holds if there is, for every path down from the root, a common bound on the height of each such path.
// Note that the definition needs a co-predicate in order to say something about all of a node's children.
ghost predicate HasBoundedHeight(t: Tree)
{
exists n :: 0 <= n && LowerThan(t.children, n)
}
greatest predicate LowerThan(s: Stream<Tree>, n: nat)
{
match s
case Nil => true
case Cons(t, tail) =>
1 <= n && LowerThan(t.children, n-1) && LowerThan(tail, n)
}
// Co-predicate LowerThan(s, n) recurses on LowerThan(s.tail, n). Thus, a property of LowerThan is that
// LowerThan(s, h) implies LowerThan(s', h) for any suffix s' of s.
lemma LowerThan_Lemma(s: Stream<Tree>, n: nat, h: nat)
ensures LowerThan(s, h) ==> LowerThan(Tail(s, n), h);
{
Tail_Lemma1(s, 0, n);
if n == 0 || Tail(s, n) == Nil {
} else {
match s {
case Cons(t, tail) =>
LowerThan_Lemma(tail, n-1, h);
Tail_Lemma0(s, n-1);
}
}
}
// A tree t where every node has an infinite number of children satisfies InfiniteEverywhere(t.children).
// Otherwise, IsFiniteSomewhere(t) holds. That is, IsFiniteSomewhere says that the tree has some node
// with less than infinite width. Such a tree may or may not be of finite height, as we'll see in an
// example below.
ghost predicate IsFiniteSomewhere(t: Tree)
{
!InfiniteEverywhere(t.children)
}
greatest predicate InfiniteEverywhere(s: Stream<Tree>)
{
match s
case Nil => false
case Cons(t, tail) => InfiniteEverywhere(t.children) && InfiniteEverywhere(tail)
}
// Here is a tree where every node has exactly 1 child. Such a tree is finite in width (which implies
// it is finite somewhere) and infinite in height (which implies there is no bound on its height).
ghost function SkinnyTree(): Tree
{
Node(Cons(SkinnyTree(), Nil))
}
lemma Proposition1()
ensures IsFiniteSomewhere(SkinnyTree()) && !HasBoundedHeight(SkinnyTree());
{
assert forall n {:induction} :: 0 <= n ==> !LowerThan(SkinnyTree().children, n);
}
// Any tree where all paths have bounded height is finite somewhere.
lemma Theorem0(t: Tree)
requires HasBoundedHeight(t);
ensures IsFiniteSomewhere(t);
{
var n :| 0 <= n && LowerThan(t.children, n);
/*
assert (forall k :: 0 <= k ==> InfiniteEverywhere#[k](t.children)) ==> InfiniteEverywhere(t.children);
assert InfiniteEverywhere(t.children) ==> (forall k :: 0 <= k ==> InfiniteEverywhere#[k](t.children));
assert InfiniteEverywhere(t.children) <==> (forall k :: 0 <= k ==> InfiniteEverywhere#[k](t.children)); // TODO: why does this not follow from the previous two?
*/
var k := FindNil(t.children, n);
}
lemma FindNil(s: Stream<Tree>, n: nat) returns (k: nat)
requires LowerThan(s, n);
ensures !InfiniteEverywhere#[k as ORDINAL](s);
{
match s {
case Nil => k := 1;
case Cons(t, _) =>
k := FindNil(t.children, n-1);
k := k + 1;
}
}
// We defined an InfiniteEverywhere property above and negated it to get an IsFiniteSomewhere predicate.
// If we had an InfiniteHeightSomewhere property, then we could negate it to obtain a predicate
// HasFiniteHeightEverywhere. Consider the following definitions:
ghost predicate HasFiniteHeightEverywhere_Bad(t: Tree)
{
!InfiniteHeightSomewhere_Bad(t.children)
}
greatest predicate InfiniteHeightSomewhere_Bad(s: Stream<Tree>)
{
match s
case Nil => false
case Cons(t, tail) => InfiniteHeightSomewhere_Bad(t.children) || InfiniteHeightSomewhere_Bad(tail)
}
// In some ways, this definition may look reasonable--a list of trees is infinite somewhere
// if it is nonempty, and either the list of children of the first node satisfies the property
// or the tail of the list does. However, because co-predicates are defined by greatest
// fix-points, there is nothing in this definition that "forces" the list to ever get to a
// node whose list of children satisfy the property. The following example shows that a
// shallow, infinitely wide tree satisfies the negation of HasFiniteHeightEverywhere_Bad.
ghost function ATree(): Tree
{
Node(ATreeChildren())
}
ghost function ATreeChildren(): Stream<Tree>
{
Cons(Node(Nil), ATreeChildren())
}
lemma Proposition2()
ensures !HasFiniteHeightEverywhere_Bad(ATree());
{
Proposition2_Lemma0();
Proposition2_Lemma1(ATreeChildren());
}
greatest lemma Proposition2_Lemma0()
ensures IsNeverEndingStream(ATreeChildren());
{
}
greatest lemma Proposition2_Lemma1(s: Stream<Tree>)
requires IsNeverEndingStream(s);
ensures InfiniteHeightSomewhere_Bad(s);
{
calc {
InfiniteHeightSomewhere_Bad#[_k](s);
InfiniteHeightSomewhere_Bad#[_k-1](s.head.children) || InfiniteHeightSomewhere_Bad#[_k-1](s.tail);
<==
InfiniteHeightSomewhere_Bad#[_k-1](s.tail); // induction hypothesis
}
}
// What was missing from the InfiniteHeightSomewhere_Bad definition was the existence of a child
// node that satisfies the property recursively. To address that problem, we may consider
// a definition like the following:
/*
ghost predicate HasFiniteHeightEverywhere_Attempt(t: Tree)
{
!InfiniteHeightSomewhere_Attempt(t.children)
}
greatest predicate InfiniteHeightSomewhere_Attempt(s: Stream<Tree>)
{
exists n ::
0 <= n &&
var ch := Tail(s, n);
ch.Cons? && InfiniteHeightSomewhere_Attempt(ch.head.children)
}
*/
// However, Dafny does not allow this definition: the recursive call to InfiniteHeightSomewhere_Attempt
// sits inside an unbounded existential quantifier, which means the co-predicate's connection with its prefix
// predicate is not guaranteed to hold, so Dafny disallows this co-predicate definition.
// We will use a different way to express the HasFiniteHeightEverywhere property. Instead of
// using an existential quantifier inside the recursively defined co-predicate, we can place a "larger"
// existential quantifier outside the call to the co-predicate. This existential quantifier is going to be
// over the possible paths down the tree (it is "larger" in the sense that it selects a child tree at each
// level down the path, not just at one level).
// A path is a possibly infinite list of indices, each selecting the next child tree to navigate to. A path
// is valid when it uses valid indices and does not stop at a node with children.
greatest predicate ValidPath(t: Tree, p: Stream<int>)
{
match p
case Nil => t == Node(Nil)
case Cons(index, tail) =>
0 <= index &&
var ch := Tail(t.children, index);
ch.Cons? && ValidPath(ch.head, tail)
}
lemma ValidPath_Lemma(p: Stream<int>)
ensures ValidPath(Node(Nil), p) ==> p == Nil;
{
if ValidPath(Node(Nil), p) {
match p {
case Nil =>
case Cons(index, tail) => // proof by contradiction
var nil : Stream<Tree> := Nil;
Tail_Lemma1(nil, 0, index);
}
}
}
// A tree has finite height (everywhere) if it has no valid infinite paths.
ghost predicate HasFiniteHeight(t: Tree)
{
forall p :: ValidPath(t, p) ==> !IsNeverEndingStream(p)
}
// From this definition, we can prove that any tree of bounded height is also of finite height.
lemma Theorem1(t: Tree)
requires HasBoundedHeight(t);
ensures HasFiniteHeight(t);
{
var n :| 0 <= n && LowerThan(t.children, n);
forall p | ValidPath(t, p) {
Theorem1_Lemma(t, n, p);
}
}
lemma Theorem1_Lemma(t: Tree, n: nat, p: Stream<int>)
requires LowerThan(t.children, n) && ValidPath(t, p);
ensures !IsNeverEndingStream(p);
decreases n;
{
match p {
case Nil =>
case Cons(index, tail) =>
var ch := Tail(t.children, index);
calc {
LowerThan(t.children, n);
==> { LowerThan_Lemma(t.children, index, n); }
LowerThan(ch, n);
==> // def. LowerThan
LowerThan(ch.head.children, n-1);
==> //{ Theorem1_Lemma(ch.head, n-1, tail); }
!IsNeverEndingStream(tail);
==> // def. IsNeverEndingStream
!IsNeverEndingStream(p);
}
}
}
// In fact, HasBoundedHeight is strictly stronger than HasFiniteHeight, as we'll show with an example.
// Define SkinnyFiniteTree(n) to be a skinny (that is, of width 1) tree of height n.
ghost function SkinnyFiniteTree(n: nat): Tree
ensures forall k: nat :: LowerThan(SkinnyFiniteTree(n).children, k) <==> n <= k;
{
if n == 0 then Node(Nil) else Node(Cons(SkinnyFiniteTree(n-1), Nil))
}
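// Added illustration (this lemma is ours, not part of the original file): the ensures clause
// of SkinnyFiniteTree pins down the height exactly, so, for instance, the skinny tree of
// height 1 is lower than the bound 1 but not lower than the bound 0. This should follow
// directly from the ensures clause above.
lemma SkinnyFiniteTreeHeightExample()
  ensures LowerThan(SkinnyFiniteTree(1).children, 1)
  ensures !LowerThan(SkinnyFiniteTree(1).children, 0)
{
}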
// Next, we define a tree whose root has an infinite number of children, child i of which
// is a SkinnyFiniteTree(i).
ghost function FiniteUnboundedTree(): Tree
{
Node(EverLongerSkinnyTrees(0))
}
ghost function EverLongerSkinnyTrees(n: nat): Stream<Tree>
{
Cons(SkinnyFiniteTree(n), EverLongerSkinnyTrees(n+1))
}
lemma EverLongerSkinnyTrees_Lemma(k: nat, n: nat)
ensures Tail(EverLongerSkinnyTrees(k), n).Cons?;
ensures Tail(EverLongerSkinnyTrees(k), n).head == SkinnyFiniteTree(k+n);
decreases n;
{
if n == 0 {
} else {
calc {
Tail(EverLongerSkinnyTrees(k), n);
{ EverLongerSkinnyTrees_Lemma(k, n-1); } // this ensures that .tail on the next line is well-defined
Tail(EverLongerSkinnyTrees(k), n-1).tail;
{ Tail_Lemma0(EverLongerSkinnyTrees(k), n-1); }
Tail(EverLongerSkinnyTrees(k).tail, n-1);
Tail(EverLongerSkinnyTrees(k+1), n-1);
}
EverLongerSkinnyTrees_Lemma(k+1, n-1);
}
}
lemma Proposition3()
ensures !HasBoundedHeight(FiniteUnboundedTree()) && HasFiniteHeight(FiniteUnboundedTree());
{
Proposition3a();
Proposition3b();
}
lemma Proposition3a()
ensures !HasBoundedHeight(FiniteUnboundedTree());
{
var ch := FiniteUnboundedTree().children;
forall n | 0 <= n
ensures !LowerThan(ch, n);
{
var cn := Tail(ch, n+1);
EverLongerSkinnyTrees_Lemma(0, n+1);
assert cn.head == SkinnyFiniteTree(n+1);
assert !LowerThan(cn.head.children, n);
LowerThan_Lemma(ch, n+1, n);
}
}
lemma Proposition3b()
ensures HasFiniteHeight(FiniteUnboundedTree());
{
var t := FiniteUnboundedTree();
forall p | ValidPath(t, p)
ensures !IsNeverEndingStream(p);
{
assert p.Cons?;
var index := p.head;
assert 0 <= index;
var ch := Tail(t.children, index);
assert ch.Cons? && ValidPath(ch.head, p.tail);
EverLongerSkinnyTrees_Lemma(0, index);
assert ch.head == SkinnyFiniteTree(index);
var si := SkinnyFiniteTree(index);
assert LowerThan(si.children, index);
Proposition3b_Lemma(si, index, p.tail);
}
}
lemma Proposition3b_Lemma(t: Tree, h: nat, p: Stream<int>)
requires LowerThan(t.children, h) && ValidPath(t, p)
ensures !IsNeverEndingStream(p)
decreases h
{
match p {
case Nil =>
case Cons(index, tail) =>
// From the definition of ValidPath(t, p), we get the following:
var ch := Tail(t.children, index);
// assert ch.Cons? && ValidPath(ch.head, tail);
// From the definition of LowerThan(t.children, h), we get the following:
match t.children {
case Nil =>
ValidPath_Lemma(p);
assert false; // absurd case
case Cons(_, _) =>
// assert 1 <= h;
LowerThan_Lemma(t.children, index, h);
// assert LowerThan(ch, h);
}
// Putting these together, by ch.Cons? and the definition of LowerThan(ch, h), we get:
assert LowerThan(ch.head.children, h-1);
// And now we can invoke the induction hypothesis:
// Proposition3b_Lemma(ch.head, h-1, tail);
}
}
// Using a stream of integers to denote a path is convenient, because it allows us to
// use Tail to quickly select the next child tree. But we can also define paths in a
// way that more directly follows the navigation steps required to get to the next child,
// using Peano numbers instead of the built-in integers. This means that each Succ
// constructor among the Peano numbers corresponds to moving "right" among the children
// of a tree node. A path is valid only if it always selects a child from a list
// of children; this implies we must avoid infinite "right" moves. The appropriate type
// Number (which is really just a stream of natural numbers) is defined as a combination of
// two mutually recursive datatypes, one inductive and the other co-inductive.
codatatype CoOption<T> = None | Some(get: T)
datatype Number = Succ(Number) | Zero(CoOption<Number>)
// Note that the use of an inductive datatype for Number guarantees that sequences of successive
// "right" moves are finite (analogously, each Peano number is finite). Yet the use of a co-inductive
// CoOption in between allows paths to go on forever. In contrast, a definition like:
codatatype InfPath = Right(InfPath) | Down(InfPath) | Stop
// does not guarantee the absence of infinitely long sequences of "right" moves. In other words,
// InfPath also gives rise to indecisive paths--those that never select a child node. Also,
// compare the definition of Number with:
codatatype FinPath = Right(FinPath) | Down(FinPath) | Stop
// where the type can only represent finite paths. As a final alternative to consider, had we
// wanted only infinite, decisive paths, we would just drop the None constructor, forcing each
// CoOption to be some Number. As it is, we want to allow both finite and infinite paths, but we
// want to be able to distinguish them, so we define a co-predicate that does so:
greatest predicate InfinitePath(r: CoOption<Number>)
{
match r
case None => false
case Some(num) => InfinitePath'(num)
}
greatest predicate InfinitePath'(num: Number)
{
match num
case Succ(next) => InfinitePath'(next)
case Zero(r) => InfinitePath(r)
}
// As before, a path is valid for a tree when it navigates to existing nodes and does not stop
// in a node with more children.
greatest predicate ValidPath_Alt(t: Tree, r: CoOption<Number>)
{
match r
case None => t == Node(Nil)
case Some(num) => ValidPath_Alt'(t.children, num)
}
greatest predicate ValidPath_Alt'(s: Stream<Tree>, num: Number)
{
match num
case Succ(next) => s.Cons? && ValidPath_Alt'(s.tail, next)
case Zero(r) => s.Cons? && ValidPath_Alt(s.head, r)
}
// Here is the alternative definition of a tree that has finite height everywhere, using the
// new paths.
ghost predicate HasFiniteHeight_Alt(t: Tree)
{
forall r :: ValidPath_Alt(t, r) ==> !InfinitePath(r)
}
// We will prove that this new definition is equivalent to the previous. To do that, we
// first define functions S2N and N2S to map between the path representations
// Stream<int> and CoOption<Number>, and then prove some lemmas about this correspondence.
ghost function S2N(p: Stream<int>): CoOption<Number>
decreases 0;
{
match p
case Nil => None
case Cons(n, tail) => Some(S2N'(if n < 0 then 0 else n, tail))
}
ghost function S2N'(n: nat, tail: Stream<int>): Number
decreases n + 1;
{
if n <= 0 then Zero(S2N(tail)) else Succ(S2N'(n-1, tail))
}
ghost function N2S(r: CoOption<Number>): Stream<int>
{
match r
case None => Nil
case Some(num) => N2S'(0, num)
}
ghost function N2S'(n: nat, num: Number): Stream<int>
decreases num;
{
match num
case Zero(r) => Cons(n, N2S(r))
case Succ(next) => N2S'(n + 1, next)
}
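// Added illustration (the name ExampleEncodedPath is ours, not part of the original file):
// on the finite path Cons(2, Cons(0, Nil)), S2N produces the value below -- two "right"
// moves, a descent, then an immediate further descent, then stop -- and N2S maps this value
// back to Cons(2, Cons(0, Nil)).
ghost function ExampleEncodedPath(): CoOption<Number>
{
  Some(Succ(Succ(Zero(Some(Zero(None))))))
}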
lemma Path_Lemma0(t: Tree, p: Stream<int>)
requires ValidPath(t, p);
ensures ValidPath_Alt(t, S2N(p));
{
if ValidPath(t, p) {
Path_Lemma0'(t, p);
}
}
greatest lemma Path_Lemma0'(t: Tree, p: Stream<int>)
requires ValidPath(t, p);
ensures ValidPath_Alt(t, S2N(p));
{
match p {
case Nil =>
assert t == Node(Nil);
case Cons(index, tail) =>
assert 0 <= index;
var ch := Tail(t.children, index);
assert ch.Cons? && ValidPath(ch.head, tail);
calc {
ValidPath_Alt#[_k](t, S2N(p));
{ assert S2N(p) == Some(S2N'(index, tail)); }
ValidPath_Alt#[_k](t, Some(S2N'(index, tail)));
// def. ValidPath_Alt#
ValidPath_Alt'#[_k-1](t.children, S2N'(index, tail));
{ Path_Lemma0''(t.children, index, tail); }
true;
}
}
}
greatest lemma Path_Lemma0''(tChildren: Stream<Tree>, n: nat, tail: Stream<int>)
requires var ch := Tail(tChildren, n); ch.Cons? && ValidPath(ch.head, tail);
ensures ValidPath_Alt'(tChildren, S2N'(n, tail));
{
Tail_Lemma1(tChildren, 0, n);
match S2N'(n, tail) {
case Succ(next) =>
calc {
Tail(tChildren, n);
{ Tail_Lemma1(tChildren, n-1, n); }
Tail(tChildren, n-1).tail;
{ Tail_Lemma0(tChildren, n-1); }
Tail(tChildren.tail, n-1);
}
Path_Lemma0''(tChildren.tail, n-1, tail);
case Zero(r) =>
Path_Lemma0'(tChildren.head, tail);
}
}
lemma Path_Lemma1(t: Tree, r: CoOption<Number>)
requires ValidPath_Alt(t, r);
ensures ValidPath(t, N2S(r));
{
if ValidPath_Alt(t, r) {
Path_Lemma1'(t, r);
}
}
greatest lemma Path_Lemma1'(t: Tree, r: CoOption<Number>)
requires ValidPath_Alt(t, r);
ensures ValidPath(t, N2S(r));
decreases 1;
{
match r {
case None =>
assert t == Node(Nil);
assert N2S(r) == Nil;
case Some(num) =>
assert ValidPath_Alt'(t.children, num);
// assert N2S'(0, num).Cons?;
// Path_Lemma1''(t.children, 0, num);
var p := N2S'(0, num);
calc {
ValidPath#[_k](t, N2S(r));
ValidPath#[_k](t, N2S(Some(num)));
ValidPath#[_k](t, N2S'(0, num));
{ Path_Lemma1''#[_k](t.children, 0, num); }
true;
}
}
}
greatest lemma Path_Lemma1''(s: Stream<Tree>, n: nat, num: Number)
requires ValidPath_Alt'(Tail(s, n), num);
ensures ValidPath(Node(s), N2S'(n, num));
decreases 0, num;
{
match num {
case Succ(next) =>
Path_Lemma1''#[_k](s, n+1, next);
case Zero(r) =>
calc {
ValidPath#[_k](Node(s), N2S'(n, num));
ValidPath#[_k](Node(s), Cons(n, N2S(r)));
Tail(s, n).Cons? && ValidPath#[_k-1](Tail(s, n).head, N2S(r));
{ assert Tail(s, n).Cons?; }
ValidPath#[_k-1](Tail(s, n).head, N2S(r));
{ Path_Lemma1'(Tail(s, n).head, r); }
true;
}
}
}
lemma Path_Lemma2(p: Stream<int>)
ensures IsNeverEndingStream(p) ==> InfinitePath(S2N(p));
{
if IsNeverEndingStream(p) {
Path_Lemma2'(p);
}
}
greatest lemma Path_Lemma2'(p: Stream<int>)
requires IsNeverEndingStream(p);
ensures InfinitePath(S2N(p));
{
match p {
case Cons(n, tail) =>
calc {
InfinitePath#[_k](S2N(p));
// def. S2N
InfinitePath#[_k](Some(S2N'(if n < 0 then 0 else n, tail)));
// def. InfinitePath
InfinitePath'#[_k-1](S2N'(if n < 0 then 0 else n, tail));
<== { Path_Lemma2''(p, if n < 0 then 0 else n, tail); }
InfinitePath#[_k-1](S2N(tail));
{ Path_Lemma2'(tail); }
true;
}
}
}
greatest lemma Path_Lemma2''(p: Stream<int>, n: nat, tail: Stream<int>)
requires IsNeverEndingStream(p) && p.tail == tail
ensures InfinitePath'(S2N'(n, tail))
{
Path_Lemma2'(tail);
}
lemma Path_Lemma3(r: CoOption<Number>)
ensures InfinitePath(r) ==> IsNeverEndingStream(N2S(r));
{
if InfinitePath(r) {
match r {
case Some(num) => Path_Lemma3'(0, num);
}
}
}
greatest lemma Path_Lemma3'(n: nat, num: Number)
requires InfinitePath'(num);
ensures IsNeverEndingStream(N2S'(n, num));
decreases num;
{
match num {
case Zero(r) =>
calc {
IsNeverEndingStream#[_k](N2S'(n, num));
// def. N2S'
IsNeverEndingStream#[_k](Cons(n, N2S(r)));
// def. IsNeverEndingStream
IsNeverEndingStream#[_k-1](N2S(r));
{ Path_Lemma3'(0, r.get); }
true;
}
case Succ(next) =>
Path_Lemma3'#[_k](n + 1, next);
}
}
lemma Theorem2(t: Tree)
ensures HasFiniteHeight(t) <==> HasFiniteHeight_Alt(t);
{
if HasFiniteHeight_Alt(t) {
forall p {
calc ==> {
ValidPath(t, p);
{ Path_Lemma0(t, p); }
ValidPath_Alt(t, S2N(p));
// assumption HasFiniteHeight(t)
!InfinitePath(S2N(p));
{ Path_Lemma2(p); }
!IsNeverEndingStream(p);
}
}
}
if HasFiniteHeight(t) {
forall r {
calc ==> {
ValidPath_Alt(t, r);
{ Path_Lemma1(t, r); }
ValidPath(t, N2S(r));
// assumption HasFiniteHeight_Alt(t)
!IsNeverEndingStream(N2S(r));
{ Path_Lemma3(r); }
!InfinitePath(r);
}
}
}
}
| // RUN: %dafny /compile:0 /deprecation:0 /dprint:"%t.dprint" "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
// Here is the usual definition of possibly infinite lists, along with a function Tail(s, n), which drops
// n heads from s, and two lemmas that prove properties of Tail.
codatatype Stream<T> = Nil | Cons(head: T, tail: Stream)
ghost function Tail(s: Stream, n: nat): Stream
{
if n == 0 then s else
var t := Tail(s, n-1);
if t == Nil then t else t.tail
}
lemma Tail_Lemma0(s: Stream, n: nat)
requires s.Cons? && Tail(s, n).Cons?;
ensures Tail(s, n).tail == Tail(s.tail, n);
{
}
lemma Tail_Lemma1(s: Stream, k: nat, n: nat)
requires k <= n;
ensures Tail(s, n).Cons? ==> Tail(s, k).Cons?;
// Note, the contrapositive of this lemma says: Tail(s, k) == Nil ==> Tail(s, n) == Nil
{
if k < n && Tail(s, n).Cons? {
}
}
lemma Tail_Lemma2(s: Stream, n: nat)
requires s.Cons? && Tail(s.tail, n).Cons?;
ensures Tail(s, n).Cons?;
{
if n != 0 {
Tail_Lemma0(s, n-1);
}
}
// Co-predicate IsNeverEndingStream(s) answers whether or not s ever contains Nil.
greatest predicate IsNeverEndingStream<S>(s: Stream<S>)
{
match s
case Nil => false
case Cons(_, tail) => IsNeverEndingStream(tail)
}
// Here is an example of an infinite stream.
ghost function AnInfiniteStream(): Stream<int>
{
Cons(0, AnInfiniteStream())
}
greatest lemma Proposition0()
ensures IsNeverEndingStream(AnInfiniteStream());
{
}
// Now, consider a Tree definition, where each node can have a possibly infinite number of children.
datatype Tree = Node(children: Stream<Tree>)
// Such a tree might have not just infinite width but also infinite height. The following predicate
// holds if there is, for every path down from the root, a common bound on the height of each such path.
// Note that the definition needs a co-predicate in order to say something about all of a node's children.
ghost predicate HasBoundedHeight(t: Tree)
{
exists n :: 0 <= n && LowerThan(t.children, n)
}
greatest predicate LowerThan(s: Stream<Tree>, n: nat)
{
match s
case Nil => true
case Cons(t, tail) =>
1 <= n && LowerThan(t.children, n-1) && LowerThan(tail, n)
}
// Co-predicate LowerThan(s, n) recurses on LowerThan(s.tail, n). Thus, a property of LowerThan is that
// LowerThan(s, h) implies LowerThan(s', h) for any suffix s' of s.
lemma LowerThan_Lemma(s: Stream<Tree>, n: nat, h: nat)
ensures LowerThan(s, h) ==> LowerThan(Tail(s, n), h);
{
Tail_Lemma1(s, 0, n);
if n == 0 || Tail(s, n) == Nil {
} else {
match s {
case Cons(t, tail) =>
LowerThan_Lemma(tail, n-1, h);
Tail_Lemma0(s, n-1);
}
}
}
// A tree t where every node has an infinite number of children satisfies InfiniteEverywhere(t.children).
// Otherwise, IsFiniteSomewhere(t) holds. That is, IsFiniteSomewhere says that the tree has some node
// with less than infinite width. Such a tree may or may not be of finite height, as we'll see in an
// example below.
ghost predicate IsFiniteSomewhere(t: Tree)
{
!InfiniteEverywhere(t.children)
}
greatest predicate InfiniteEverywhere(s: Stream<Tree>)
{
match s
case Nil => false
case Cons(t, tail) => InfiniteEverywhere(t.children) && InfiniteEverywhere(tail)
}
// Here is a tree where every node has exactly 1 child. Such a tree is finite in width (which implies
// it is finite somewhere) and infinite in height (which implies there is no bound on its height).
ghost function SkinnyTree(): Tree
{
Node(Cons(SkinnyTree(), Nil))
}
lemma Proposition1()
ensures IsFiniteSomewhere(SkinnyTree()) && !HasBoundedHeight(SkinnyTree());
{
}
// Any tree where all paths have bounded height is finite somewhere.
lemma Theorem0(t: Tree)
requires HasBoundedHeight(t);
ensures IsFiniteSomewhere(t);
{
var n :| 0 <= n && LowerThan(t.children, n);
/*
*/
var k := FindNil(t.children, n);
}
lemma FindNil(s: Stream<Tree>, n: nat) returns (k: nat)
requires LowerThan(s, n);
ensures !InfiniteEverywhere#[k as ORDINAL](s);
{
match s {
case Nil => k := 1;
case Cons(t, _) =>
k := FindNil(t.children, n-1);
k := k + 1;
}
}
// We defined an InfiniteEverywhere property above and negated it to get an IsFiniteSomewhere predicate.
// If we had an InfiniteHeightSomewhere property, then we could negate it to obtain a predicate
// HasFiniteHeightEverywhere. Consider the following definitions:
ghost predicate HasFiniteHeightEverywhere_Bad(t: Tree)
{
!InfiniteHeightSomewhere_Bad(t.children)
}
greatest predicate InfiniteHeightSomewhere_Bad(s: Stream<Tree>)
{
match s
case Nil => false
case Cons(t, tail) => InfiniteHeightSomewhere_Bad(t.children) || InfiniteHeightSomewhere_Bad(tail)
}
// In some ways, this definition may look reasonable--a list of trees is infinite somewhere
// if it is nonempty, and either the list of children of the first node satisfies the property
// or the tail of the list does. However, because co-predicates are defined by greatest
// fix-points, there is nothing in this definition that "forces" the list to ever get to a
// node whose list of children satisfies the property. The following example shows that a
// shallow, infinitely wide tree satisfies the negation of HasFiniteHeightEverywhere_Bad.
ghost function ATree(): Tree
{
Node(ATreeChildren())
}
ghost function ATreeChildren(): Stream<Tree>
{
Cons(Node(Nil), ATreeChildren())
}
lemma Proposition2()
ensures !HasFiniteHeightEverywhere_Bad(ATree());
{
Proposition2_Lemma0();
Proposition2_Lemma1(ATreeChildren());
}
greatest lemma Proposition2_Lemma0()
ensures IsNeverEndingStream(ATreeChildren());
{
}
greatest lemma Proposition2_Lemma1(s: Stream<Tree>)
requires IsNeverEndingStream(s);
ensures InfiniteHeightSomewhere_Bad(s);
{
calc {
InfiniteHeightSomewhere_Bad#[_k](s);
InfiniteHeightSomewhere_Bad#[_k-1](s.head.children) || InfiniteHeightSomewhere_Bad#[_k-1](s.tail);
<==
InfiniteHeightSomewhere_Bad#[_k-1](s.tail); // induction hypothesis
}
}
// What was missing from the InfiniteHeightSomewhere_Bad definition was the existence of a child
// node that satisfies the property recursively. To address that problem, we may consider
// a definition like the following:
/*
ghost predicate HasFiniteHeightEverywhere_Attempt(t: Tree)
{
!InfiniteHeightSomewhere_Attempt(t.children)
}
greatest predicate InfiniteHeightSomewhere_Attempt(s: Stream<Tree>)
{
exists n ::
0 <= n &&
var ch := Tail(s, n);
ch.Cons? && InfiniteHeightSomewhere_Attempt(ch.head.children)
}
*/
// However, Dafny does not allow this definition: the recursive call to InfiniteHeightSomewhere_Attempt
// sits inside an unbounded existential quantifier, which means the co-predicate's connection with its prefix
// predicate is not guaranteed to hold, so Dafny disallows this co-predicate definition.
// We will use a different way to express the HasFiniteHeightEverywhere property. Instead of
// using an existential quantifier inside the recursively defined co-predicate, we can place a "larger"
// existential quantifier outside the call to the co-predicate. This existential quantifier is going to be
// over the possible paths down the tree (it is "larger" in the sense that it selects a child tree at each
// level down the path, not just at one level).
// A path is a possibly infinite list of indices, each selecting the next child tree to navigate to. A path
// is valid when it uses valid indices and does not stop at a node with children.
greatest predicate ValidPath(t: Tree, p: Stream<int>)
{
match p
case Nil => t == Node(Nil)
case Cons(index, tail) =>
0 <= index &&
var ch := Tail(t.children, index);
ch.Cons? && ValidPath(ch.head, tail)
}
lemma ValidPath_Lemma(p: Stream<int>)
ensures ValidPath(Node(Nil), p) ==> p == Nil;
{
if ValidPath(Node(Nil), p) {
match p {
case Nil =>
case Cons(index, tail) => // proof by contradiction
var nil : Stream<Tree> := Nil;
Tail_Lemma1(nil, 0, index);
}
}
}
// A tree has finite height (everywhere) if it has no valid infinite paths.
ghost predicate HasFiniteHeight(t: Tree)
{
forall p :: ValidPath(t, p) ==> !IsNeverEndingStream(p)
}
// From this definition, we can prove that any tree of bounded height is also of finite height.
lemma Theorem1(t: Tree)
requires HasBoundedHeight(t);
ensures HasFiniteHeight(t);
{
var n :| 0 <= n && LowerThan(t.children, n);
forall p | ValidPath(t, p) {
Theorem1_Lemma(t, n, p);
}
}
lemma Theorem1_Lemma(t: Tree, n: nat, p: Stream<int>)
requires LowerThan(t.children, n) && ValidPath(t, p);
ensures !IsNeverEndingStream(p);
{
match p {
case Nil =>
case Cons(index, tail) =>
var ch := Tail(t.children, index);
calc {
LowerThan(t.children, n);
==> { LowerThan_Lemma(t.children, index, n); }
LowerThan(ch, n);
==> // def. LowerThan
LowerThan(ch.head.children, n-1);
==> //{ Theorem1_Lemma(ch.head, n-1, tail); }
!IsNeverEndingStream(tail);
==> // def. IsNeverEndingStream
!IsNeverEndingStream(p);
}
}
}
// In fact, HasBoundedHeight is strictly stronger than HasFiniteHeight, as we'll show with an example.
// Define SkinnyFiniteTree(n) to be a skinny (that is, of width 1) tree of height n.
ghost function SkinnyFiniteTree(n: nat): Tree
ensures forall k: nat :: LowerThan(SkinnyFiniteTree(n).children, k) <==> n <= k;
{
if n == 0 then Node(Nil) else Node(Cons(SkinnyFiniteTree(n-1), Nil))
}
// Next, we define a tree whose root has an infinite number of children, child i of which
// is a SkinnyFiniteTree(i).
ghost function FiniteUnboundedTree(): Tree
{
Node(EverLongerSkinnyTrees(0))
}
ghost function EverLongerSkinnyTrees(n: nat): Stream<Tree>
{
Cons(SkinnyFiniteTree(n), EverLongerSkinnyTrees(n+1))
}
lemma EverLongerSkinnyTrees_Lemma(k: nat, n: nat)
ensures Tail(EverLongerSkinnyTrees(k), n).Cons?;
ensures Tail(EverLongerSkinnyTrees(k), n).head == SkinnyFiniteTree(k+n);
{
if n == 0 {
} else {
calc {
Tail(EverLongerSkinnyTrees(k), n);
{ EverLongerSkinnyTrees_Lemma(k, n-1); } // this ensures that .tail on the next line is well-defined
Tail(EverLongerSkinnyTrees(k), n-1).tail;
{ Tail_Lemma0(EverLongerSkinnyTrees(k), n-1); }
Tail(EverLongerSkinnyTrees(k).tail, n-1);
Tail(EverLongerSkinnyTrees(k+1), n-1);
}
EverLongerSkinnyTrees_Lemma(k+1, n-1);
}
}
lemma Proposition3()
ensures !HasBoundedHeight(FiniteUnboundedTree()) && HasFiniteHeight(FiniteUnboundedTree());
{
Proposition3a();
Proposition3b();
}
lemma Proposition3a()
ensures !HasBoundedHeight(FiniteUnboundedTree());
{
var ch := FiniteUnboundedTree().children;
forall n | 0 <= n
ensures !LowerThan(ch, n);
{
var cn := Tail(ch, n+1);
EverLongerSkinnyTrees_Lemma(0, n+1);
LowerThan_Lemma(ch, n+1, n);
}
}
lemma Proposition3b()
ensures HasFiniteHeight(FiniteUnboundedTree());
{
var t := FiniteUnboundedTree();
forall p | ValidPath(t, p)
ensures !IsNeverEndingStream(p);
{
var index := p.head;
var ch := Tail(t.children, index);
EverLongerSkinnyTrees_Lemma(0, index);
var si := SkinnyFiniteTree(index);
Proposition3b_Lemma(si, index, p.tail);
}
}
lemma Proposition3b_Lemma(t: Tree, h: nat, p: Stream<int>)
requires LowerThan(t.children, h) && ValidPath(t, p)
ensures !IsNeverEndingStream(p)
{
match p {
case Nil =>
case Cons(index, tail) =>
// From the definition of ValidPath(t, p), we get the following:
var ch := Tail(t.children, index);
// assert ch.Cons? && ValidPath(ch.head, tail);
// From the definition of LowerThan(t.children, h), we get the following:
match t.children {
case Nil =>
ValidPath_Lemma(p);
case Cons(_, _) =>
// assert 1 <= h;
LowerThan_Lemma(t.children, index, h);
// assert LowerThan(ch, h);
}
// Putting these together, by ch.Cons? and the definition of LowerThan(ch, h), we get:
// And now we can invoke the induction hypothesis:
// Proposition3b_Lemma(ch.head, h-1, tail);
}
}
// Using a stream of integers to denote a path is convenient, because it allows us to
// use Tail to quickly select the next child tree. But we can also define paths in a
// way that more directly follows the navigation steps required to get to the next child,
// using Peano numbers instead of the built-in integers. This means that each Succ
// constructor among the Peano numbers corresponds to moving "right" among the children
// of a tree node. A path is valid only if it always selects a child from a list
// of children; this implies we must avoid infinite "right" moves. The appropriate type
// Number (which is really just a stream of natural numbers) is defined as a combination of
// two mutually recursive datatypes, one inductive and the other co-inductive.
codatatype CoOption<T> = None | Some(get: T)
datatype Number = Succ(Number) | Zero(CoOption<Number>)
// Note that the use of an inductive datatype for Number guarantees that sequences of successive
// "right" moves are finite (analogously, each Peano number is finite). Yet the use of a co-inductive
// CoOption in between allows paths to go on forever. In contrast, a definition like:
codatatype InfPath = Right(InfPath) | Down(InfPath) | Stop
// does not guarantee the absence of infinitely long sequences of "right" moves. In other words,
// InfPath also gives rise to indecisive paths--those that never select a child node. Also,
// compare the definition of Number with:
codatatype FinPath = Right(FinPath) | Down(FinPath) | Stop
// where the type can only represent finite paths. As a final alternative to consider, had we
// wanted only infinite, decisive paths, we would just drop the None constructor, forcing each
// CoOption to be some Number. As it is, we want to allow both finite and infinite paths, but we
// want to be able to distinguish them, so we define a co-predicate that does so:
greatest predicate InfinitePath(r: CoOption<Number>)
{
match r
case None => false
case Some(num) => InfinitePath'(num)
}
greatest predicate InfinitePath'(num: Number)
{
match num
case Succ(next) => InfinitePath'(next)
case Zero(r) => InfinitePath(r)
}
// As before, a path is valid for a tree when it navigates to existing nodes and does not stop
// in a node with more children.
greatest predicate ValidPath_Alt(t: Tree, r: CoOption<Number>)
{
match r
case None => t == Node(Nil)
case Some(num) => ValidPath_Alt'(t.children, num)
}
greatest predicate ValidPath_Alt'(s: Stream<Tree>, num: Number)
{
match num
case Succ(next) => s.Cons? && ValidPath_Alt'(s.tail, next)
case Zero(r) => s.Cons? && ValidPath_Alt(s.head, r)
}
// Here is the alternative definition of a tree that has finite height everywhere, using the
// new paths.
ghost predicate HasFiniteHeight_Alt(t: Tree)
{
forall r :: ValidPath_Alt(t, r) ==> !InfinitePath(r)
}
// We will prove that this new definition is equivalent to the previous. To do that, we
// first define functions S2N and N2S to map between the path representations
// Stream<int> and CoOption<Number>, and then prove some lemmas about this correspondence.
ghost function S2N(p: Stream<int>): CoOption<Number>
{
match p
case Nil => None
case Cons(n, tail) => Some(S2N'(if n < 0 then 0 else n, tail))
}
ghost function S2N'(n: nat, tail: Stream<int>): Number
{
if n <= 0 then Zero(S2N(tail)) else Succ(S2N'(n-1, tail))
}
ghost function N2S(r: CoOption<Number>): Stream<int>
{
match r
case None => Nil
case Some(num) => N2S'(0, num)
}
ghost function N2S'(n: nat, num: Number): Stream<int>
{
match num
case Zero(r) => Cons(n, N2S(r))
case Succ(next) => N2S'(n + 1, next)
}
lemma Path_Lemma0(t: Tree, p: Stream<int>)
requires ValidPath(t, p);
ensures ValidPath_Alt(t, S2N(p));
{
if ValidPath(t, p) {
Path_Lemma0'(t, p);
}
}
greatest lemma Path_Lemma0'(t: Tree, p: Stream<int>)
requires ValidPath(t, p);
ensures ValidPath_Alt(t, S2N(p));
{
match p {
case Nil =>
case Cons(index, tail) =>
var ch := Tail(t.children, index);
calc {
ValidPath_Alt#[_k](t, S2N(p));
{ assert S2N(p) == Some(S2N'(index, tail)); }
ValidPath_Alt#[_k](t, Some(S2N'(index, tail)));
// def. ValidPath_Alt#
ValidPath_Alt'#[_k-1](t.children, S2N'(index, tail));
{ Path_Lemma0''(t.children, index, tail); }
true;
}
}
}
greatest lemma Path_Lemma0''(tChildren: Stream<Tree>, n: nat, tail: Stream<int>)
requires var ch := Tail(tChildren, n); ch.Cons? && ValidPath(ch.head, tail);
ensures ValidPath_Alt'(tChildren, S2N'(n, tail));
{
Tail_Lemma1(tChildren, 0, n);
match S2N'(n, tail) {
case Succ(next) =>
calc {
Tail(tChildren, n);
{ Tail_Lemma1(tChildren, n-1, n); }
Tail(tChildren, n-1).tail;
{ Tail_Lemma0(tChildren, n-1); }
Tail(tChildren.tail, n-1);
}
Path_Lemma0''(tChildren.tail, n-1, tail);
case Zero(r) =>
Path_Lemma0'(tChildren.head, tail);
}
}
lemma Path_Lemma1(t: Tree, r: CoOption<Number>)
requires ValidPath_Alt(t, r);
ensures ValidPath(t, N2S(r));
{
if ValidPath_Alt(t, r) {
Path_Lemma1'(t, r);
}
}
greatest lemma Path_Lemma1'(t: Tree, r: CoOption<Number>)
requires ValidPath_Alt(t, r);
ensures ValidPath(t, N2S(r));
{
match r {
case None =>
case Some(num) =>
// assert N2S'(0, num).Cons?;
// Path_Lemma1''(t.children, 0, num);
var p := N2S'(0, num);
calc {
ValidPath#[_k](t, N2S(r));
ValidPath#[_k](t, N2S(Some(num)));
ValidPath#[_k](t, N2S'(0, num));
{ Path_Lemma1''#[_k](t.children, 0, num); }
true;
}
}
}
greatest lemma Path_Lemma1''(s: Stream<Tree>, n: nat, num: Number)
requires ValidPath_Alt'(Tail(s, n), num);
ensures ValidPath(Node(s), N2S'(n, num));
{
match num {
case Succ(next) =>
Path_Lemma1''#[_k](s, n+1, next);
case Zero(r) =>
calc {
ValidPath#[_k](Node(s), N2S'(n, num));
ValidPath#[_k](Node(s), Cons(n, N2S(r)));
Tail(s, n).Cons? && ValidPath#[_k-1](Tail(s, n).head, N2S(r));
{ assert Tail(s, n).Cons?; }
ValidPath#[_k-1](Tail(s, n).head, N2S(r));
{ Path_Lemma1'(Tail(s, n).head, r); }
true;
}
}
}
lemma Path_Lemma2(p: Stream<int>)
ensures IsNeverEndingStream(p) ==> InfinitePath(S2N(p));
{
if IsNeverEndingStream(p) {
Path_Lemma2'(p);
}
}
greatest lemma Path_Lemma2'(p: Stream<int>)
requires IsNeverEndingStream(p);
ensures InfinitePath(S2N(p));
{
match p {
case Cons(n, tail) =>
calc {
InfinitePath#[_k](S2N(p));
// def. S2N
InfinitePath#[_k](Some(S2N'(if n < 0 then 0 else n, tail)));
// def. InfinitePath
InfinitePath'#[_k-1](S2N'(if n < 0 then 0 else n, tail));
<== { Path_Lemma2''(p, if n < 0 then 0 else n, tail); }
InfinitePath#[_k-1](S2N(tail));
{ Path_Lemma2'(tail); }
true;
}
}
}
greatest lemma Path_Lemma2''(p: Stream<int>, n: nat, tail: Stream<int>)
requires IsNeverEndingStream(p) && p.tail == tail
ensures InfinitePath'(S2N'(n, tail))
{
Path_Lemma2'(tail);
}
lemma Path_Lemma3(r: CoOption<Number>)
ensures InfinitePath(r) ==> IsNeverEndingStream(N2S(r));
{
if InfinitePath(r) {
match r {
case Some(num) => Path_Lemma3'(0, num);
}
}
}
greatest lemma Path_Lemma3'(n: nat, num: Number)
requires InfinitePath'(num);
ensures IsNeverEndingStream(N2S'(n, num));
{
match num {
case Zero(r) =>
calc {
IsNeverEndingStream#[_k](N2S'(n, num));
// def. N2S'
IsNeverEndingStream#[_k](Cons(n, N2S(r)));
// def. IsNeverEndingStream
IsNeverEndingStream#[_k-1](N2S(r));
{ Path_Lemma3'(0, r.get); }
true;
}
case Succ(next) =>
Path_Lemma3'#[_k](n + 1, next);
}
}
lemma Theorem2(t: Tree)
ensures HasFiniteHeight(t) <==> HasFiniteHeight_Alt(t);
{
if HasFiniteHeight_Alt(t) {
forall p {
calc ==> {
ValidPath(t, p);
{ Path_Lemma0(t, p); }
ValidPath_Alt(t, S2N(p));
// assumption HasFiniteHeight(t)
!InfinitePath(S2N(p));
{ Path_Lemma2(p); }
!IsNeverEndingStream(p);
}
}
}
if HasFiniteHeight(t) {
forall r {
calc ==> {
ValidPath_Alt(t, r);
{ Path_Lemma1(t, r); }
ValidPath(t, N2S(r));
// assumption HasFiniteHeight_Alt(t)
!IsNeverEndingStream(N2S(r));
{ Path_Lemma3(r); }
!InfinitePath(r);
}
}
}
}
|
318 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_from dafny main repo_dafny3_Iter.dfy | // RUN: %testDafnyForEachCompiler --refresh-exit-code=0 "%s" -- --relax-definite-assignment
class List<T> {
ghost var Contents: seq<T>
ghost var Repr: set<object>
var a: array<T>
var n: nat
ghost predicate Valid()
reads this, Repr
ensures Valid() ==> this in Repr
{
this in Repr &&
a in Repr &&
n <= a.Length &&
Contents == a[..n]
}
constructor Init()
ensures Valid() && fresh(Repr)
ensures Contents == []
{
Contents, n := [], 0;
a := new T[0];
Repr := {this, a};
}
method Add(t: T)
requires Valid()
modifies Repr
ensures Valid() && fresh(Repr - old(Repr))
ensures Contents == old(Contents) + [t]
{
if (n == a.Length) {
var b := new T[2 * a.Length + 1](i requires 0 <= i reads this, a =>
if i < a.Length then a[i] else t);
assert b[..n] == a[..n] == Contents;
a, Repr := b, Repr + {b};
assert b[..n] == Contents;
}
a[n], n, Contents := t, n + 1, Contents + [t];
}
}
class Cell { var data: int }
iterator M<T(0)>(l: List<T>, c: Cell) yields (x: T)
requires l.Valid()
reads l.Repr
modifies c
yield requires true
yield ensures xs <= l.Contents // this is needed in order for the next line to be well-formed
yield ensures x == l.Contents[|xs|-1]
ensures xs == l.Contents
{
var i := 0;
while i < l.n
invariant i <= l.n && i == |xs| && xs <= l.Contents
{
if (*) { assert l.Valid(); } // this property is maintained, due to the reads clause
if (*) {
x := l.a[i]; yield; // or, equivalently, 'yield l.a[i]'
i := i + 1;
} else {
x, i := l.a[i], i + 1;
yield;
}
}
}
method Client<T(==,0)>(l: List, stop: T) returns (s: seq<T>)
requires l.Valid()
{
var c := new Cell;
var iter := new M(l, c);
s := [];
while true
invariant iter.Valid() && fresh(iter._new)
invariant iter.xs <= l.Contents
decreases |l.Contents| - |iter.xs|
{
var more := iter.MoveNext();
if (!more) { break; }
s := s + [iter.x];
if (iter.x == stop) { return; } // if we ever see 'stop', then just end
}
}
method PrintSequence<T>(s: seq<T>)
{
var i := 0;
while i < |s|
{
print s[i], " ";
i := i + 1;
}
print "\n";
}
method Main()
{
var myList := new List.Init();
var i := 0;
while i < 100
invariant myList.Valid() && fresh(myList.Repr)
{
myList.Add(i);
i := i + 2;
}
var s := Client(myList, 89);
PrintSequence(s);
s := Client(myList, 14);
PrintSequence(s);
}
| // RUN: %testDafnyForEachCompiler --refresh-exit-code=0 "%s" -- --relax-definite-assignment
class List<T> {
ghost var Contents: seq<T>
ghost var Repr: set<object>
var a: array<T>
var n: nat
ghost predicate Valid()
reads this, Repr
ensures Valid() ==> this in Repr
{
this in Repr &&
a in Repr &&
n <= a.Length &&
Contents == a[..n]
}
constructor Init()
ensures Valid() && fresh(Repr)
ensures Contents == []
{
Contents, n := [], 0;
a := new T[0];
Repr := {this, a};
}
method Add(t: T)
requires Valid()
modifies Repr
ensures Valid() && fresh(Repr - old(Repr))
ensures Contents == old(Contents) + [t]
{
if (n == a.Length) {
var b := new T[2 * a.Length + 1](i requires 0 <= i reads this, a =>
if i < a.Length then a[i] else t);
a, Repr := b, Repr + {b};
}
a[n], n, Contents := t, n + 1, Contents + [t];
}
}
class Cell { var data: int }
iterator M<T(0)>(l: List<T>, c: Cell) yields (x: T)
requires l.Valid()
reads l.Repr
modifies c
yield requires true
yield ensures xs <= l.Contents // this is needed in order for the next line to be well-formed
yield ensures x == l.Contents[|xs|-1]
ensures xs == l.Contents
{
var i := 0;
while i < l.n
{
if (*) { assert l.Valid(); } // this property is maintained, due to the reads clause
if (*) {
x := l.a[i]; yield; // or, equivalently, 'yield l.a[i]'
i := i + 1;
} else {
x, i := l.a[i], i + 1;
yield;
}
}
}
method Client<T(==,0)>(l: List, stop: T) returns (s: seq<T>)
requires l.Valid()
{
var c := new Cell;
var iter := new M(l, c);
s := [];
while true
{
var more := iter.MoveNext();
if (!more) { break; }
s := s + [iter.x];
if (iter.x == stop) { return; } // if we ever see 'stop', then just end
}
}
method PrintSequence<T>(s: seq<T>)
{
var i := 0;
while i < |s|
{
print s[i], " ";
i := i + 1;
}
print "\n";
}
method Main()
{
var myList := new List.Init();
var i := 0;
while i < 100
{
myList.Add(i);
i := i + 2;
}
var s := Client(myList, 89);
PrintSequence(s);
s := Client(myList, 14);
PrintSequence(s);
}
|
319 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_from dafny main repo_dafny3_Streams.dfy | // RUN: %testDafnyForEachResolver "%s" -- --warn-deprecation:false
// ----- Stream
codatatype Stream<T> = Nil | Cons(head: T, tail: Stream)
ghost function append(M: Stream, N: Stream): Stream
{
match M
case Nil => N
case Cons(t, M') => Cons(t, append(M', N))
}
// ----- f, g, and maps
type X
ghost function f(x: X): X
ghost function g(x: X): X
ghost function map_f(M: Stream<X>): Stream<X>
{
match M
case Nil => Nil
case Cons(x, N) => Cons(f(x), map_f(N))
}
ghost function map_g(M: Stream<X>): Stream<X>
{
match M
case Nil => Nil
case Cons(x, N) => Cons(g(x), map_g(N))
}
ghost function map_fg(M: Stream<X>): Stream<X>
{
match M
case Nil => Nil
case Cons(x, N) => Cons(f(g(x)), map_fg(N))
}
// ----- Theorems
// map (f * g) M = map f (map g M)
greatest lemma Theorem0(M: Stream<X>)
ensures map_fg(M) == map_f(map_g(M));
{
match (M) {
case Nil =>
case Cons(x, N) =>
Theorem0(N);
}
}
greatest lemma Theorem0_Alt(M: Stream<X>)
ensures map_fg(M) == map_f(map_g(M));
{
if (M.Cons?) {
Theorem0_Alt(M.tail);
}
}
lemma Theorem0_Par(M: Stream<X>)
ensures map_fg(M) == map_f(map_g(M));
{
forall k: nat {
Theorem0_Ind(k, M);
}
}
lemma Theorem0_Ind(k: nat, M: Stream<X>)
ensures map_fg(M) ==#[k] map_f(map_g(M));
{
if (k != 0) {
match (M) {
case Nil =>
case Cons(x, N) =>
Theorem0_Ind(k-1, N);
}
}
}
lemma Theorem0_AutoInd(k: nat, M: Stream<X>)
ensures map_fg(M) ==#[k] map_f(map_g(M));
{
}
// map f (append M N) = append (map f M) (map f N)
greatest lemma Theorem1(M: Stream<X>, N: Stream<X>)
ensures map_f(append(M, N)) == append(map_f(M), map_f(N));
{
match (M) {
case Nil =>
case Cons(x, M') =>
Theorem1(M', N);
}
}
greatest lemma Theorem1_Alt(M: Stream<X>, N: Stream<X>)
ensures map_f(append(M, N)) == append(map_f(M), map_f(N));
{
if (M.Cons?) {
Theorem1_Alt(M.tail, N);
}
}
lemma Theorem1_Par(M: Stream<X>, N: Stream<X>)
ensures map_f(append(M, N)) == append(map_f(M), map_f(N));
{
forall k: nat {
Theorem1_Ind(k, M, N);
}
}
lemma Theorem1_Ind(k: nat, M: Stream<X>, N: Stream<X>)
ensures map_f(append(M, N)) ==#[k] append(map_f(M), map_f(N));
{
// this time, try doing the 'if' inside the 'match' (instead of the other way around)
match (M) {
case Nil =>
case Cons(x, M') =>
if (k != 0) {
Theorem1_Ind(k-1, M', N);
}
}
}
lemma Theorem1_AutoInd(k: nat, M: Stream<X>, N: Stream<X>)
ensures map_f(append(M, N)) ==#[k] append(map_f(M), map_f(N));
{
}
lemma Theorem1_AutoForall()
{
// assert forall k: nat, M, N :: map_f(append(M, N)) ==#[k] append(map_f(M), map_f(N)); // TODO: this is not working yet, apparently
}
// append NIL M = M
lemma Theorem2(M: Stream<X>)
ensures append(Nil, M) == M;
{
// trivial
}
// append M NIL = M
greatest lemma Theorem3(M: Stream<X>)
ensures append(M, Nil) == M;
{
match (M) {
case Nil =>
case Cons(x, N) =>
Theorem3(N);
}
}
greatest lemma Theorem3_Alt(M: Stream<X>)
ensures append(M, Nil) == M;
{
if (M.Cons?) {
Theorem3_Alt(M.tail);
}
}
// append M (append N P) = append (append M N) P
greatest lemma Theorem4(M: Stream<X>, N: Stream<X>, P: Stream<X>)
ensures append(M, append(N, P)) == append(append(M, N), P);
{
match (M) {
case Nil =>
case Cons(x, M') =>
Theorem4(M', N, P);
}
}
greatest lemma Theorem4_Alt(M: Stream<X>, N: Stream<X>, P: Stream<X>)
ensures append(M, append(N, P)) == append(append(M, N), P);
{
if (M.Cons?) {
Theorem4_Alt(M.tail, N, P);
}
}
// ----- Flatten
// Flatten can't be written as just:
//
// function SimpleFlatten(M: Stream<Stream>): Stream
// {
// match M
// case Nil => Nil
// case Cons(s, N) => append(s, SimpleFlatten(N))
// }
//
// because this function fails to be productive given an infinite stream of Nil's.
// Instead, here are two variations of SimpleFlatten. The first variation (FlattenStartMarker)
// prepends a "startMarker" to each of the streams in "M". The other (FlattenNonEmpties)
// insists that "M" contain no empty streams. One can prove a theorem that relates these
// two versions.
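// Added illustration (the name StreamOfNils is ours, not part of the original file): a stream
// consisting solely of empty streams, which is the kind of input on which the rejected
// SimpleFlatten above would fail to be productive.
ghost function StreamOfNils<T>(): Stream<Stream<T>>
{
  Cons(Nil, StreamOfNils())
}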
// This first variation of Flatten returns a stream of the streams in M, each preceded with
// "startMarker".
ghost function FlattenStartMarker<T>(M: Stream<Stream>, startMarker: T): Stream
{
PrependThenFlattenStartMarker(Nil, M, startMarker)
}
ghost function PrependThenFlattenStartMarker<T>(prefix: Stream, M: Stream<Stream>, startMarker: T): Stream
{
match prefix
case Cons(hd, tl) =>
Cons(hd, PrependThenFlattenStartMarker(tl, M, startMarker))
case Nil =>
match M
case Nil => Nil
case Cons(s, N) => Cons(startMarker, PrependThenFlattenStartMarker(s, N, startMarker))
}
// The next variation of Flatten requires M to contain no empty streams.
greatest predicate StreamOfNonEmpties(M: Stream<Stream>)
{
match M
case Nil => true
case Cons(s, N) => s.Cons? && StreamOfNonEmpties(N)
}
ghost function FlattenNonEmpties(M: Stream<Stream>): Stream
requires StreamOfNonEmpties(M);
{
PrependThenFlattenNonEmpties(Nil, M)
}
ghost function PrependThenFlattenNonEmpties(prefix: Stream, M: Stream<Stream>): Stream
requires StreamOfNonEmpties(M);
{
match prefix
case Cons(hd, tl) =>
Cons(hd, PrependThenFlattenNonEmpties(tl, M))
case Nil =>
match M
case Nil => Nil
case Cons(s, N) => Cons(s.head, PrependThenFlattenNonEmpties(s.tail, N))
}
// We can prove a theorem that links the previous two variations of flatten. To
// do that, we first define a function that prepends an element to each stream
// of a given stream of streams.
ghost function Prepend<T>(x: T, M: Stream<Stream>): Stream<Stream>
{
match M
case Nil => Nil
case Cons(s, N) => Cons(Cons(x, s), Prepend(x, N))
}
greatest lemma Prepend_Lemma<T>(x: T, M: Stream<Stream>)
ensures StreamOfNonEmpties(Prepend(x, M));
{
match M {
case Nil =>
case Cons(s, N) => Prepend_Lemma(x, N);
}
}
lemma Theorem_Flatten<T>(M: Stream<Stream>, startMarker: T)
ensures
StreamOfNonEmpties(Prepend(startMarker, M)) ==> // always holds, on account of Prepend_Lemma;
// but until (co-)method can be called from functions,
// this condition is used as an antecedent here
FlattenStartMarker(M, startMarker) == FlattenNonEmpties(Prepend(startMarker, M));
{
Prepend_Lemma(startMarker, M);
Lemma_Flatten(Nil, M, startMarker);
}
greatest lemma Lemma_Flatten<T>(prefix: Stream, M: Stream<Stream>, startMarker: T)
ensures
StreamOfNonEmpties(Prepend(startMarker, M)) ==> // always holds, on account of Prepend_Lemma;
// but until (co-)method can be called from functions,
// this condition is used as an antecedent here
PrependThenFlattenStartMarker(prefix, M, startMarker) == PrependThenFlattenNonEmpties(prefix, Prepend(startMarker, M));
{
Prepend_Lemma(startMarker, M);
match (prefix) {
case Cons(hd, tl) =>
Lemma_Flatten(tl, M, startMarker);
case Nil =>
match (M) {
case Nil =>
case Cons(s, N) =>
if (*) {
// This is all that's needed for the proof
Lemma_Flatten(s, N, startMarker);
} else {
// ...but here are some calculations that try to show more of what's going on
// (It would be nice to have ==#[...] available as an operator in calculations.)
// massage the LHS:
calc {
PrependThenFlattenStartMarker(prefix, M, startMarker);
== // def. PrependThenFlattenStartMarker
Cons(startMarker, PrependThenFlattenStartMarker(s, N, startMarker));
}
// massage the RHS:
calc {
PrependThenFlattenNonEmpties(prefix, Prepend(startMarker, M));
== // M == Cons(s, N)
PrependThenFlattenNonEmpties(prefix, Prepend(startMarker, Cons(s, N)));
== // def. Prepend
PrependThenFlattenNonEmpties(prefix, Cons(Cons(startMarker, s), Prepend(startMarker, N)));
== // def. PrependThenFlattenNonEmpties
Cons(Cons(startMarker, s).head, PrependThenFlattenNonEmpties(Cons(startMarker, s).tail, Prepend(startMarker, N)));
== // Cons, head, tail
Cons(startMarker, PrependThenFlattenNonEmpties(s, Prepend(startMarker, N)));
}
// all together now:
calc {
PrependThenFlattenStartMarker(prefix, M, startMarker) ==#[_k] PrependThenFlattenNonEmpties(prefix, Prepend(startMarker, M));
{ // by the calculation above, we have:
assert PrependThenFlattenStartMarker(prefix, M, startMarker) == Cons(startMarker, PrependThenFlattenStartMarker(s, N, startMarker)); }
Cons(startMarker, PrependThenFlattenStartMarker(s, N, startMarker)) ==#[_k] PrependThenFlattenNonEmpties(prefix, Prepend(startMarker, M));
{ // and by the other calculation above, we have:
assert PrependThenFlattenNonEmpties(prefix, Prepend(startMarker, M)) == Cons(startMarker, PrependThenFlattenNonEmpties(s, Prepend(startMarker, N))); }
Cons(startMarker, PrependThenFlattenStartMarker(s, N, startMarker)) ==#[_k] Cons(startMarker, PrependThenFlattenNonEmpties(s, Prepend(startMarker, N)));
== // def. of ==#[_k] for _k != 0
startMarker == startMarker &&
PrependThenFlattenStartMarker(s, N, startMarker) ==#[_k-1] PrependThenFlattenNonEmpties(s, Prepend(startMarker, N));
{ Lemma_Flatten(s, N, startMarker);
// the postcondition of the call we just made (which invokes the co-induction hypothesis) is:
assert PrependThenFlattenStartMarker(s, N, startMarker) ==#[_k-1] PrependThenFlattenNonEmpties(s, Prepend(startMarker, N));
}
true;
}
}
}
}
}
greatest lemma Lemma_FlattenAppend0<T>(s: Stream, M: Stream<Stream>, startMarker: T)
ensures PrependThenFlattenStartMarker(s, M, startMarker) == append(s, PrependThenFlattenStartMarker(Nil, M, startMarker));
{
match (s) {
case Nil =>
case Cons(hd, tl) =>
Lemma_FlattenAppend0(tl, M, startMarker);
}
}
greatest lemma Lemma_FlattenAppend1<T>(s: Stream, M: Stream<Stream>)
requires StreamOfNonEmpties(M);
ensures PrependThenFlattenNonEmpties(s, M) == append(s, PrependThenFlattenNonEmpties(Nil, M));
{
match (s) {
case Nil =>
case Cons(hd, tl) =>
Lemma_FlattenAppend1(tl, M);
}
}
| // RUN: %testDafnyForEachResolver "%s" -- --warn-deprecation:false
// ----- Stream
codatatype Stream<T> = Nil | Cons(head: T, tail: Stream)
ghost function append(M: Stream, N: Stream): Stream
{
match M
case Nil => N
case Cons(t, M') => Cons(t, append(M', N))
}
// ----- f, g, and maps
type X
ghost function f(x: X): X
ghost function g(x: X): X
ghost function map_f(M: Stream<X>): Stream<X>
{
match M
case Nil => Nil
case Cons(x, N) => Cons(f(x), map_f(N))
}
ghost function map_g(M: Stream<X>): Stream<X>
{
match M
case Nil => Nil
case Cons(x, N) => Cons(g(x), map_g(N))
}
ghost function map_fg(M: Stream<X>): Stream<X>
{
match M
case Nil => Nil
case Cons(x, N) => Cons(f(g(x)), map_fg(N))
}
// ----- Theorems
// map (f * g) M = map f (map g M)
greatest lemma Theorem0(M: Stream<X>)
ensures map_fg(M) == map_f(map_g(M));
{
match (M) {
case Nil =>
case Cons(x, N) =>
Theorem0(N);
}
}
greatest lemma Theorem0_Alt(M: Stream<X>)
ensures map_fg(M) == map_f(map_g(M));
{
if (M.Cons?) {
Theorem0_Alt(M.tail);
}
}
lemma Theorem0_Par(M: Stream<X>)
ensures map_fg(M) == map_f(map_g(M));
{
forall k: nat {
Theorem0_Ind(k, M);
}
}
lemma Theorem0_Ind(k: nat, M: Stream<X>)
ensures map_fg(M) ==#[k] map_f(map_g(M));
{
if (k != 0) {
match (M) {
case Nil =>
case Cons(x, N) =>
Theorem0_Ind(k-1, N);
}
}
}
lemma Theorem0_AutoInd(k: nat, M: Stream<X>)
ensures map_fg(M) ==#[k] map_f(map_g(M));
{
}
// map f (append M N) = append (map f M) (map f N)
greatest lemma Theorem1(M: Stream<X>, N: Stream<X>)
ensures map_f(append(M, N)) == append(map_f(M), map_f(N));
{
match (M) {
case Nil =>
case Cons(x, M') =>
Theorem1(M', N);
}
}
greatest lemma Theorem1_Alt(M: Stream<X>, N: Stream<X>)
ensures map_f(append(M, N)) == append(map_f(M), map_f(N));
{
if (M.Cons?) {
Theorem1_Alt(M.tail, N);
}
}
lemma Theorem1_Par(M: Stream<X>, N: Stream<X>)
ensures map_f(append(M, N)) == append(map_f(M), map_f(N));
{
forall k: nat {
Theorem1_Ind(k, M, N);
}
}
lemma Theorem1_Ind(k: nat, M: Stream<X>, N: Stream<X>)
ensures map_f(append(M, N)) ==#[k] append(map_f(M), map_f(N));
{
// this time, try doing the 'if' inside the 'match' (instead of the other way around)
match (M) {
case Nil =>
case Cons(x, M') =>
if (k != 0) {
Theorem1_Ind(k-1, M', N);
}
}
}
lemma Theorem1_AutoInd(k: nat, M: Stream<X>, N: Stream<X>)
ensures map_f(append(M, N)) ==#[k] append(map_f(M), map_f(N));
{
}
lemma Theorem1_AutoForall()
{
// assert forall k: nat, M, N :: map_f(append(M, N)) ==#[k] append(map_f(M), map_f(N)); // TODO: this is not working yet, apparently
}
// append NIL M = M
lemma Theorem2(M: Stream<X>)
ensures append(Nil, M) == M;
{
// trivial
}
// append M NIL = M
greatest lemma Theorem3(M: Stream<X>)
ensures append(M, Nil) == M;
{
match (M) {
case Nil =>
case Cons(x, N) =>
Theorem3(N);
}
}
greatest lemma Theorem3_Alt(M: Stream<X>)
ensures append(M, Nil) == M;
{
if (M.Cons?) {
Theorem3_Alt(M.tail);
}
}
// append M (append N P) = append (append M N) P
greatest lemma Theorem4(M: Stream<X>, N: Stream<X>, P: Stream<X>)
ensures append(M, append(N, P)) == append(append(M, N), P);
{
match (M) {
case Nil =>
case Cons(x, M') =>
Theorem4(M', N, P);
}
}
greatest lemma Theorem4_Alt(M: Stream<X>, N: Stream<X>, P: Stream<X>)
ensures append(M, append(N, P)) == append(append(M, N), P);
{
if (M.Cons?) {
Theorem4_Alt(M.tail, N, P);
}
}
// ----- Flatten
// Flatten can't be written as just:
//
// function SimpleFlatten(M: Stream<Stream>): Stream
// {
// match M
// case Nil => Nil
// case Cons(s, N) => append(s, SimpleFlatten(N))
// }
//
// because this function fails to be productive given an infinite stream of Nil's.
// Instead, here are two variations of SimpleFlatten. The first variation (FlattenStartMarker)
// prepends a "startMarker" to each of the streams in "M". The other (FlattenNonEmpties)
// insists that "M" contain no empty streams. One can prove a theorem that relates these
// two versions.
// This first variation of Flatten returns a stream of the streams in M, each preceded with
// "startMarker".
ghost function FlattenStartMarker<T>(M: Stream<Stream>, startMarker: T): Stream
{
PrependThenFlattenStartMarker(Nil, M, startMarker)
}
ghost function PrependThenFlattenStartMarker<T>(prefix: Stream, M: Stream<Stream>, startMarker: T): Stream
{
match prefix
case Cons(hd, tl) =>
Cons(hd, PrependThenFlattenStartMarker(tl, M, startMarker))
case Nil =>
match M
case Nil => Nil
case Cons(s, N) => Cons(startMarker, PrependThenFlattenStartMarker(s, N, startMarker))
}
// The next variation of Flatten requires M to contain no empty streams.
greatest predicate StreamOfNonEmpties(M: Stream<Stream>)
{
match M
case Nil => true
case Cons(s, N) => s.Cons? && StreamOfNonEmpties(N)
}
ghost function FlattenNonEmpties(M: Stream<Stream>): Stream
requires StreamOfNonEmpties(M);
{
PrependThenFlattenNonEmpties(Nil, M)
}
ghost function PrependThenFlattenNonEmpties(prefix: Stream, M: Stream<Stream>): Stream
requires StreamOfNonEmpties(M);
{
match prefix
case Cons(hd, tl) =>
Cons(hd, PrependThenFlattenNonEmpties(tl, M))
case Nil =>
match M
case Nil => Nil
case Cons(s, N) => Cons(s.head, PrependThenFlattenNonEmpties(s.tail, N))
}
// We can prove a theorem that links the previous two variations of flatten. To
// do that, we first define a function that prepends an element to each stream
// of a given stream of streams.
ghost function Prepend<T>(x: T, M: Stream<Stream>): Stream<Stream>
{
match M
case Nil => Nil
case Cons(s, N) => Cons(Cons(x, s), Prepend(x, N))
}
greatest lemma Prepend_Lemma<T>(x: T, M: Stream<Stream>)
ensures StreamOfNonEmpties(Prepend(x, M));
{
match M {
case Nil =>
case Cons(s, N) => Prepend_Lemma(x, N);
}
}
lemma Theorem_Flatten<T>(M: Stream<Stream>, startMarker: T)
ensures
StreamOfNonEmpties(Prepend(startMarker, M)) ==> // always holds, on account of Prepend_Lemma;
// but until (co-)method can be called from functions,
// this condition is used as an antecedent here
FlattenStartMarker(M, startMarker) == FlattenNonEmpties(Prepend(startMarker, M));
{
Prepend_Lemma(startMarker, M);
Lemma_Flatten(Nil, M, startMarker);
}
greatest lemma Lemma_Flatten<T>(prefix: Stream, M: Stream<Stream>, startMarker: T)
ensures
StreamOfNonEmpties(Prepend(startMarker, M)) ==> // always holds, on account of Prepend_Lemma;
// but until (co-)method can be called from functions,
// this condition is used as an antecedent here
PrependThenFlattenStartMarker(prefix, M, startMarker) == PrependThenFlattenNonEmpties(prefix, Prepend(startMarker, M));
{
Prepend_Lemma(startMarker, M);
match (prefix) {
case Cons(hd, tl) =>
Lemma_Flatten(tl, M, startMarker);
case Nil =>
match (M) {
case Nil =>
case Cons(s, N) =>
if (*) {
// This is all that's needed for the proof
Lemma_Flatten(s, N, startMarker);
} else {
// ...but here are some calculations that try to show more of what's going on
// (It would be nice to have ==#[...] available as an operator in calculations.)
// massage the LHS:
calc {
PrependThenFlattenStartMarker(prefix, M, startMarker);
== // def. PrependThenFlattenStartMarker
Cons(startMarker, PrependThenFlattenStartMarker(s, N, startMarker));
}
// massage the RHS:
calc {
PrependThenFlattenNonEmpties(prefix, Prepend(startMarker, M));
== // M == Cons(s, N)
PrependThenFlattenNonEmpties(prefix, Prepend(startMarker, Cons(s, N)));
== // def. Prepend
PrependThenFlattenNonEmpties(prefix, Cons(Cons(startMarker, s), Prepend(startMarker, N)));
== // def. PrependThenFlattenNonEmpties
Cons(Cons(startMarker, s).head, PrependThenFlattenNonEmpties(Cons(startMarker, s).tail, Prepend(startMarker, N)));
== // Cons, head, tail
Cons(startMarker, PrependThenFlattenNonEmpties(s, Prepend(startMarker, N)));
}
// all together now:
calc {
PrependThenFlattenStartMarker(prefix, M, startMarker) ==#[_k] PrependThenFlattenNonEmpties(prefix, Prepend(startMarker, M));
                { // by the calculation above
                }
Cons(startMarker, PrependThenFlattenStartMarker(s, N, startMarker)) ==#[_k] PrependThenFlattenNonEmpties(prefix, Prepend(startMarker, M));
                { // and by the other calculation above
                }
Cons(startMarker, PrependThenFlattenStartMarker(s, N, startMarker)) ==#[_k] Cons(startMarker, PrependThenFlattenNonEmpties(s, Prepend(startMarker, N)));
== // def. of ==#[_k] for _k != 0
startMarker == startMarker &&
PrependThenFlattenStartMarker(s, N, startMarker) ==#[_k-1] PrependThenFlattenNonEmpties(s, Prepend(startMarker, N));
{ Lemma_Flatten(s, N, startMarker);
// the postcondition of the call we just made (which invokes the co-induction hypothesis) is:
}
true;
}
}
}
}
}
greatest lemma Lemma_FlattenAppend0<T>(s: Stream, M: Stream<Stream>, startMarker: T)
ensures PrependThenFlattenStartMarker(s, M, startMarker) == append(s, PrependThenFlattenStartMarker(Nil, M, startMarker));
{
match (s) {
case Nil =>
case Cons(hd, tl) =>
Lemma_FlattenAppend0(tl, M, startMarker);
}
}
greatest lemma Lemma_FlattenAppend1<T>(s: Stream, M: Stream<Stream>)
requires StreamOfNonEmpties(M);
ensures PrependThenFlattenNonEmpties(s, M) == append(s, PrependThenFlattenNonEmpties(Nil, M));
{
match (s) {
case Nil =>
case Cons(hd, tl) =>
Lemma_FlattenAppend1(tl, M);
}
}
|
320 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_from dafny main repo_dafny4_ACL2-extractor.dfy | // RUN: %dafny /compile:0 /deprecation:0 /proverOpt:O:smt.qi.eager_threshold=30 /dprint:"%t.dprint" "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
// This is the Extractor Problem from section 11.8 of the ACL2 book,
// "Computer-Aided Reasoning: An Approach" by Kaufmann, Manolios, and
// Moore (2011 edition).
datatype List<T> = Nil | Cons(head: T, tail: List)
ghost function length(xs: List): nat
{
match xs
case Nil => 0
case Cons(_, rest) => 1 + length(rest)
}
// If "0 <= n < length(xs)", then return the element of "xs" that is preceded by
// "n" elements; otherwise, return an arbitrary value.
ghost opaque function nth<T(00)>(n: int, xs: List<T>): T
{
if 0 <= n < length(xs) then
nthWorker(n, xs)
else
var t :| true; t
}
ghost function nthWorker<T>(n: int, xs: List<T>): T
requires 0 <= n < length(xs);
{
if n == 0 then xs.head else nthWorker(n-1, xs.tail)
}
ghost function append(xs: List, ys: List): List
{
match xs
case Nil => ys
case Cons(x, rest) => Cons(x, append(rest, ys))
}
ghost function rev(xs: List): List
{
match xs
case Nil => Nil
case Cons(x, rest) => append(rev(rest), Cons(x, Nil))
}
ghost function nats(n: nat): List<int>
{
if n == 0 then Nil else Cons(n-1, nats(n-1))
}
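// Added example (this lemma is ours, not part of the original file): nats counts down from
// n-1 to 0, so, for instance, nats(2) is the two-element list below.
lemma NatsSmallExample()
  ensures nats(2) == Cons(1, Cons(0, Nil))
{
  assert nats(0) == Nil;
  assert nats(1) == Cons(0, Nil);
}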
ghost function xtr<T(00)>(mp: List<int>, lst: List): List
{
match mp
case Nil => Nil
case Cons(n, rest) => Cons(nth(n, lst), xtr(rest, lst))
}
lemma ExtractorTheorem<T(00)>(xs: List)
ensures xtr(nats(length(xs)), xs) == rev(xs);
{
var a, b := xtr(nats(length(xs)), xs), rev(xs);
calc {
length(a);
{ XtrLength(nats(length(xs)), xs); }
length(nats(length(xs)));
{ NatsLength(length(xs)); }
length(xs);
}
calc {
length(xs);
{ RevLength(xs); }
length(b);
}
forall i | 0 <= i < length(xs)
ensures nth(i, a) == nth(i, b);
{
reveal nth();
ExtractorLemma(i, xs);
}
EqualElementsMakeEqualLists(a, b);
}
// auxiliary lemmas and proofs follow
// lemmas about length
lemma XtrLength(mp: List<int>, lst: List)
ensures length(xtr(mp, lst)) == length(mp);
{
}
lemma NatsLength(n: nat)
ensures length(nats(n)) == n;
{
}
lemma AppendLength(xs: List, ys: List)
ensures length(append(xs, ys)) == length(xs) + length(ys);
{
}
lemma RevLength(xs: List)
ensures length(rev(xs)) == length(xs);
{
match xs {
case Nil =>
case Cons(x, rest) =>
calc {
length(append(rev(rest), Cons(x, Nil)));
{ AppendLength(rev(rest), Cons(x, Nil)); }
length(rev(rest)) + length(Cons(x, Nil));
length(rev(rest)) + 1;
}
}
}
// you can prove two lists equal by proving their elements equal
lemma EqualElementsMakeEqualLists<T(00)>(xs: List, ys: List)
requires length(xs) == length(ys)
requires forall i :: 0 <= i < length(xs) ==> nth(i, xs) == nth(i, ys)
ensures xs == ys
{
reveal nth();
match xs {
case Nil =>
case Cons(x, rest) =>
assert nth(0, xs) == nth(0, ys);
forall i | 0 <= i < length(xs.tail)
{
calc {
nth(i, xs.tail) == nth(i, ys.tail);
nth(i+1, Cons(xs.head, xs.tail)) == nth(i+1, Cons(ys.head, ys.tail));
nth(i+1, xs) == nth(i+1, ys);
}
}
// EqualElementsMakeEqualLists(xs.tail, ys.tail);
}
}
// here is the theorem, but applied to the ith element
lemma {:vcs_split_on_every_assert} ExtractorLemma<T(00)>(i: int, xs: List)
requires 0 <= i < length(xs);
ensures nth(i, xtr(nats(length(xs)), xs)) == nth(i, rev(xs));
{
calc {
nth(i, xtr(nats(length(xs)), xs));
{ NatsLength(length(xs));
NthXtr(i, nats(length(xs)), xs); }
nth(nth(i, nats(length(xs))), xs);
{ NthNats(i, length(xs)); }
nth(length(xs) - 1 - i, xs);
{ reveal nth(); RevLength(xs); NthRev(i, xs); }
nth(i, rev(xs));
}
}
// lemmas about what nth gives on certain lists
lemma NthXtr<T(00)>(i: int, mp: List<int>, lst: List<T>)
requires 0 <= i < length(mp);
ensures nth(i, xtr(mp, lst)) == nth(nth(i, mp), lst);
{
reveal nth();
XtrLength(mp, lst);
assert nth(i, xtr(mp, lst)) == nthWorker(i, xtr(mp, lst));
if i == 0 {
} else {
calc {
nth(i-1, xtr(mp, lst).tail);
// def. xtr
nth(i-1, xtr(mp.tail, lst));
{ NthXtr(i-1, mp.tail, lst); }
nth(nth(i-1, mp.tail), lst);
}
}
}
lemma NthNats(i: int, n: nat)
requires 0 <= i < n;
ensures nth(i, nats(n)) == n - 1 - i;
{
reveal nth();
NatsLength(n);
NthNatsWorker(i, n);
}
lemma NthNatsWorker(i: int, n: nat)
requires 0 <= i < n && length(nats(n)) == n;
ensures nthWorker(i, nats(n)) == n - 1 - i;
{
}
lemma NthRev<T(00)>(i: int, xs: List)
requires 0 <= i < length(xs) == length(rev(xs));
ensures nthWorker(i, rev(xs)) == nthWorker(length(xs) - 1 - i, xs);
{
reveal nth();
assert xs.Cons?;
assert 1 <= length(rev(xs)) && rev(xs).Cons?;
RevLength(xs.tail);
if i < length(rev(xs.tail)) {
calc {
nth(i, rev(xs));
nthWorker(i, rev(xs));
// def. rev
nthWorker(i, append(rev(xs.tail), Cons(xs.head, Nil)));
{ NthAppendA(i, rev(xs.tail), Cons(xs.head, Nil)); }
nthWorker(i, rev(xs.tail));
{ NthRev(i, xs.tail); } // induction hypothesis
nthWorker(length(xs.tail) - 1 - i, xs.tail);
// def. nthWorker
nthWorker(length(xs.tail) - 1 - i + 1, xs);
nthWorker(length(xs) - 1 - i, xs);
}
} else {
assert i == length(rev(xs.tail));
calc {
nth(i, rev(xs));
nthWorker(i, rev(xs));
// def. rev
nthWorker(i, append(rev(xs.tail), Cons(xs.head, Nil)));
{ NthAppendB(i, rev(xs.tail), Cons(xs.head, Nil)); }
nthWorker(i - length(rev(xs.tail)), Cons(xs.head, Nil));
nthWorker(0, Cons(xs.head, Nil));
nthWorker(0, xs);
nthWorker(length(xs) - 1 - length(xs.tail), xs);
{ RevLength(xs.tail); }
nthWorker(length(xs) - 1 - length(rev(xs.tail)), xs);
nth(length(xs) - 1 - length(rev(xs.tail)), xs);
nth(length(xs) - 1 - i, xs);
}
}
}
lemma NthAppendA<T(00)>(i: int, xs: List, ys: List)
requires 0 <= i < length(xs);
ensures nth(i, append(xs, ys)) == nth(i, xs);
{
reveal nth();
if i == 0 {
calc {
nth(0, append(xs, ys));
nth(0, Cons(xs.head, append(xs.tail, ys)));
xs.head;
}
} else {
calc {
nth(i, append(xs, ys));
nth(i, Cons(xs.head, append(xs.tail, ys)));
nth(i-1, append(xs.tail, ys));
{ NthAppendA(i-1, xs.tail, ys); }
nth(i-1, xs.tail);
}
}
}
lemma NthAppendB<T(00)>(i: int, xs: List, ys: List)
requires length(xs) <= i < length(xs) + length(ys);
ensures nth(i, append(xs, ys)) == nth(i - length(xs), ys);
{
reveal nth();
AppendLength(xs, ys);
match xs {
case Nil =>
assert nth(i, append(xs, ys)) == nth(i, ys);
case Cons(x, rest) =>
calc {
nth(i, append(xs, ys));
nth(i, append(Cons(x, rest), ys));
// def. append
nth(i, Cons(x, append(rest, ys)));
nth(i-1, append(rest, ys));
{ NthAppendB(i-1, rest, ys); }
nth(i-1 - length(rest), ys);
}
}
}
| // RUN: %dafny /compile:0 /deprecation:0 /proverOpt:O:smt.qi.eager_threshold=30 /dprint:"%t.dprint" "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
// This is the Extractor Problem from section 11.8 of the ACL2 book,
// "Computer-Aided Reasoning: An Approach" by Kaufmann, Manolios, and
// Moore (2011 edition).
datatype List<T> = Nil | Cons(head: T, tail: List)
ghost function length(xs: List): nat
{
match xs
case Nil => 0
case Cons(_, rest) => 1 + length(rest)
}
// If "0 <= n < length(xs)", then return the element of "xs" that is preceded by
// "n" elements; otherwise, return an arbitrary value.
ghost opaque function nth<T(00)>(n: int, xs: List<T>): T
{
if 0 <= n < length(xs) then
nthWorker(n, xs)
else
var t :| true; t
}
ghost function nthWorker<T>(n: int, xs: List<T>): T
requires 0 <= n < length(xs);
{
if n == 0 then xs.head else nthWorker(n-1, xs.tail)
}
ghost function append(xs: List, ys: List): List
{
match xs
case Nil => ys
case Cons(x, rest) => Cons(x, append(rest, ys))
}
ghost function rev(xs: List): List
{
match xs
case Nil => Nil
case Cons(x, rest) => append(rev(rest), Cons(x, Nil))
}
ghost function nats(n: nat): List<int>
{
if n == 0 then Nil else Cons(n-1, nats(n-1))
}
ghost function xtr<T(00)>(mp: List<int>, lst: List): List
{
match mp
case Nil => Nil
case Cons(n, rest) => Cons(nth(n, lst), xtr(rest, lst))
}
lemma ExtractorTheorem<T(00)>(xs: List)
ensures xtr(nats(length(xs)), xs) == rev(xs);
{
var a, b := xtr(nats(length(xs)), xs), rev(xs);
calc {
length(a);
{ XtrLength(nats(length(xs)), xs); }
length(nats(length(xs)));
{ NatsLength(length(xs)); }
length(xs);
}
calc {
length(xs);
{ RevLength(xs); }
length(b);
}
forall i | 0 <= i < length(xs)
ensures nth(i, a) == nth(i, b);
{
reveal nth();
ExtractorLemma(i, xs);
}
EqualElementsMakeEqualLists(a, b);
}
// auxiliary lemmas and proofs follow
// lemmas about length
lemma XtrLength(mp: List<int>, lst: List)
ensures length(xtr(mp, lst)) == length(mp);
{
}
lemma NatsLength(n: nat)
ensures length(nats(n)) == n;
{
}
lemma AppendLength(xs: List, ys: List)
ensures length(append(xs, ys)) == length(xs) + length(ys);
{
}
lemma RevLength(xs: List)
ensures length(rev(xs)) == length(xs);
{
match xs {
case Nil =>
case Cons(x, rest) =>
calc {
length(append(rev(rest), Cons(x, Nil)));
{ AppendLength(rev(rest), Cons(x, Nil)); }
length(rev(rest)) + length(Cons(x, Nil));
length(rev(rest)) + 1;
}
}
}
// you can prove two lists equal by proving their elements equal
lemma EqualElementsMakeEqualLists<T(00)>(xs: List, ys: List)
requires length(xs) == length(ys)
requires forall i :: 0 <= i < length(xs) ==> nth(i, xs) == nth(i, ys)
ensures xs == ys
{
reveal nth();
match xs {
case Nil =>
case Cons(x, rest) =>
forall i | 0 <= i < length(xs.tail)
{
calc {
nth(i, xs.tail) == nth(i, ys.tail);
nth(i+1, Cons(xs.head, xs.tail)) == nth(i+1, Cons(ys.head, ys.tail));
nth(i+1, xs) == nth(i+1, ys);
}
}
// EqualElementsMakeEqualLists(xs.tail, ys.tail);
}
}
// here is the theorem, but applied to the ith element
lemma {:vcs_split_on_every_assert} ExtractorLemma<T(00)>(i: int, xs: List)
requires 0 <= i < length(xs);
ensures nth(i, xtr(nats(length(xs)), xs)) == nth(i, rev(xs));
{
calc {
nth(i, xtr(nats(length(xs)), xs));
{ NatsLength(length(xs));
NthXtr(i, nats(length(xs)), xs); }
nth(nth(i, nats(length(xs))), xs);
{ NthNats(i, length(xs)); }
nth(length(xs) - 1 - i, xs);
{ reveal nth(); RevLength(xs); NthRev(i, xs); }
nth(i, rev(xs));
}
}
// lemmas about what nth gives on certain lists
lemma NthXtr<T(00)>(i: int, mp: List<int>, lst: List<T>)
requires 0 <= i < length(mp);
ensures nth(i, xtr(mp, lst)) == nth(nth(i, mp), lst);
{
reveal nth();
XtrLength(mp, lst);
if i == 0 {
} else {
calc {
nth(i-1, xtr(mp, lst).tail);
// def. xtr
nth(i-1, xtr(mp.tail, lst));
{ NthXtr(i-1, mp.tail, lst); }
nth(nth(i-1, mp.tail), lst);
}
}
}
lemma NthNats(i: int, n: nat)
requires 0 <= i < n;
ensures nth(i, nats(n)) == n - 1 - i;
{
reveal nth();
NatsLength(n);
NthNatsWorker(i, n);
}
lemma NthNatsWorker(i: int, n: nat)
requires 0 <= i < n && length(nats(n)) == n;
ensures nthWorker(i, nats(n)) == n - 1 - i;
{
}
lemma NthRev<T(00)>(i: int, xs: List)
requires 0 <= i < length(xs) == length(rev(xs));
ensures nthWorker(i, rev(xs)) == nthWorker(length(xs) - 1 - i, xs);
{
reveal nth();
RevLength(xs.tail);
if i < length(rev(xs.tail)) {
calc {
nth(i, rev(xs));
nthWorker(i, rev(xs));
// def. rev
nthWorker(i, append(rev(xs.tail), Cons(xs.head, Nil)));
{ NthAppendA(i, rev(xs.tail), Cons(xs.head, Nil)); }
nthWorker(i, rev(xs.tail));
{ NthRev(i, xs.tail); } // induction hypothesis
nthWorker(length(xs.tail) - 1 - i, xs.tail);
// def. nthWorker
nthWorker(length(xs.tail) - 1 - i + 1, xs);
nthWorker(length(xs) - 1 - i, xs);
}
} else {
calc {
nth(i, rev(xs));
nthWorker(i, rev(xs));
// def. rev
nthWorker(i, append(rev(xs.tail), Cons(xs.head, Nil)));
{ NthAppendB(i, rev(xs.tail), Cons(xs.head, Nil)); }
nthWorker(i - length(rev(xs.tail)), Cons(xs.head, Nil));
nthWorker(0, Cons(xs.head, Nil));
nthWorker(0, xs);
nthWorker(length(xs) - 1 - length(xs.tail), xs);
{ RevLength(xs.tail); }
nthWorker(length(xs) - 1 - length(rev(xs.tail)), xs);
nth(length(xs) - 1 - length(rev(xs.tail)), xs);
nth(length(xs) - 1 - i, xs);
}
}
}
lemma NthAppendA<T(00)>(i: int, xs: List, ys: List)
requires 0 <= i < length(xs);
ensures nth(i, append(xs, ys)) == nth(i, xs);
{
reveal nth();
if i == 0 {
calc {
nth(0, append(xs, ys));
nth(0, Cons(xs.head, append(xs.tail, ys)));
xs.head;
}
} else {
calc {
nth(i, append(xs, ys));
nth(i, Cons(xs.head, append(xs.tail, ys)));
nth(i-1, append(xs.tail, ys));
{ NthAppendA(i-1, xs.tail, ys); }
nth(i-1, xs.tail);
}
}
}
lemma NthAppendB<T(00)>(i: int, xs: List, ys: List)
requires length(xs) <= i < length(xs) + length(ys);
ensures nth(i, append(xs, ys)) == nth(i - length(xs), ys);
{
reveal nth();
AppendLength(xs, ys);
match xs {
case Nil =>
case Cons(x, rest) =>
calc {
nth(i, append(xs, ys));
nth(i, append(Cons(x, rest), ys));
// def. append
nth(i, Cons(x, append(rest, ys)));
nth(i-1, append(rest, ys));
{ NthAppendB(i-1, rest, ys); }
nth(i-1 - length(rest), ys);
}
}
}
|
321 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_from dafny main repo_dafny4_Bug170.dfy | // RUN: %dafny /compile:0 /printTooltips "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
module InductiveThings {
ghost predicate P(x: int)
ghost predicate Q(x: int)
least predicate A(x: int)
{
P(x) || B(x+1)
}
least predicate B(x: int)
{
Q(x) || A(x+1)
}
least lemma AA(x: int) // should be specialized not just for A, but also for B, which is in the same strongly connected component as A in the call graph
requires A(x)
{
if B(x+1) { // this one should be replaced by B#[_k-1](x+1)
BB(x+1);
}
}
least lemma BB(x: int) // should be specialized not just for B, but also for A, which is in the same strongly connected component as B in the call graph
requires B(x)
{
if A(x+1) { // this one should be replaced by A#[_k-1](x+1)
AA(x+1);
}
}
}
module CoThings {
greatest predicate A(x: int)
{
B(x+1)
}
greatest predicate B(x: int)
{
A(x+1)
}
greatest lemma AA(x: int) // should be specialized not just for A, but also for B, which is in the same strongly connected component as A in the call graph
ensures A(x)
{
BB(x+1);
assert B(x+1); // this one should be replaced by B#[_k-1] (which will happen, provided that AA is listed as also being specialized for B)
}
greatest lemma BB(x: int) // should be specialized not just for B, but also for A, which is in the same strongly connected component as B in the call graph
ensures B(x)
{
AA(x+1);
assert A(x+1); // this one should be replaced by A#[_k-1] (which will happen, provided that BB is listed as also being specialized for A)
}
}
module SingleThings {
ghost predicate P(x: int)
least predicate A(x: int)
{
P(x) || A(x+1)
}
least lemma AA(x: int) // should be specialized just for A
requires A(x)
{
if A(x+1) { // this one should be replaced by A#[_k-1](x+1)
AA(x+1);
}
}
}
| // RUN: %dafny /compile:0 /printTooltips "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
module InductiveThings {
ghost predicate P(x: int)
ghost predicate Q(x: int)
least predicate A(x: int)
{
P(x) || B(x+1)
}
least predicate B(x: int)
{
Q(x) || A(x+1)
}
least lemma AA(x: int) // should be specialized not just for A, but also for B, which is in the same strongly connected component as A in the call graph
requires A(x)
{
if B(x+1) { // this one should be replaced by B#[_k-1](x+1)
BB(x+1);
}
}
least lemma BB(x: int) // should be specialized not just for B, but also for A, which is in the same strongly connected component as B in the call graph
requires B(x)
{
if A(x+1) { // this one should be replaced by A#[_k-1](x+1)
AA(x+1);
}
}
}
module CoThings {
greatest predicate A(x: int)
{
B(x+1)
}
greatest predicate B(x: int)
{
A(x+1)
}
greatest lemma AA(x: int) // should be specialized not just for A, but also for B, which is in the same strongly connected component as A in the call graph
ensures A(x)
{
BB(x+1);
}
greatest lemma BB(x: int) // should be specialized not just for B, but also for A, which is in the same strongly connected component as B in the call graph
ensures B(x)
{
AA(x+1);
}
}
module SingleThings {
ghost predicate P(x: int)
least predicate A(x: int)
{
P(x) || A(x+1)
}
least lemma AA(x: int) // should be specialized just for A
requires A(x)
{
if A(x+1) { // this one should be replaced by A#[_k-1](x+1)
AA(x+1);
}
}
}
|
322 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_from dafny main repo_dafny4_ClassRefinement.dfy | // RUN: %testDafnyForEachCompiler "%s" -- --relax-definite-assignment
abstract module M0 {
class Counter {
ghost var N: int
ghost var Repr: set<object>
ghost predicate Valid()
reads this, Repr
ensures Valid() ==> this in Repr
constructor Init()
ensures N == 0
ensures Valid() && fresh(Repr)
{
Repr := {};
new;
ghost var repr :| {this} <= repr && fresh(repr - {this});
N, Repr := 0, repr;
assume Valid(); // to be verified in refinement module
}
method Inc()
requires Valid()
modifies Repr
ensures N == old(N) + 1
ensures Valid() && fresh(Repr - old(Repr))
{
N := N + 1;
modify Repr - {this};
assume Valid(); // to be verified in refinement module
}
method Get() returns (n: int)
requires Valid()
ensures n == N
{
n :| assume n == N;
}
}
}
module M1 refines M0 {
class Cell {
var data: int
constructor (d: int)
ensures data == d
{ data := d; }
}
class Counter ... {
var c: Cell
var d: Cell
ghost predicate Valid...
{
this in Repr &&
c in Repr &&
d in Repr &&
c != d &&
N == c.data - d.data
}
constructor Init...
{
c := new Cell(0);
d := new Cell(0);
new;
ghost var repr := Repr + {this} + {c,d};
...;
assert ...;
}
method Inc...
{
...;
modify ... {
c.data := c.data + 1;
}
assert ...;
}
method Get...
{
n := c.data - d.data;
}
}
}
method Main() {
var mx := new M1.Counter.Init();
var my := new M1.Counter.Init();
assert mx.N == 0 && my.N == 0;
mx.Inc();
my.Inc();
mx.Inc();
var nx := mx.Get();
var ny := my.Get();
assert nx == 2 && ny == 1;
print nx, " ", ny, "\n";
}
| // RUN: %testDafnyForEachCompiler "%s" -- --relax-definite-assignment
abstract module M0 {
class Counter {
ghost var N: int
ghost var Repr: set<object>
ghost predicate Valid()
reads this, Repr
ensures Valid() ==> this in Repr
constructor Init()
ensures N == 0
ensures Valid() && fresh(Repr)
{
Repr := {};
new;
ghost var repr :| {this} <= repr && fresh(repr - {this});
N, Repr := 0, repr;
assume Valid(); // to be verified in refinement module
}
method Inc()
requires Valid()
modifies Repr
ensures N == old(N) + 1
ensures Valid() && fresh(Repr - old(Repr))
{
N := N + 1;
modify Repr - {this};
assume Valid(); // to be verified in refinement module
}
method Get() returns (n: int)
requires Valid()
ensures n == N
{
n :| assume n == N;
}
}
}
module M1 refines M0 {
class Cell {
var data: int
constructor (d: int)
ensures data == d
{ data := d; }
}
class Counter ... {
var c: Cell
var d: Cell
ghost predicate Valid...
{
this in Repr &&
c in Repr &&
d in Repr &&
c != d &&
N == c.data - d.data
}
constructor Init...
{
c := new Cell(0);
d := new Cell(0);
new;
ghost var repr := Repr + {this} + {c,d};
...;
}
method Inc...
{
...;
modify ... {
c.data := c.data + 1;
}
}
method Get...
{
n := c.data - d.data;
}
}
}
method Main() {
var mx := new M1.Counter.Init();
var my := new M1.Counter.Init();
mx.Inc();
my.Inc();
mx.Inc();
var nx := mx.Get();
var ny := my.Get();
print nx, " ", ny, "\n";
}
|
323 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_from dafny main repo_dafny4_NipkowKlein-chapter3.dfy | // RUN: %dafny /proverOpt:O:smt.qi.eager_threshold=30 /compile:0 /rprint:"%t.rprint" "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
// This file is a Dafny encoding of chapter 3 from "Concrete Semantics: With Isabelle/HOL" by
// Tobias Nipkow and Gerwin Klein.
// ----- lists -----
datatype List<T> = Nil | Cons(head: T, tail: List<T>)
ghost function append(xs: List, ys: List): List
{
match xs
case Nil => ys
case Cons(x, tail) => Cons(x, append(tail, ys))
}
// ----- arithmetic expressions -----
type vname = string // variable names
datatype aexp = N(n: int) | V(vname) | Plus(aexp, aexp) // arithmetic expressions
type val = int
type state = vname -> val
ghost function aval(a: aexp, s: state): val
{
match a
case N(n) => n
case V(x) => s(x)
case Plus(a0, a1) => aval(a0, s) + aval(a1, s)
}
lemma Example0()
{
var y := aval(Plus(N(3), V("x")), x => 0);
// The following line confirms that y is 3. If you don't know what y is, you can use the
// verification debugger to figure it out, like this: Put any value in the assert (for example,
// "assert y == 0;". If you're lucky and picked the right value, the verifier will prove the
// assertion for you. If the verifier says it's unable to prove it, then click on the error
// (in the Dafny IDE), which brings up the verification debugger. There, inspect the value
// of y. This is probably the right value, but due to incompleteness in the verifier, it
// could happen that the value you see is some value that the verifier wasn't able to properly
// exclude. Therefore, it's best to now take the value you see in the verification debugger,
// say K, and put that into the assert ("assert y == K;"), to have the verifier confirm that
// K really is the answer.
assert y == 3;
}
// ----- constant folding -----
ghost function asimp_const(a: aexp): aexp
{
match a
case N(n) => a
case V(x) => a
case Plus(a0, a1) =>
var as0, as1 := asimp_const(a0), asimp_const(a1);
if as0.N? && as1.N? then
N(as0.n + as1.n)
else
Plus(as0, as1)
}
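// For example, asimp_const(Plus(Plus(N(1), N(2)), V("x"))) == Plus(N(3), V("x")):
// the all-constant subterm is folded, while the sum involving the variable is kept.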
lemma AsimpConst(a: aexp, s: state)
ensures aval(asimp_const(a), s) == aval(a, s)
{
// by induction
forall a' | a' < a {
AsimpConst(a', s); // this invokes the induction hypothesis for every a' that is structurally smaller than a
}
/* Here is an alternative proof. In the first two cases, the proof is trivial. The Plus case uses two invocations
of the induction hypothesis.
match a
case N(n) =>
case V(x) =>
case Plus(a0, a1) =>
AsimpConst(a0, s);
AsimpConst(a1, s);
*/
}
// more constant folding
ghost function plus(a0: aexp, a1: aexp): aexp
{
if a0.N? && a1.N? then
N(a0.n + a1.n)
else if a0.N? then
if a0.n == 0 then a1 else Plus(a0, a1)
else if a1.N? then
if a1.n == 0 then a0 else Plus(a0, a1)
else
Plus(a0, a1)
}
lemma AvalPlus(a0: aexp, a1: aexp, s: state)
ensures aval(plus(a0, a1), s) == aval(a0, s) + aval(a1, s)
{
// this proof is done automatically
}
ghost function asimp(a: aexp): aexp
{
match a
case N(n) => a
case V(x) => a
case Plus(a0, a1) => plus(asimp(a0), asimp(a1))
}
lemma AsimpCorrect(a: aexp, s: state)
ensures aval(asimp(a), s) == aval(a, s)
{
// call the induction hypothesis on every value a' that is structurally smaller than a
forall a' | a' < a { AsimpCorrect(a', s); }
}
// The following lemma is not in the Nipkow and Klein book, but it's a fun one to prove.
lemma ASimplInvolutive(a: aexp)
ensures asimp(asimp(a)) == asimp(a)
{
}
// ----- boolean expressions -----
datatype bexp = Bc(v: bool) | Not(bexp) | And(bexp, bexp) | Less(aexp, aexp)
ghost function bval(b: bexp, s: state): bool
{
match b
case Bc(v) => v
case Not(b) => !bval(b, s)
case And(b0, b1) => bval(b0, s) && bval(b1, s)
case Less(a0, a1) => aval(a0, s) < aval(a1, s)
}
// constant folding for booleans
ghost function not(b: bexp): bexp
{
match b
case Bc(b0) => Bc(!b0)
case Not(b0) => b0 // this case is not in the Nipkow and Klein book, but it seems a nice one to include
case And(_, _) => Not(b)
case Less(_, _) => Not(b)
}
ghost function and(b0: bexp, b1: bexp): bexp
{
if b0.Bc? then
if b0.v then b1 else b0
else if b1.Bc? then
if b1.v then b0 else b1
else
And(b0, b1)
}
ghost function less(a0: aexp, a1: aexp): bexp
{
if a0.N? && a1.N? then
Bc(a0.n < a1.n)
else
Less(a0, a1)
}
ghost function bsimp(b: bexp): bexp
{
match b
case Bc(v) => b
case Not(b0) => not(bsimp(b0))
case And(b0, b1) => and(bsimp(b0), bsimp(b1))
case Less(a0, a1) => less(asimp(a0), asimp(a1))
}
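// For example, bsimp(And(Bc(true), Less(N(3), N(5)))) == Bc(true):
// less folds the constant comparison to Bc(true), and and(Bc(true), Bc(true)) == Bc(true).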
lemma BsimpCorrect(b: bexp, s: state)
ensures bval(bsimp(b), s) == bval(b, s)
{
/* Here is one proof, which uses the induction hypothesis on anything smaller than b and also invokes
the lemma AsimpCorrect on every arithmetic expression.
forall b' | b' < b { BsimpCorrect(b', s); }
forall a { AsimpCorrect(a, s); }
Yet another possibility is to mark the lemma with {:induction b} and to use the following line in
the body:
forall a { AsimpCorrect(a, s); }
*/
// Here is another proof, which makes explicit the uses of the induction hypothesis and the other lemma.
match b
case Bc(v) =>
case Not(b0) =>
BsimpCorrect(b0, s);
case And(b0, b1) =>
BsimpCorrect(b0, s); BsimpCorrect(b1, s);
case Less(a0, a1) =>
AsimpCorrect(a0, s); AsimpCorrect(a1, s);
}
// ----- stack machine -----
datatype instr = LOADI(val) | LOAD(vname) | ADD
type stack = List<val>
ghost function exec1(i: instr, s: state, stk: stack): stack
{
match i
case LOADI(n) => Cons(n, stk)
case LOAD(x) => Cons(s(x), stk)
case ADD =>
if stk.Cons? && stk.tail.Cons? then
var Cons(a1, Cons(a0, tail)) := stk;
Cons(a0 + a1, tail)
else // stack underflow
Nil // an alternative would be to return Cons(n, Nil) for an arbitrary value n--that is what Nipkow and Klein do
}
ghost function exec(ii: List<instr>, s: state, stk: stack): stack
{
match ii
case Nil => stk
case Cons(i, rest) => exec(rest, s, exec1(i, s, stk))
}
// ----- compilation -----
ghost function comp(a: aexp): List<instr>
{
match a
case N(n) => Cons(LOADI(n), Nil)
case V(x) => Cons(LOAD(x), Nil)
case Plus(a0, a1) => append(append(comp(a0), comp(a1)), Cons(ADD, Nil))
}
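// For example, comp(Plus(V("x"), N(1))) == Cons(LOAD("x"), Cons(LOADI(1), Cons(ADD, Nil))),
// and executing that list in state s on stack stk yields Cons(s("x") + 1, stk),
// exactly as the correctness lemma below promises.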
lemma CorrectCompilation(a: aexp, s: state, stk: stack)
ensures exec(comp(a), s, stk) == Cons(aval(a, s), stk)
{
match a
case N(n) =>
case V(x) =>
case Plus(a0, a1) =>
// This calc statement spells out the proof as a series of equality-preserving steps. Each
// expression in the calculation is terminated by a semi-colon. In some cases, a hint
// for the step is needed. Such hints are given in curly braces.
calc {
exec(comp(a), s, stk);
// definition of comp on Plus
exec(append(append(comp(a0), comp(a1)), Cons(ADD, Nil)), s, stk);
{ ExecAppend(append(comp(a0), comp(a1)), Cons(ADD, Nil), s, stk); }
exec(Cons(ADD, Nil), s, exec(append(comp(a0), comp(a1)), s, stk));
{ ExecAppend(comp(a0), comp(a1), s, stk); }
exec(Cons(ADD, Nil), s, exec(comp(a1), s, exec(comp(a0), s, stk)));
{ CorrectCompilation(a0, s, stk); }
exec(Cons(ADD, Nil), s, exec(comp(a1), s, Cons(aval(a0, s), stk)));
{ CorrectCompilation(a1, s, Cons(aval(a0, s), stk)); }
exec(Cons(ADD, Nil), s, Cons(aval(a1, s), Cons(aval(a0, s), stk)));
// definition of exec and exec1 on ADD
Cons(aval(a1, s) + aval(a0, s), stk);
// definition of aval on Plus
Cons(aval(a, s), stk);
}
}
lemma ExecAppend(ii0: List<instr>, ii1: List<instr>, s: state, stk: stack)
ensures exec(append(ii0, ii1), s, stk) == exec(ii1, s, exec(ii0, s, stk))
{
// the proof (which is by induction) is done automatically
}
| // RUN: %dafny /proverOpt:O:smt.qi.eager_threshold=30 /compile:0 /rprint:"%t.rprint" "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
// This file is a Dafny encoding of chapter 3 from "Concrete Semantics: With Isabelle/HOL" by
// Tobias Nipkow and Gerwin Klein.
// ----- lists -----
datatype List<T> = Nil | Cons(head: T, tail: List<T>)
ghost function append(xs: List, ys: List): List
{
match xs
case Nil => ys
case Cons(x, tail) => Cons(x, append(tail, ys))
}
// ----- arithmetic expressions -----
type vname = string // variable names
datatype aexp = N(n: int) | V(vname) | Plus(aexp, aexp) // arithmetic expressions
type val = int
type state = vname -> val
ghost function aval(a: aexp, s: state): val
{
match a
case N(n) => n
case V(x) => s(x)
case Plus(a0, a1) => aval(a0, s) + aval(a1, s)
}
lemma Example0()
{
var y := aval(Plus(N(3), V("x")), x => 0);
// The following line confirms that y is 3. If you don't know what y is, you can use the
// verification debugger to figure it out, like this: Put any value in the assert (for example,
// "assert y == 0;". If you're lucky and picked the right value, the verifier will prove the
// assertion for you. If the verifier says it's unable to prove it, then click on the error
// (in the Dafny IDE), which brings up the verification debugger. There, inspect the value
// of y. This is probably the right value, but due to incompleteness in the verifier, it
// could happen that the value you see is some value that the verifier wasn't able to properly
// exclude. Therefore, it's best to now take the value you see in the verification debugger,
// say K, and put that into the assert ("assert y == K;"), to have the verifier confirm that
// K really is the answer.
}
// ----- constant folding -----
ghost function asimp_const(a: aexp): aexp
{
match a
case N(n) => a
case V(x) => a
case Plus(a0, a1) =>
var as0, as1 := asimp_const(a0), asimp_const(a1);
if as0.N? && as1.N? then
N(as0.n + as1.n)
else
Plus(as0, as1)
}
lemma AsimpConst(a: aexp, s: state)
ensures aval(asimp_const(a), s) == aval(a, s)
{
// by induction
forall a' | a' < a {
AsimpConst(a', s); // this invokes the induction hypothesis for every a' that is structurally smaller than a
}
/* Here is an alternative proof. In the first two cases, the proof is trivial. The Plus case uses two invocations
of the induction hypothesis.
match a
case N(n) =>
case V(x) =>
case Plus(a0, a1) =>
AsimpConst(a0, s);
AsimpConst(a1, s);
*/
}
// more constant folding
ghost function plus(a0: aexp, a1: aexp): aexp
{
if a0.N? && a1.N? then
N(a0.n + a1.n)
else if a0.N? then
if a0.n == 0 then a1 else Plus(a0, a1)
else if a1.N? then
if a1.n == 0 then a0 else Plus(a0, a1)
else
Plus(a0, a1)
}
lemma AvalPlus(a0: aexp, a1: aexp, s: state)
ensures aval(plus(a0, a1), s) == aval(a0, s) + aval(a1, s)
{
// this proof is done automatically
}
ghost function asimp(a: aexp): aexp
{
match a
case N(n) => a
case V(x) => a
case Plus(a0, a1) => plus(asimp(a0), asimp(a1))
}
lemma AsimpCorrect(a: aexp, s: state)
ensures aval(asimp(a), s) == aval(a, s)
{
// call the induction hypothesis on every value a' that is structurally smaller than a
forall a' | a' < a { AsimpCorrect(a', s); }
}
// The following lemma is not in the Nipkow and Klein book, but it's a fun one to prove.
lemma ASimplInvolutive(a: aexp)
ensures asimp(asimp(a)) == asimp(a)
{
}
// ----- boolean expressions -----
datatype bexp = Bc(v: bool) | Not(bexp) | And(bexp, bexp) | Less(aexp, aexp)
ghost function bval(b: bexp, s: state): bool
{
match b
case Bc(v) => v
case Not(b) => !bval(b, s)
case And(b0, b1) => bval(b0, s) && bval(b1, s)
case Less(a0, a1) => aval(a0, s) < aval(a1, s)
}
// constant folding for booleans
ghost function not(b: bexp): bexp
{
match b
case Bc(b0) => Bc(!b0)
case Not(b0) => b0 // this case is not in the Nipkow and Klein book, but it seems a nice one to include
case And(_, _) => Not(b)
case Less(_, _) => Not(b)
}
ghost function and(b0: bexp, b1: bexp): bexp
{
if b0.Bc? then
if b0.v then b1 else b0
else if b1.Bc? then
if b1.v then b0 else b1
else
And(b0, b1)
}
ghost function less(a0: aexp, a1: aexp): bexp
{
if a0.N? && a1.N? then
Bc(a0.n < a1.n)
else
Less(a0, a1)
}
ghost function bsimp(b: bexp): bexp
{
match b
case Bc(v) => b
case Not(b0) => not(bsimp(b0))
case And(b0, b1) => and(bsimp(b0), bsimp(b1))
case Less(a0, a1) => less(asimp(a0), asimp(a1))
}
lemma BsimpCorrect(b: bexp, s: state)
ensures bval(bsimp(b), s) == bval(b, s)
{
/* Here is one proof, which uses the induction hypothesis on anything smaller than b and also invokes
the lemma AsimpCorrect on every arithmetic expression.
forall b' | b' < b { BsimpCorrect(b', s); }
forall a { AsimpCorrect(a, s); }
Yet another possibility is to mark the lemma with {:induction b} and to use the following line in
the body:
forall a { AsimpCorrect(a, s); }
*/
// Here is another proof, which makes explicit the uses of the induction hypothesis and the other lemma.
match b
case Bc(v) =>
case Not(b0) =>
BsimpCorrect(b0, s);
case And(b0, b1) =>
BsimpCorrect(b0, s); BsimpCorrect(b1, s);
case Less(a0, a1) =>
AsimpCorrect(a0, s); AsimpCorrect(a1, s);
}
// ----- stack machine -----
datatype instr = LOADI(val) | LOAD(vname) | ADD
type stack = List<val>
ghost function exec1(i: instr, s: state, stk: stack): stack
{
match i
case LOADI(n) => Cons(n, stk)
case LOAD(x) => Cons(s(x), stk)
case ADD =>
if stk.Cons? && stk.tail.Cons? then
var Cons(a1, Cons(a0, tail)) := stk;
Cons(a0 + a1, tail)
else // stack underflow
Nil // an alternative would be to return Cons(n, Nil) for an arbitrary value n--that is what Nipkow and Klein do
}
ghost function exec(ii: List<instr>, s: state, stk: stack): stack
{
match ii
case Nil => stk
case Cons(i, rest) => exec(rest, s, exec1(i, s, stk))
}
// ----- compilation -----
ghost function comp(a: aexp): List<instr>
{
match a
case N(n) => Cons(LOADI(n), Nil)
case V(x) => Cons(LOAD(x), Nil)
case Plus(a0, a1) => append(append(comp(a0), comp(a1)), Cons(ADD, Nil))
}
lemma CorrectCompilation(a: aexp, s: state, stk: stack)
ensures exec(comp(a), s, stk) == Cons(aval(a, s), stk)
{
match a
case N(n) =>
case V(x) =>
case Plus(a0, a1) =>
// This calc statement spells out the proof as a series of equality-preserving steps. Each
// expression in the calculation is terminated by a semi-colon. In some cases, a hint
// for the step is needed. Such hints are given in curly braces.
calc {
exec(comp(a), s, stk);
// definition of comp on Plus
exec(append(append(comp(a0), comp(a1)), Cons(ADD, Nil)), s, stk);
{ ExecAppend(append(comp(a0), comp(a1)), Cons(ADD, Nil), s, stk); }
exec(Cons(ADD, Nil), s, exec(append(comp(a0), comp(a1)), s, stk));
{ ExecAppend(comp(a0), comp(a1), s, stk); }
exec(Cons(ADD, Nil), s, exec(comp(a1), s, exec(comp(a0), s, stk)));
{ CorrectCompilation(a0, s, stk); }
exec(Cons(ADD, Nil), s, exec(comp(a1), s, Cons(aval(a0, s), stk)));
{ CorrectCompilation(a1, s, Cons(aval(a0, s), stk)); }
exec(Cons(ADD, Nil), s, Cons(aval(a1, s), Cons(aval(a0, s), stk)));
// definition of exec and exec1 on ADD
Cons(aval(a1, s) + aval(a0, s), stk);
// definition of aval on Plus
Cons(aval(a, s), stk);
}
}
lemma ExecAppend(ii0: List<instr>, ii1: List<instr>, s: state, stk: stack)
ensures exec(append(ii0, ii1), s, stk) == exec(ii1, s, exec(ii0, s, stk))
{
// the proof (which is by induction) is done automatically
}
|
324 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_from dafny main repo_dafny4_Primes.dfy | // RUN: %testDafnyForEachResolver "%s"
ghost predicate IsPrime(n: int)
{
2 <= n && forall m :: 2 <= m < n ==> n % m != 0 // WISH It would be great to think about the status of modulo as a trigger
}
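// For example, IsPrime(2), IsPrime(3), and IsPrime(5) hold, while IsPrime(1) and IsPrime(6) do not.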
// The following theorem shows that there is an infinite number of primes
lemma AlwaysMorePrimes(k: int)
ensures exists p :: k <= p && IsPrime(p)
{
var j, s := 0, {};
while true
invariant AllPrimes(s, j)
decreases k - j
{
var p := GetLargerPrime(s, j);
if k <= p { return; }
j, s := p, set x | 2 <= x <= p && IsPrime(x);
}
}
// Here is an alternative formulation of the theorem
lemma NoFiniteSetContainsAllPrimes(s: set<int>)
ensures exists p :: IsPrime(p) && p !in s
{
AlwaysMorePrimes(if s == {} then 0 else PickLargest(s) + 1);
}
// ------------------------- lemmas and auxiliary definitions
ghost predicate AllPrimes(s: set<int>, bound: int)
{
// s contains only primes
(forall x :: x in s ==> IsPrime(x)) &&
// every prime up to "bound" is included in s
(forall p :: IsPrime(p) && p <= bound ==> p in s)
}
lemma GetLargerPrime(s: set<int>, bound: int) returns (p: int)
requires AllPrimes(s, bound)
ensures bound < p && IsPrime(p)
{
var q := product(s);
if exists p :: bound < p <= q && IsPrime(p) {
p :| bound < p <= q && IsPrime(p);
} else {
ProductPlusOneIsPrime(s, q);
p := q+1;
if p <= bound { // by contradiction, establish bound < p
assert p in s;
product_property(s);
assert false;
}
}
}
ghost function product(s: set<int>): int
{
if s == {} then 1 else
var a := PickLargest(s); a * product(s - {a})
}
lemma product_property(s: set<int>)
requires forall x :: x in s ==> 1 <= x
ensures 1 <= product(s) && forall x :: x in s ==> x <= product(s)
{
if s != {} {
var a := PickLargest(s);
var s' := s - {a};
assert s == s' + {a};
product_property(s');
MulPos(a, product(s'));
}
}
lemma ProductPlusOneIsPrime(s: set<int>, q: int)
requires AllPrimes(s, q) && q == product(s)
ensures IsPrime(q+1)
{
var p := q+1;
calc {
true;
{ product_property(s); }
2 <= p;
}
forall m | 2 <= m <= q && IsPrime(m)
ensures p % m != 0
{
assert m in s; // because AllPrimes(s, q) && m <= q && IsPrime(m)
RemoveFactor(m, s);
var l := product(s-{m});
assert m*l == q;
MulDivMod(m, l, q, 1);
}
assert IsPrime_Alt(q+1);
AltPrimeDefinition(q+1);
}
// The following lemma is essentially just associativity and commutativity of multiplication.
// To get this proof through, it is necessary to know that if x!=y and y==Pick...(s), then
// also y==Pick...(s - {x}). It is for this reason that we use PickLargest, instead of
// picking an arbitrary element from s.
lemma RemoveFactor(x: int, s: set<int>)
requires x in s
ensures product(s) == x * product(s - {x})
{
var y := PickLargest(s);
if x != y {
calc {
product(s);
y * product(s - {y});
{ RemoveFactor(x, s - {y}); }
y * x * product(s - {y} - {x});
x * y * product(s - {y} - {x});
{ assert s - {y} - {x} == s - {x} - {y}; }
x * y * product(s - {x} - {y});
/* FIXME: This annotation wasn't needed before the introduction
* of auto-triggers. It's not needed if one adds {:no_trigger}
* to the forall y :: y in s ==> y <= x part of PickLargest, but that
* boils down to z3 picking $Box(...) as a good trigger
*/
// FIXME: the parens shouldn't be needed around (s - {x})
{ assert y in (s - {x}); }
{ assert y == PickLargest(s - {x}); }
x * product(s - {x});
}
}
}
// This definition is like IsPrime above, except that the quantification is only over primes.
ghost predicate IsPrime_Alt(n: int)
{
2 <= n && forall m :: 2 <= m < n && IsPrime(m) ==> n % m != 0
}
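// For example, IsPrime_Alt(7) only requires checking the prime candidates 2, 3, and 5 as divisors;
// composites such as 4 and 6 are ruled out by the IsPrime(m) guard in the quantifier.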
// To show that n is prime, it suffices to prove that it satisfies the alternate definition
lemma AltPrimeDefinition(n: int)
requires IsPrime_Alt(n)
ensures IsPrime(n)
{
forall m | 2 <= m < n
ensures n % m != 0
{
if !IsPrime(m) {
var a, b := Composite(m);
if n % m == 0 { // proof by contradiction
var k := n / m;
calc {
true;
k == n / m;
m * k == n;
a * b * k == n;
==> { MulDivMod(a, b*k, n, 0); }
n % a == 0;
==> // IsPrime_Alt
!(2 <= a < n && IsPrime(a));
{ assert 2 <= a < m < n; }
!IsPrime(a);
false;
}
}
}
}
}
lemma Composite(c: int) returns (a: int, b: int)
requires 2 <= c && !IsPrime(c)
ensures 2 <= a < c && 2 <= b && a * b == c
ensures IsPrime(a)
{
calc {
true;
!IsPrime(c);
!(2 <= c && forall m :: 2 <= m < c ==> c % m != 0);
exists m :: 2 <= m < c && c % m == 0;
}
a :| 2 <= a < c && c % a == 0;
b := c / a;
assert 2 <= a < c && 2 <= b && a * b == c;
if !IsPrime(a) {
var x, y := Composite(a);
a, b := x, y*b;
}
}
ghost function PickLargest(s: set<int>): int
requires s != {}
{
LargestElementExists(s);
var x :| x in s && forall y :: y in s ==> y <= x;
x
}
lemma LargestElementExists(s: set<int>)
requires s != {}
ensures exists x :: x in s && forall y :: y in s ==> y <= x
{
var s' := s;
while true
invariant s' != {} && s' <= s
invariant forall x,y :: x in s' && y in s && y !in s' ==> y <= x
decreases s'
{
var x :| x in s'; // pick something
if forall y :: y in s' ==> y <= x {
// good pick
return;
} else {
// constrain the pick further
var y :| y in s' && x < y;
s' := set z | z in s && x < z;
assert y in s';
}
}
}
lemma MulPos(a: int, b: int)
requires 1 <= a && 1 <= b
ensures a <= a * b
{
if b == 1 {
assert a * b == a;
} else {
assert a * b == a * (b - 1) + a;
MulPos(a, b - 1);
}
}
// This axiom about % is needed. Unfortunately, Z3 seems incapable of proving it.
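// For example, with a == 3, b == 4, c == 12, and j == 2, it says that (12+2) % 3 == 2.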
lemma MulDivMod(a: nat, b: nat, c: nat, j: nat)
requires a * b == c && j < a
ensures (c+j) % a == j
| // RUN: %testDafnyForEachResolver "%s"
ghost predicate IsPrime(n: int)
{
2 <= n && forall m :: 2 <= m < n ==> n % m != 0 // WISH It would be great to think about the status of modulo as a trigger
}
// The following theorem shows that there is an infinite number of primes
lemma AlwaysMorePrimes(k: int)
ensures exists p :: k <= p && IsPrime(p)
{
var j, s := 0, {};
while true
{
var p := GetLargerPrime(s, j);
if k <= p { return; }
j, s := p, set x | 2 <= x <= p && IsPrime(x);
}
}
// Here is an alternative formulation of the theorem
lemma NoFiniteSetContainsAllPrimes(s: set<int>)
ensures exists p :: IsPrime(p) && p !in s
{
AlwaysMorePrimes(if s == {} then 0 else PickLargest(s) + 1);
}
// ------------------------- lemmas and auxiliary definitions
ghost predicate AllPrimes(s: set<int>, bound: int)
{
// s contains only primes
(forall x :: x in s ==> IsPrime(x)) &&
// every prime up to "bound" is included in s
(forall p :: IsPrime(p) && p <= bound ==> p in s)
}
lemma GetLargerPrime(s: set<int>, bound: int) returns (p: int)
requires AllPrimes(s, bound)
ensures bound < p && IsPrime(p)
{
var q := product(s);
if exists p :: bound < p <= q && IsPrime(p) {
p :| bound < p <= q && IsPrime(p);
} else {
ProductPlusOneIsPrime(s, q);
p := q+1;
if p <= bound { // by contradiction, establish bound < p
product_property(s);
}
}
}
ghost function product(s: set<int>): int
{
if s == {} then 1 else
var a := PickLargest(s); a * product(s - {a})
}
lemma product_property(s: set<int>)
requires forall x :: x in s ==> 1 <= x
ensures 1 <= product(s) && forall x :: x in s ==> x <= product(s)
{
if s != {} {
var a := PickLargest(s);
var s' := s - {a};
product_property(s');
MulPos(a, product(s'));
}
}
lemma ProductPlusOneIsPrime(s: set<int>, q: int)
requires AllPrimes(s, q) && q == product(s)
ensures IsPrime(q+1)
{
var p := q+1;
calc {
true;
{ product_property(s); }
2 <= p;
}
forall m | 2 <= m <= q && IsPrime(m)
ensures p % m != 0
{
RemoveFactor(m, s);
var l := product(s-{m});
MulDivMod(m, l, q, 1);
}
AltPrimeDefinition(q+1);
}
// The following lemma is essentially just associativity and commutativity of multiplication.
// To get this proof through, it is necessary to know that if x!=y and y==Pick...(s), then
// also y==Pick...(s - {x}). It is for this reason that we use PickLargest, instead of
// picking an arbitrary element from s.
lemma RemoveFactor(x: int, s: set<int>)
requires x in s
ensures product(s) == x * product(s - {x})
{
var y := PickLargest(s);
if x != y {
calc {
product(s);
y * product(s - {y});
{ RemoveFactor(x, s - {y}); }
y * x * product(s - {y} - {x});
x * y * product(s - {y} - {x});
{ assert s - {y} - {x} == s - {x} - {y}; }
x * y * product(s - {x} - {y});
/* FIXME: This annotation wasn't needed before the introduction
* of auto-triggers. It's not needed if one adds {:no_trigger}
* to the forall y :: y in s ==> y <= x part of PickLargest, but that
* boils down to z3 picking $Box(...) as a good trigger
*/
// FIXME: the parens shouldn't be needed around (s - {x})
{ assert y in (s - {x}); }
{ assert y == PickLargest(s - {x}); }
x * product(s - {x});
}
}
}
// This definition is like IsPrime above, except that the quantification is only over primes.
ghost predicate IsPrime_Alt(n: int)
{
2 <= n && forall m :: 2 <= m < n && IsPrime(m) ==> n % m != 0
}
// To show that n is prime, it suffices to prove that it satisfies the alternate definition
lemma AltPrimeDefinition(n: int)
requires IsPrime_Alt(n)
ensures IsPrime(n)
{
forall m | 2 <= m < n
ensures n % m != 0
{
if !IsPrime(m) {
var a, b := Composite(m);
if n % m == 0 { // proof by contradiction
var k := n / m;
calc {
true;
k == n / m;
m * k == n;
a * b * k == n;
==> { MulDivMod(a, b*k, n, 0); }
n % a == 0;
==> // IsPrime_Alt
!(2 <= a < n && IsPrime(a));
{ assert 2 <= a < m < n; }
!IsPrime(a);
false;
}
}
}
}
}
lemma Composite(c: int) returns (a: int, b: int)
requires 2 <= c && !IsPrime(c)
ensures 2 <= a < c && 2 <= b && a * b == c
ensures IsPrime(a)
{
calc {
true;
!IsPrime(c);
!(2 <= c && forall m :: 2 <= m < c ==> c % m != 0);
exists m :: 2 <= m < c && c % m == 0;
}
a :| 2 <= a < c && c % a == 0;
b := c / a;
if !IsPrime(a) {
var x, y := Composite(a);
a, b := x, y*b;
}
}
ghost function PickLargest(s: set<int>): int
requires s != {}
{
LargestElementExists(s);
var x :| x in s && forall y :: y in s ==> y <= x;
x
}
lemma LargestElementExists(s: set<int>)
requires s != {}
ensures exists x :: x in s && forall y :: y in s ==> y <= x
{
var s' := s;
while true
{
var x :| x in s'; // pick something
if forall y :: y in s' ==> y <= x {
// good pick
return;
} else {
// constrain the pick further
var y :| y in s' && x < y;
s' := set z | z in s && x < z;
}
}
}
lemma MulPos(a: int, b: int)
requires 1 <= a && 1 <= b
ensures a <= a * b
{
if b == 1 {
} else {
MulPos(a, b - 1);
}
}
// This axiom about % is needed. Unfortunately, Z3 seems incapable of proving it.
lemma MulDivMod(a: nat, b: nat, c: nat, j: nat)
requires a * b == c && j < a
ensures (c+j) % a == j
|
325 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_from_dafny_main_repo_dafny0_snapshots_Inputs_Snapshots1.dfy | method M()
{
N();
assert false;
}
method N()
ensures P();
predicate P()
{
false
}
| method M()
{
N();
}
method N()
ensures P();
predicate P()
{
false
}
|
326 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_from_dafny_main_repo_dafny0_snapshots_Inputs_Snapshots5.dfy | method M()
{
N();
if (*)
{
}
else
{
assert (forall b: bool :: b || !b) || 0 != 0;
}
N();
assert (forall b: bool :: b || !b) || 3 != 3;
if (*)
{
}
else
{
assert (forall b: bool :: b || !b) || 1 != 1;
}
}
method N()
ensures (forall b: bool :: b || !b) || 2 != 2;
| method M()
{
N();
if (*)
{
}
else
{
}
N();
if (*)
{
}
else
{
}
}
method N()
ensures (forall b: bool :: b || !b) || 2 != 2;
|
327 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_lightening_verifier.dfy | class CrashableMem<T> {
var mem_ : array<T>;
method read(off : int) returns (r : T)
requires 0 <= off < mem_.Length;
{
return mem_[off];
}
method write(off : int, val : T)
requires 0 <= off < mem_.Length;
modifies mem_;
{
mem_[off] := val;
}
}
datatype GhostState = GS(
num_entry : int,
log : seq<int>,
mem_len : int,
mem : seq<int>,
old_mem : seq<int>,
ideal_mem : seq<int>,
countdown : int,
first_log_pos : map<int, int>
)
datatype GhostOp = WriteMem(off : int, val : int)
| WriteLog(off : int, val : int)
predicate ghost_state_inv(s : GhostState) {
0 <= s.num_entry * 2 < |s.log|
&& |s.log| > 0
&& |s.mem| == s.mem_len && |s.ideal_mem| == s.mem_len && |s.old_mem| == s.mem_len
&& s.countdown >= 0
}
function init_ghost_state(log : seq<int>, mem : seq<int>, countdown : int) : GhostState
requires |log| > 0;
requires countdown >= 0;
ensures ghost_state_inv(init_ghost_state(log, mem, countdown));
{
GS(0, log[..], |mem|, mem[..], mem[..], mem[..], countdown, map[])
}
function mem_write(s : GhostState, off: int, val: int) : GhostState
requires ghost_state_inv(s);
requires 0 <= off < s.mem_len;
ensures ghost_state_inv(mem_write(s, off, val));
{
var new_mem := s.mem[off := val];
var new_ideal_mem := s.ideal_mem[off := val];
s.(mem := new_mem,
ideal_mem := new_ideal_mem)
}
function log_write(s : GhostState, off : int, val: int) : GhostState
requires ghost_state_inv(s);
requires 0 <= off < |s.log|;
ensures ghost_state_inv(log_write(s, off, val));
{
s.(log := s.log[off := val])
}
predicate valid_op(s : GhostState, op : GhostOp)
{
match op
case WriteMem(off, val) => 0 <= off < |s.mem|
case WriteLog(off, val) => 0 <= off < |s.log|
}
function countdown (s : GhostState) : GhostState
{
if s.countdown > 0 then
s.(countdown := s.countdown - 1)
else
s
}
function normal_step (s : GhostState, op : GhostOp) : GhostState
requires valid_op(s, op);
requires ghost_state_inv(s);
ensures ghost_state_inv(normal_step(s, op));
{
match op
case WriteMem(off, val) => mem_write(s, off, val)
case WriteLog(off, val) => log_write(s, off, val)
}
function ghost_step (s : GhostState, op : GhostOp) : (GhostState, bool)
requires valid_op(s, op);
requires ghost_state_inv(s);
ensures ghost_state_inv(normal_step(s, op));
{
if s.countdown > 0 then
var s' := normal_step(s, op);
(s'.(countdown := s.countdown - 1), true)
else
(s, false)
}
function mem_write_step (s : GhostState, off : int, val : int) : (GhostState, bool)
requires 0 <= off < s.mem_len;
requires ghost_state_inv(s);
{
ghost_step(s, WriteMem(off, val))
}
function log_write_step (s : GhostState, off : int, val : int) : (GhostState, bool)
requires 0 <= off < |s.log|;
requires ghost_state_inv(s);
{
ghost_step(s, WriteLog(off, val))
}
function set_num_entry (s : GhostState, n : int) : (GhostState, bool)
requires 0 <= n * 2 < |s.log|;
{
if s.countdown > 0 then
(s.(num_entry := n,
countdown := s.countdown - 1),
true)
else
(s, false)
}
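// The countdown models crash injection: each successful step consumes one tick, and once it
// reaches zero every later write is silently dropped, which is what `crashed` below captures.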
predicate crashed (s : GhostState)
{
s.countdown <= 0
}
predicate old_mem_equiv (s : GhostState)
requires ghost_state_inv(s);
{
(forall o :: !(o in s.first_log_pos) && 0 <= o < |s.mem| ==> s.mem[o] == s.old_mem[o])
}
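// first_log_pos maps each memory offset recorded in the undo log during the current transaction
// to the index of its first log entry; that entry stores the offset and its pre-transaction value,
// which is what lets ghost_recover replay the log backwards and restore old_mem.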
predicate ghost_tx_inv (s : GhostState)
{
ghost_state_inv(s) &&
(forall o :: o in s.first_log_pos ==> 0 <= o < s.mem_len) &&
(forall o :: o in s.first_log_pos ==> 0 <= s.first_log_pos[o] < s.num_entry) &&
(forall o :: o in s.first_log_pos ==> 0 <= s.first_log_pos[o] * 2 + 1 < |s.log|) &&
(forall o :: o in s.first_log_pos ==> s.log[s.first_log_pos[o] * 2] == o) &&
(forall o :: o in s.first_log_pos ==> s.log[s.first_log_pos[o] * 2 + 1] == s.old_mem[o]) &&
(forall o :: o in s.first_log_pos ==> forall i :: 0 <= i < s.first_log_pos[o] ==> s.log[i * 2] != o) &&
(forall i :: 0 <= i < s.num_entry ==> s.log[i * 2] in s.first_log_pos)
}
function ghost_begin_tx (s : GhostState) : GhostState
requires ghost_state_inv(s);
requires s.num_entry == 0;
ensures ghost_state_inv(ghost_begin_tx(s));
ensures ghost_tx_inv(ghost_begin_tx(s));
ensures old_mem_equiv(ghost_begin_tx(s));
{
var (s', f) := set_num_entry(s, 0);
var s' := s'.(first_log_pos := map[], old_mem := s.mem[..]);
s'
}
function ghost_commit_tx (s : GhostState) : (GhostState, bool)
requires ghost_tx_inv(s);
requires old_mem_equiv(s);
ensures ghost_state_inv(ghost_commit_tx(s).0);
ensures ghost_tx_inv(ghost_commit_tx(s).0);
ensures !ghost_commit_tx(s).1 ==> old_mem_equiv(ghost_commit_tx(s).0);
ensures ghost_commit_tx(s).1 ==> ghost_commit_tx(s).0.num_entry == 0;
{
var s' := s;
var (s', f) := set_num_entry(s', 0);
var s' := if f then s'.(first_log_pos := map[]) else s';
(s', f)
}
function ghost_tx_write (s0 : GhostState, off : int, val : int) : GhostState
requires ghost_tx_inv(s0);
requires old_mem_equiv(s0);
requires 0 <= off < s0.mem_len;
requires 0 <= s0.num_entry * 2 + 2 < |s0.log|;
ensures ghost_tx_inv(ghost_tx_write(s0, off, val));
ensures old_mem_equiv(ghost_tx_write(s0, off, val));
ensures |ghost_tx_write(s0, off, val).mem| == s0.mem_len;
ensures !crashed(ghost_tx_write(s0, off, val)) ==> ghost_tx_write(s0, off, val).mem[off] == val;
{
var s := s0;
var log_idx := s.num_entry;
var log_off := log_idx * 2;
var old_val := s.mem[off];
var (s, f) := log_write_step(s, log_off, off);
var (s, f) := log_write_step(s, log_off + 1, old_val);
var (s, f) := set_num_entry(s, log_idx + 1);
var s := if f && !(off in s.first_log_pos)
then s.(first_log_pos := s.first_log_pos[off := log_idx])
else s;
var (s, f) := mem_write_step(s, off, val);
s
}
function reverse_recovery (s0 : GhostState, idx : int) : GhostState
decreases idx;
requires ghost_tx_inv(s0);
requires old_mem_equiv(s0);
requires 0 <= idx <= s0.num_entry;
ensures ghost_tx_inv(reverse_recovery(s0, idx));
ensures old_mem_equiv(reverse_recovery(s0, idx));
ensures s0.old_mem == reverse_recovery(s0, idx).old_mem;
ensures s0.first_log_pos == reverse_recovery(s0, idx).first_log_pos;
ensures forall o :: o in s0.first_log_pos && s0.first_log_pos[o] >= idx ==>
reverse_recovery(s0, idx).mem[o] == s0.mem[o];
ensures forall o :: o in s0.first_log_pos && 0 <= s0.first_log_pos[o] < idx ==>
reverse_recovery(s0, idx).mem[o] == s0.old_mem[o];
{
if idx == 0 then
assert old_mem_equiv(s0);
s0
else
var s := s0;
var i := idx - 1;
var off := s.log[i * 2];
var val := s.log[i * 2 + 1];
var s := s.(mem := s.mem[off := val]);
assert off in s.first_log_pos;
var s := reverse_recovery(s, idx - 1);
assert i == idx - 1;
assert forall o :: o in s.first_log_pos && 0 <= s.first_log_pos[o] < i ==>
s.mem[o] == s.old_mem[o];
assert forall o :: o in s.first_log_pos && s.first_log_pos[o] == i ==>
o == off && val == s.old_mem[o];
assert forall o :: o in s.first_log_pos && s.first_log_pos[o] == i ==>
s.mem[o] == val;
assert old_mem_equiv(s);
s
}
function ghost_recover (s0 : GhostState) : GhostState
requires ghost_tx_inv(s0);
requires old_mem_equiv(s0);
ensures ghost_recover(s0).mem == s0.old_mem;
ensures ghost_recover(s0).num_entry == 0;
{
var s := reverse_recovery(s0, s0.num_entry);
assert (old_mem_equiv(s));
assert (forall o :: o in s.first_log_pos ==> s.mem[o] == s0.old_mem[o]);
assert forall i :: 0 <= i < |s.mem| ==> s.mem[i] == s0.old_mem[i];
s.(num_entry := 0)
}
class UndoLog {
var log_ : array<int>;
var mem_ : array<int>;
var impl_countdown : int;
ghost var gs : GhostState;
constructor () {}
predicate ghost_state_equiv(gs : GhostState)
reads this;
reads mem_;
reads log_;
{
log_.Length > 0 &&
mem_[..] == gs.mem &&
log_[1..] == gs.log &&
log_[0] == gs.num_entry &&
impl_countdown == gs.countdown
}
predicate state_inv()
reads this;
reads log_;
{
log_.Length > 1 && 0 <= log_[0] && (log_[0] * 2) < log_.Length
&& log_.Length < 0xffffffff && mem_ != log_
&& forall i : int :: 0 <= i < log_[0] ==> 0 <= log_[i * 2 + 1] < mem_.Length
&& impl_countdown >= 0
}
method init(log_size : int, mem_size : int, countdown : int)
requires log_size > 1;
requires mem_size > 0;
requires log_size < 0xffffffff;
modifies this;
ensures fresh(log_);
ensures fresh(mem_);
ensures state_inv();
ensures ghost_state_equiv(gs);
{
log_ := new int[log_size];
mem_ := new int[mem_size];
log_[0] := 0;
impl_countdown := countdown;
gs := GS(0, log_[1..], mem_size, mem_[..], mem_[..], mem_[..], countdown, map[]);
}
method impl_countdown_dec()
modifies this;
requires impl_countdown > 0;
requires mem_ != log_;
ensures mem_ != log_;
ensures impl_countdown == old(impl_countdown) - 1;
ensures impl_countdown >= 0;
ensures gs == old(gs);
ensures log_[..] == old(log_)[..];
ensures mem_[..] == old(mem_)[..];
{
impl_countdown := impl_countdown - 1;
}
method write_mem(off : int, val : int)
modifies this;
modifies mem_;
requires 0 <= off < mem_.Length;
requires mem_ != log_;
requires ghost_state_inv(gs);
requires ghost_state_equiv(gs);
requires 0 <= off < gs.mem_len;
ensures mem_ == old(mem_);
ensures log_ == old(log_);
ensures gs == old(gs);
ensures ghost_state_equiv(mem_write_step(gs, off, val).0);
{
if (impl_countdown > 0) {
mem_[off] := val;
impl_countdown := impl_countdown - 1;
}
}
method write_log(off : int, val : int)
modifies this;
modifies log_;
requires 0 <= off <= |gs.log|;
requires mem_ != log_;
requires ghost_state_inv(gs);
requires ghost_state_equiv(gs);
requires off == 0 ==> 0 <= val * 2 < |gs.log|;
ensures mem_ != log_;
ensures mem_ == old(mem_);
ensures log_ == old(log_);
ensures log_.Length == old(log_).Length;
ensures mem_[..] == old(mem_)[..];
ensures log_[off] == val || log_[off] == old(log_)[off];
ensures forall i :: 0 <= i < log_.Length && i != off ==> log_[i] == old(log_)[i];
ensures gs == old(gs);
ensures off > 0 ==> ghost_state_equiv(log_write_step(gs, off - 1, val).0);
ensures off == 0 ==> ghost_state_equiv(set_num_entry(gs, val).0);
{
if (impl_countdown > 0) {
log_[off] := val;
impl_countdown := impl_countdown - 1;
}
}
method begin_tx()
modifies log_;
modifies this;
requires state_inv();
requires ghost_state_equiv(gs);
requires ghost_state_inv(gs);
requires log_[0] == 0;
ensures mem_ == old(mem_);
ensures log_ == old(log_);
ensures state_inv();
ensures ghost_state_equiv(gs);
ensures ghost_tx_inv(gs);
{
write_log(0, 0);
gs := ghost_begin_tx(gs);
assert state_inv();
}
method commit_tx()
modifies log_;
modifies this;
requires state_inv();
requires ghost_state_equiv(gs);
requires ghost_state_inv(gs);
requires ghost_tx_inv(gs);
requires old_mem_equiv(gs);
ensures mem_ == old(mem_);
ensures log_ == old(log_);
ensures ghost_state_equiv(gs);
ensures state_inv();
{
write_log(0, 0);
gs := ghost_commit_tx(gs).0;
}
method tx_write(offset: int, val : int)
modifies this;
modifies log_;
modifies mem_;
requires state_inv();
requires mem_ != log_;
requires 0 <= offset < mem_.Length;
requires ghost_state_equiv(gs);
requires ghost_tx_inv(gs);
requires old_mem_equiv(gs);
requires 0 <= log_[0] * 2 + 3 < log_.Length;
ensures ghost_state_equiv(gs);
ensures ghost_tx_inv(gs);
ensures old_mem_equiv(gs);
{
var log_idx := log_[0];
var log_off := log_idx * 2;
ghost var old_gs := gs;
write_log(log_off + 1, offset);
gs := log_write_step(gs, log_off, offset).0;
assert log_off + 1 > 0;
assert ghost_state_equiv(gs);
assert mem_ != log_;
var old_val := mem_[offset];
assert old_val == gs.mem[offset];
write_log(log_off + 2, old_val);
gs := log_write_step(gs, log_off + 1, old_val).0;
assert ghost_tx_inv(gs);
assert log_[0] == gs.num_entry;
assert log_.Length == |gs.log| + 1;
assert 0 <= gs.num_entry * 2 < |gs.log|;
write_log(0, log_idx + 1);
ghost var (s, f) := set_num_entry(gs, log_idx + 1);
s := if f && !(offset in s.first_log_pos)
then s.(first_log_pos := s.first_log_pos[offset := log_idx])
else s;
gs := s;
write_mem(offset, val);
gs := mem_write_step(gs, offset, val).0;
assert gs == ghost_tx_write(old_gs, offset, val);
}
// we assume that recover won't crash (though this code works when recover can fail)
method recover()
modifies log_;
modifies mem_;
modifies this;
requires state_inv();
requires ghost_tx_inv(gs);
requires old_mem_equiv(gs);
requires ghost_state_equiv(gs);
ensures gs == ghost_recover(old(gs));
ensures ghost_state_equiv(gs);
{
var log_len := log_[0];
assert log_len == gs.num_entry;
if (log_len > 0) {
var i := log_len - 1;
ghost var gs0 := gs;
while i >= 0
modifies mem_;
modifies this;
invariant log_ == old(log_);
invariant mem_ == old(mem_);
invariant unchanged(log_);
invariant -1 <= i < log_len;
invariant |gs.log| == |gs0.log|;
invariant ghost_state_equiv(gs);
invariant ghost_tx_inv(gs);
invariant old_mem_equiv(gs);
invariant reverse_recovery(gs0, log_len) == reverse_recovery(gs, i + 1);
decreases i;
{
assert ghost_state_equiv(gs);
assert 0 <= i < log_[0];
var o := i * 2 + 1;
var off := log_[o];
var val := log_[o + 1];
mem_[off] := val;
assert 0 <= off < mem_.Length;
assert gs.log[i * 2] == off;
assert gs.log[i * 2 + 1] == val;
gs := gs.(mem := gs.mem[off := val]);
i := i - 1;
}
assert ghost_state_equiv(gs);
} else {
assert ghost_state_equiv(gs);
}
log_[0] := 0;
gs := ghost_recover(old(gs));
assert ghost_state_equiv(gs);
}
}
lemma crash_safe_single_tx(init_log : seq<int>, init_mem : seq<int>,
countdown : int,
writes : seq<(int, int)>)
requires |init_log| > 0;
requires countdown >= 0;
requires forall i :: 0 <= i < |writes| ==>
0 <= writes[i].0 < |init_mem|;
requires 0 < |writes| * 2 < |init_log|;
{
var s := init_ghost_state(init_log, init_mem, countdown);
var end_mem := init_mem;
s := ghost_begin_tx(s);
assert s.num_entry == 0;
assert init_mem == s.old_mem;
var i := 0;
while i < |writes|
decreases |writes| - i;
invariant 0 <= i <= |writes|;
invariant s.mem_len == |init_mem|;
invariant s.mem_len == |end_mem|;
invariant 0 <= s.num_entry <= i;
invariant |init_log| == |s.log|;
invariant i * 2 < |s.log|;
invariant 0 <= s.num_entry * 2 < |s.log|;
invariant ghost_tx_inv(s);
invariant old_mem_equiv(s);
invariant init_mem == s.old_mem;
invariant !crashed(s) ==> forall i :: 0 <= i < |s.mem| ==> s.mem[i] == end_mem[i];
{
assert 0 <= i < |writes|;
assert 0 <= writes[i].0 < s.mem_len;
assert 0 <= s.num_entry * 2 + 2 < |s.log|;
s := ghost_tx_write(s, writes[i].0, writes[i].1);
end_mem := end_mem[writes[i].0 := writes[i].1];
assert !crashed(s) ==> s.mem[writes[i].0] == writes[i].1;
i := i + 1;
}
assert ghost_tx_inv(s);
assert old_mem_equiv(s);
var (s', c) := ghost_commit_tx(s);
assert c ==> !crashed(s);
if (c) {
assert !crashed(s);
assert s.mem == end_mem;
} else {
var recovered := ghost_recover(s');
assert recovered.mem == init_mem;
}
}
| class CrashableMem<T> {
var mem_ : array<T>;
method read(off : int) returns (r : T)
requires 0 <= off < mem_.Length;
{
return mem_[off];
}
method write(off : int, val : T)
requires 0 <= off < mem_.Length;
modifies mem_;
{
mem_[off] := val;
}
}
datatype GhostState = GS(
num_entry : int,
log : seq<int>,
mem_len : int,
mem : seq<int>,
old_mem : seq<int>,
ideal_mem : seq<int>,
countdown : int,
first_log_pos : map<int, int>
)
datatype GhostOp = WriteMem(off : int, val : int)
| WriteLog(off : int, val : int)
predicate ghost_state_inv(s : GhostState) {
0 <= s.num_entry * 2 < |s.log|
&& |s.log| > 0
&& |s.mem| == s.mem_len && |s.ideal_mem| == s.mem_len && |s.old_mem| == s.mem_len
&& s.countdown >= 0
}
function init_ghost_state(log : seq<int>, mem : seq<int>, countdown : int) : GhostState
requires |log| > 0;
requires countdown >= 0;
ensures ghost_state_inv(init_ghost_state(log, mem, countdown));
{
GS(0, log[..], |mem|, mem[..], mem[..], mem[..], countdown, map[])
}
function mem_write(s : GhostState, off: int, val: int) : GhostState
requires ghost_state_inv(s);
requires 0 <= off < s.mem_len;
ensures ghost_state_inv(mem_write(s, off, val));
{
var new_mem := s.mem[off := val];
var new_ideal_mem := s.ideal_mem[off := val];
s.(mem := new_mem,
ideal_mem := new_ideal_mem)
}
function log_write(s : GhostState, off : int, val: int) : GhostState
requires ghost_state_inv(s);
requires 0 <= off < |s.log|;
ensures ghost_state_inv(log_write(s, off, val));
{
s.(log := s.log[off := val])
}
predicate valid_op(s : GhostState, op : GhostOp)
{
match op
case WriteMem(off, val) => 0 <= off < |s.mem|
case WriteLog(off, val) => 0 <= off < |s.log|
}
function countdown (s : GhostState) : GhostState
{
if s.countdown > 0 then
s.(countdown := s.countdown - 1)
else
s
}
function normal_step (s : GhostState, op : GhostOp) : GhostState
requires valid_op(s, op);
requires ghost_state_inv(s);
ensures ghost_state_inv(normal_step(s, op));
{
match op
case WriteMem(off, val) => mem_write(s, off, val)
case WriteLog(off, val) => log_write(s, off, val)
}
function ghost_step (s : GhostState, op : GhostOp) : (GhostState, bool)
requires valid_op(s, op);
requires ghost_state_inv(s);
ensures ghost_state_inv(normal_step(s, op));
{
if s.countdown > 0 then
var s' := normal_step(s, op);
(s'.(countdown := s.countdown - 1), true)
else
(s, false)
}
function mem_write_step (s : GhostState, off : int, val : int) : (GhostState, bool)
requires 0 <= off < s.mem_len;
requires ghost_state_inv(s);
{
ghost_step(s, WriteMem(off, val))
}
function log_write_step (s : GhostState, off : int, val : int) : (GhostState, bool)
requires 0 <= off < |s.log|;
requires ghost_state_inv(s);
{
ghost_step(s, WriteLog(off, val))
}
function set_num_entry (s : GhostState, n : int) : (GhostState, bool)
requires 0 <= n * 2 < |s.log|;
{
if s.countdown > 0 then
(s.(num_entry := n,
countdown := s.countdown - 1),
true)
else
(s, false)
}
predicate crashed (s : GhostState)
{
s.countdown <= 0
}
predicate old_mem_equiv (s : GhostState)
requires ghost_state_inv(s);
{
(forall o :: !(o in s.first_log_pos) && 0 <= o < |s.mem| ==> s.mem[o] == s.old_mem[o])
}
predicate ghost_tx_inv (s : GhostState)
{
ghost_state_inv(s) &&
(forall o :: o in s.first_log_pos ==> 0 <= o < s.mem_len) &&
(forall o :: o in s.first_log_pos ==> 0 <= s.first_log_pos[o] < s.num_entry) &&
(forall o :: o in s.first_log_pos ==> 0 <= s.first_log_pos[o] * 2 + 1 < |s.log|) &&
(forall o :: o in s.first_log_pos ==> s.log[s.first_log_pos[o] * 2] == o) &&
(forall o :: o in s.first_log_pos ==> s.log[s.first_log_pos[o] * 2 + 1] == s.old_mem[o]) &&
(forall o :: o in s.first_log_pos ==> forall i :: 0 <= i < s.first_log_pos[o] ==> s.log[i * 2] != o) &&
(forall i :: 0 <= i < s.num_entry ==> s.log[i * 2] in s.first_log_pos)
}
function ghost_begin_tx (s : GhostState) : GhostState
requires ghost_state_inv(s);
requires s.num_entry == 0;
ensures ghost_state_inv(ghost_begin_tx(s));
ensures ghost_tx_inv(ghost_begin_tx(s));
ensures old_mem_equiv(ghost_begin_tx(s));
{
var (s', f) := set_num_entry(s, 0);
var s' := s'.(first_log_pos := map[], old_mem := s.mem[..]);
s'
}
function ghost_commit_tx (s : GhostState) : (GhostState, bool)
requires ghost_tx_inv(s);
requires old_mem_equiv(s);
ensures ghost_state_inv(ghost_commit_tx(s).0);
ensures ghost_tx_inv(ghost_commit_tx(s).0);
ensures !ghost_commit_tx(s).1 ==> old_mem_equiv(ghost_commit_tx(s).0);
ensures ghost_commit_tx(s).1 ==> ghost_commit_tx(s).0.num_entry == 0;
{
var s' := s;
var (s', f) := set_num_entry(s', 0);
var s' := if f then s'.(first_log_pos := map[]) else s';
(s', f)
}
function ghost_tx_write (s0 : GhostState, off : int, val : int) : GhostState
requires ghost_tx_inv(s0);
requires old_mem_equiv(s0);
requires 0 <= off < s0.mem_len;
requires 0 <= s0.num_entry * 2 + 2 < |s0.log|;
ensures ghost_tx_inv(ghost_tx_write(s0, off, val));
ensures old_mem_equiv(ghost_tx_write(s0, off, val));
ensures |ghost_tx_write(s0, off, val).mem| == s0.mem_len;
ensures !crashed(ghost_tx_write(s0, off, val)) ==> ghost_tx_write(s0, off, val).mem[off] == val;
{
var s := s0;
var log_idx := s.num_entry;
var log_off := log_idx * 2;
var old_val := s.mem[off];
var (s, f) := log_write_step(s, log_off, off);
var (s, f) := log_write_step(s, log_off + 1, old_val);
var (s, f) := set_num_entry(s, log_idx + 1);
var s := if f && !(off in s.first_log_pos)
then s.(first_log_pos := s.first_log_pos[off := log_idx])
else s;
var (s, f) := mem_write_step(s, off, val);
s
}
function reverse_recovery (s0 : GhostState, idx : int) : GhostState
requires ghost_tx_inv(s0);
requires old_mem_equiv(s0);
requires 0 <= idx <= s0.num_entry;
ensures ghost_tx_inv(reverse_recovery(s0, idx));
ensures old_mem_equiv(reverse_recovery(s0, idx));
ensures s0.old_mem == reverse_recovery(s0, idx).old_mem;
ensures s0.first_log_pos == reverse_recovery(s0, idx).first_log_pos;
ensures forall o :: o in s0.first_log_pos && s0.first_log_pos[o] >= idx ==>
reverse_recovery(s0, idx).mem[o] == s0.mem[o];
ensures forall o :: o in s0.first_log_pos && 0 <= s0.first_log_pos[o] < idx ==>
reverse_recovery(s0, idx).mem[o] == s0.old_mem[o];
{
if idx == 0 then
s0
else
var s := s0;
var i := idx - 1;
var off := s.log[i * 2];
var val := s.log[i * 2 + 1];
var s := s.(mem := s.mem[off := val]);
var s := reverse_recovery(s, idx - 1);
s.mem[o] == s.old_mem[o];
o == off && val == s.old_mem[o];
s.mem[o] == val;
s
}
function ghost_recover (s0 : GhostState) : GhostState
requires ghost_tx_inv(s0);
requires old_mem_equiv(s0);
ensures ghost_recover(s0).mem == s0.old_mem;
ensures ghost_recover(s0).num_entry == 0;
{
var s := reverse_recovery(s0, s0.num_entry);
s.(num_entry := 0)
}
class UndoLog {
var log_ : array<int>;
var mem_ : array<int>;
var impl_countdown : int;
ghost var gs : GhostState;
constructor () {}
predicate ghost_state_equiv(gs : GhostState)
reads this;
reads mem_;
reads log_;
{
log_.Length > 0 &&
mem_[..] == gs.mem &&
log_[1..] == gs.log &&
log_[0] == gs.num_entry &&
impl_countdown == gs.countdown
}
predicate state_inv()
reads this;
reads log_;
{
log_.Length > 1 && 0 <= log_[0] && (log_[0] * 2) < log_.Length
&& log_.Length < 0xffffffff && mem_ != log_
&& forall i : int :: 0 <= i < log_[0] ==> 0 <= log_[i * 2 + 1] < mem_.Length
&& impl_countdown >= 0
}
method init(log_size : int, mem_size : int, countdown : int)
requires log_size > 1;
requires mem_size > 0;
requires log_size < 0xffffffff;
modifies this;
ensures fresh(log_);
ensures fresh(mem_);
ensures state_inv();
ensures ghost_state_equiv(gs);
{
log_ := new int[log_size];
mem_ := new int[mem_size];
log_[0] := 0;
impl_countdown := countdown;
gs := GS(0, log_[1..], mem_size, mem_[..], mem_[..], mem_[..], countdown, map[]);
}
method impl_countdown_dec()
modifies this;
requires impl_countdown > 0;
requires mem_ != log_;
ensures mem_ != log_;
ensures impl_countdown == old(impl_countdown) - 1;
ensures impl_countdown >= 0;
ensures gs == old(gs);
ensures log_[..] == old(log_)[..];
ensures mem_[..] == old(mem_)[..];
{
impl_countdown := impl_countdown - 1;
}
method write_mem(off : int, val : int)
modifies this;
modifies mem_;
requires 0 <= off < mem_.Length;
requires mem_ != log_;
requires ghost_state_inv(gs);
requires ghost_state_equiv(gs);
requires 0 <= off < gs.mem_len;
ensures mem_ == old(mem_);
ensures log_ == old(log_);
ensures gs == old(gs);
ensures ghost_state_equiv(mem_write_step(gs, off, val).0);
{
if (impl_countdown > 0) {
mem_[off] := val;
impl_countdown := impl_countdown - 1;
}
}
method write_log(off : int, val : int)
modifies this;
modifies log_;
requires 0 <= off <= |gs.log|;
requires mem_ != log_;
requires ghost_state_inv(gs);
requires ghost_state_equiv(gs);
requires off == 0 ==> 0 <= val * 2 < |gs.log|;
ensures mem_ != log_;
ensures mem_ == old(mem_);
ensures log_ == old(log_);
ensures log_.Length == old(log_).Length;
ensures mem_[..] == old(mem_)[..];
ensures log_[off] == val || log_[off] == old(log_)[off];
ensures forall i :: 0 <= i < log_.Length && i != off ==> log_[i] == old(log_)[i];
ensures gs == old(gs);
ensures off > 0 ==> ghost_state_equiv(log_write_step(gs, off - 1, val).0);
ensures off == 0 ==> ghost_state_equiv(set_num_entry(gs, val).0);
{
if (impl_countdown > 0) {
log_[off] := val;
impl_countdown := impl_countdown - 1;
}
}
method begin_tx()
modifies log_;
modifies this;
requires state_inv();
requires ghost_state_equiv(gs);
requires ghost_state_inv(gs);
requires log_[0] == 0;
ensures mem_ == old(mem_);
ensures log_ == old(log_);
ensures state_inv();
ensures ghost_state_equiv(gs);
ensures ghost_tx_inv(gs);
{
write_log(0, 0);
gs := ghost_begin_tx(gs);
}
method commit_tx()
modifies log_;
modifies this;
requires state_inv();
requires ghost_state_equiv(gs);
requires ghost_state_inv(gs);
requires ghost_tx_inv(gs);
requires old_mem_equiv(gs);
ensures mem_ == old(mem_);
ensures log_ == old(log_);
ensures ghost_state_equiv(gs);
ensures state_inv();
{
write_log(0, 0);
gs := ghost_commit_tx(gs).0;
}
method tx_write(offset: int, val : int)
modifies this;
modifies log_;
modifies mem_;
requires state_inv();
requires mem_ != log_;
requires 0 <= offset < mem_.Length;
requires ghost_state_equiv(gs);
requires ghost_tx_inv(gs);
requires old_mem_equiv(gs);
requires 0 <= log_[0] * 2 + 3 < log_.Length;
ensures ghost_state_equiv(gs);
ensures ghost_tx_inv(gs);
ensures old_mem_equiv(gs);
{
var log_idx := log_[0];
var log_off := log_idx * 2;
ghost var old_gs := gs;
write_log(log_off + 1, offset);
gs := log_write_step(gs, log_off, offset).0;
var old_val := mem_[offset];
write_log(log_off + 2, old_val);
gs := log_write_step(gs, log_off + 1, old_val).0;
write_log(0, log_idx + 1);
ghost var (s, f) := set_num_entry(gs, log_idx + 1);
s := if f && !(offset in s.first_log_pos)
then s.(first_log_pos := s.first_log_pos[offset := log_idx])
else s;
gs := s;
write_mem(offset, val);
gs := mem_write_step(gs, offset, val).0;
}
// we assume that recover won't crash (though this code works when recover can fail)
method recover()
modifies log_;
modifies mem_;
modifies this;
requires state_inv();
requires ghost_tx_inv(gs);
requires old_mem_equiv(gs);
requires ghost_state_equiv(gs);
ensures gs == ghost_recover(old(gs));
ensures ghost_state_equiv(gs);
{
var log_len := log_[0];
if (log_len > 0) {
var i := log_len - 1;
ghost var gs0 := gs;
while i >= 0
modifies mem_;
modifies this;
{
var o := i * 2 + 1;
var off := log_[o];
var val := log_[o + 1];
mem_[off] := val;
gs := gs.(mem := gs.mem[off := val]);
i := i - 1;
}
} else {
}
log_[0] := 0;
gs := ghost_recover(old(gs));
}
}
lemma crash_safe_single_tx(init_log : seq<int>, init_mem : seq<int>,
countdown : int,
writes : seq<(int, int)>)
requires |init_log| > 0;
requires countdown >= 0;
requires forall i :: 0 <= i < |writes| ==>
0 <= writes[i].0 < |init_mem|;
requires 0 < |writes| * 2 < |init_log|;
{
var s := init_ghost_state(init_log, init_mem, countdown);
var end_mem := init_mem;
s := ghost_begin_tx(s);
var i := 0;
while i < |writes|
{
s := ghost_tx_write(s, writes[i].0, writes[i].1);
end_mem := end_mem[writes[i].0 := writes[i].1];
i := i + 1;
}
var (s', c) := ghost_commit_tx(s);
if (c) {
} else {
var recovered := ghost_recover(s');
}
}
|
328 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_mathematical objects verification_examples_fast_exp.dfy | function exp(b: nat, n: nat): nat {
if n == 0 then 1
else b * exp(b, n-1)
}
lemma exp_sum(b: nat, n1: nat, n2: nat)
ensures exp(b, n1 + n2) == exp(b, n1) * exp(b, n2)
{
if n1 == 0 {
return;
} else {
exp_sum(b, n1-1, n2);
}
}
lemma exp_sum_auto(b: nat)
ensures forall n1: nat, n2: nat :: exp(b, n1 + n2) == exp(b, n1) * exp(b, n2)
{
forall n1: nat, n2: nat
ensures exp(b, n1 + n2) == exp(b, n1) * exp(b, n2) {
exp_sum(b, n1, n2);
}
}
function bits(n: nat): seq<bool>
decreases n
{
if n == 0 then []
else [if (n % 2 == 0) then false else true] + bits(n/2)
}
function from_bits(s: seq<bool>): nat {
if s == [] then 0
else (if s[0] then 1 else 0) + 2 * from_bits(s[1..])
}
lemma bits_from_bits(n: nat)
ensures from_bits(bits(n)) == n
{
}
lemma bits_trim_front(n: nat)
requires n > 0
ensures from_bits(bits(n)[1..]) == n/2
{}
lemma from_bits_append(s: seq<bool>, b: bool)
ensures from_bits(s + [b]) == from_bits(s) + exp(2, |s|) * (if b then 1 else 0)
{
if s == [] {
return;
}
assert s == [s[0]] + s[1..];
from_bits_append(s[1..], b);
// from recursive call
assert from_bits(s[1..] + [b]) == from_bits(s[1..]) + exp(2, |s|-1) * (if b then 1 else 0);
exp_sum(2, |s|-1, 1);
assert (s + [b])[1..] == s[1..] + [b]; // observe
assert from_bits(s + [b]) == (if s[0] then 1 else 0) + 2 * from_bits(s[1..] + [b]);
}
lemma from_bits_sum(s1: seq<bool>, s2: seq<bool>)
decreases s2
ensures from_bits(s1 + s2) == from_bits(s1) + exp(2, |s1|) * from_bits(s2)
{
if s2 == [] {
assert s1 + s2 == s1;
return;
}
from_bits_sum(s1 + [s2[0]], s2[1..]);
assert s1 + [s2[0]] + s2[1..] == s1 + s2;
from_bits_append(s1, s2[0]);
assume false; // TODO
}
method fast_exp(b: nat, n: nat) returns (r: nat)
ensures r == exp(b, n)
{
var a := 1;
var c := b;
ghost var n0 := n;
var n := n;
ghost var i: nat := 0;
bits_from_bits(n);
while n > 0
decreases n
invariant c == exp(b, exp(2, i))
invariant n <= n0
invariant i <= |bits(n0)|
invariant bits(n) == bits(n0)[i..]
invariant n == from_bits(bits(n0)[i..])
invariant a == exp(b, from_bits(bits(n0)[..i]))
{
ghost var n_loop_top := n;
if n % 2 == 1 {
assert bits(n)[0] == true;
// a accumulates sum(i => b^(2^n_i), i) where n_i are the 1 bits of n
// TODO: n0-n is sum(i => 2^n_i, i), right?
a := a * c;
exp_sum(b, n0-n, i);
// (n-1)/2 == n/2 in this case, but we want to be extra clear that we're
// "dropping" a 1 bit here and so something interesting is happening
n := (n-1) / 2;
assert 2 * exp(2, i) == exp(2, i+1);
assert a == exp(b, from_bits(bits(n0)[..i]) + exp(2, i)) by {
exp_sum_auto(b);
}
assume false;
assert a == exp(b, from_bits(bits(n0)[..i+1]));
} else {
assert bits(n)[0] == false;
n := n / 2;
assume false;
assert a == exp(b, from_bits(bits(n0)[..i+1]));
}
assert n == n_loop_top/2;
c := c * c;
exp_sum(b, exp(2, i), exp(2, i));
// assert bits(n0)[i+1..] == bits(n0)[i..][1..];
i := i + 1;
}
assert bits(n0)[..i] == bits(n0);
return a;
}
| function exp(b: nat, n: nat): nat {
if n == 0 then 1
else b * exp(b, n-1)
}
lemma exp_sum(b: nat, n1: nat, n2: nat)
ensures exp(b, n1 + n2) == exp(b, n1) * exp(b, n2)
{
if n1 == 0 {
return;
} else {
exp_sum(b, n1-1, n2);
}
}
lemma exp_sum_auto(b: nat)
ensures forall n1: nat, n2: nat :: exp(b, n1 + n2) == exp(b, n1) * exp(b, n2)
{
forall n1: nat, n2: nat
ensures exp(b, n1 + n2) == exp(b, n1) * exp(b, n2) {
exp_sum(b, n1, n2);
}
}
function bits(n: nat): seq<bool>
{
if n == 0 then []
else [if (n % 2 == 0) then false else true] + bits(n/2)
}
function from_bits(s: seq<bool>): nat {
if s == [] then 0
else (if s[0] then 1 else 0) + 2 * from_bits(s[1..])
}
lemma bits_from_bits(n: nat)
ensures from_bits(bits(n)) == n
{
}
lemma bits_trim_front(n: nat)
requires n > 0
ensures from_bits(bits(n)[1..]) == n/2
{}
lemma from_bits_append(s: seq<bool>, b: bool)
ensures from_bits(s + [b]) == from_bits(s) + exp(2, |s|) * (if b then 1 else 0)
{
if s == [] {
return;
}
from_bits_append(s[1..], b);
// from recursive call
exp_sum(2, |s|-1, 1);
}
lemma from_bits_sum(s1: seq<bool>, s2: seq<bool>)
ensures from_bits(s1 + s2) == from_bits(s1) + exp(2, |s1|) * from_bits(s2)
{
if s2 == [] {
return;
}
from_bits_sum(s1 + [s2[0]], s2[1..]);
from_bits_append(s1, s2[0]);
assume false; // TODO
}
method fast_exp(b: nat, n: nat) returns (r: nat)
ensures r == exp(b, n)
{
var a := 1;
var c := b;
ghost var n0 := n;
var n := n;
ghost var i: nat := 0;
bits_from_bits(n);
while n > 0
{
ghost var n_loop_top := n;
if n % 2 == 1 {
// a accumulates sum(i => b^(2^n_i), i) where n_i are the 1 bits of n
// TODO: n0-n is sum(i => 2^n_i, i), right?
a := a * c;
exp_sum(b, n0-n, i);
// (n-1)/2 == n/2 in this case, but we want to be extra clear that we're
// "dropping" a 1 bit here and so something interesting is happening
n := (n-1) / 2;
exp_sum_auto(b);
}
assume false;
} else {
n := n / 2;
assume false;
}
c := c * c;
exp_sum(b, exp(2, i), exp(2, i));
// assert bits(n0)[i+1..] == bits(n0)[i..][1..];
i := i + 1;
}
return a;
}
|
329 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_mathematical objects verification_examples_interval_example.dfy | /* Here's a small but realistic setting where you could use Dafny.
The setting is that we're implementing an interval library that manages a
data structure with a low and a high value. It implements some computations
on intervals, and we want to make sure those are right.
*/
// Interval is the Dafny model of the data structure itself. We're using `real`
// here for the numbers; the specifics don't really matter, as long as we can
// compare them with <.
datatype Interval = Interval(lo: real, hi: real)
// Contains is one of the core operations on intervals, both because we support
// it in the API and because in some ways it defines what the interval means.
predicate contains(i: Interval, r: real) {
i.lo <= r <= i.hi
}
// We also provide a way to check if an interval is empty.
predicate empty(i: Interval) {
i.lo > i.hi
}
/* Now we can already do our first proof! Empty is a way to check if an interval
* doesn't contain any numbers - let's prove that empty and contains agree with
* each other. */
lemma empty_ok(i: Interval)
// this is the sort of property that's easy to express logically but hard to test for
ensures empty(i) <==> !exists r :: contains(i, r)
{
if empty(i) {
} else {
assert contains(i, i.lo);
}
}
// min and max are just helper functions for the implementation
function min(r1: real, r2: real): real {
if r1 < r2 then r1 else r2
}
function max(r1: real, r2: real): real {
if r1 > r2 then r1 else r2
}
/* The first complicated operation we expose is a function to intersect two
* intervals. It's not so easy to think about whether this is correct - for
* example, does it handle empty intervals correctly? Maybe two empty intervals
* could intersect to a non-empty one? */
function intersect(i1: Interval, i2: Interval): Interval {
Interval(max(i1.lo, i2.lo), min(i1.hi, i2.hi))
}
// This theorem proves that intersect does exactly what we wanted it to, using
// `contains` as the specification.
lemma intersect_ok(i1: Interval, i2: Interval)
ensures forall r :: contains(intersect(i1, i2), r) <==> contains(i1, r) && contains(i2, r)
{
}
/* Next we'll define the union of intervals. This is more complicated because if
* the intervals have no overlap, a single interval can't capture their union
* exactly. */
// Intersect gives us an easy way to define overlap, and we already know it
// handles empty intervals correctly.
predicate overlap(i1: Interval, i2: Interval) {
!empty(intersect(i1, i2))
}
lemma overlap_ok(i1: Interval, i2: Interval)
ensures overlap(i1, i2) <==> exists r :: contains(i1, r) && contains(i2, r)
{
if overlap(i1, i2) {
if i1.lo >= i2.lo {
assert contains(i2, i1.lo);
} else {
assert contains(i1, i2.lo);
}
}
}
// We'll give this function a precondition so that it always does the right thing.
function union(i1: Interval, i2: Interval): Interval
requires overlap(i1, i2)
{
Interval(min(i1.lo, i2.lo), max(i1.hi, i2.hi))
}
// We can prove union correct in much the same way as intersect, with a similar
// specification, although notice that now we require that the intervals
// overlap.
lemma union_ok(i1: Interval, i2: Interval)
requires overlap(i1, i2)
ensures forall r :: contains(union(i1, i2), r) <==> contains(i1, r) || contains(i2, r)
{
}
// Though not used elsewhere here, if two intervals overlap its possible to show
// that there's a common real contained in both of them. We also show off new
// syntax: this lemma returns a value which is used in the postcondition, and
// which the calling lemma can make use of.
lemma overlap_witness(i1: Interval, i2: Interval) returns (r: real)
requires overlap(i1, i2)
ensures contains(i1, r) && contains(i2, r)
{
if i1.lo >= i2.lo {
r := i1.lo;
} else {
r := i2.lo;
}
}
/* One extension you might try is adding is an operation to check if an interval
* is contained in another and proving that correct. Or, try implementing a
* similar library for 2D rectangles. */
| /* Here's a small but realistic setting where you could use Dafny.
The setting is that we're implementing an interval library that manages a
data structure with a low and a high value. It implements some computations
on intervals, and we want to make sure those are right.
*/
// Interval is the Dafny model of the data structure itself. We're using `real`
// here for the numbers; the specifics don't really matter, as long as we can
// compare them with <.
datatype Interval = Interval(lo: real, hi: real)
// Contains is one of the core operations on intervals, both because we support
// it in the API and because in some ways it defines what the interval means.
predicate contains(i: Interval, r: real) {
i.lo <= r <= i.hi
}
// We also provide a way to check if an interval is empty.
predicate empty(i: Interval) {
i.lo > i.hi
}
/* Now we can already do our first proof! Empty is a way to check if an interval
* doesn't contain any numbers - let's prove that empty and contains agree with
* each other. */
lemma empty_ok(i: Interval)
// this is the sort of property that's easy to express logically but hard to test for
ensures empty(i) <==> !exists r :: contains(i, r)
{
if empty(i) {
} else {
}
}
// min and max are just helper functions for the implementation
function min(r1: real, r2: real): real {
if r1 < r2 then r1 else r2
}
function max(r1: real, r2: real): real {
if r1 > r2 then r1 else r2
}
/* The first complicated operation we expose is a function to intersect two
* intervals. It's not so easy to think about whether this is correct - for
* example, does it handle empty intervals correctly? Maybe two empty intervals
* could intersect to a non-empty one? */
function intersect(i1: Interval, i2: Interval): Interval {
Interval(max(i1.lo, i2.lo), min(i1.hi, i2.hi))
}
// This theorem proves that intersect does exactly what we wanted it to, using
// `contains` as the specification.
lemma intersect_ok(i1: Interval, i2: Interval)
ensures forall r :: contains(intersect(i1, i2), r) <==> contains(i1, r) && contains(i2, r)
{
}
/* Next we'll define the union of intervals. This is more complicated because if
* the intervals have no overlap, a single interval can't capture their union
* exactly. */
// Intersect gives us an easy way to define overlap, and we already know it
// handles empty intervals correctly.
predicate overlap(i1: Interval, i2: Interval) {
!empty(intersect(i1, i2))
}
lemma overlap_ok(i1: Interval, i2: Interval)
ensures overlap(i1, i2) <==> exists r :: contains(i1, r) && contains(i2, r)
{
if overlap(i1, i2) {
if i1.lo >= i2.lo {
} else {
}
}
}
// We'll give this function a precondition so that it always does the right thing.
function union(i1: Interval, i2: Interval): Interval
requires overlap(i1, i2)
{
Interval(min(i1.lo, i2.lo), max(i1.hi, i2.hi))
}
// We can prove union correct in much the same way as intersect, with a similar
// specification, although notice that now we require that the intervals
// overlap.
lemma union_ok(i1: Interval, i2: Interval)
requires overlap(i1, i2)
ensures forall r :: contains(union(i1, i2), r) <==> contains(i1, r) || contains(i2, r)
{
}
// Though not used elsewhere here, if two intervals overlap its possible to show
// that there's a common real contained in both of them. We also show off new
// syntax: this lemma returns a value which is used in the postcondition, and
// which the calling lemma can make use of.
lemma overlap_witness(i1: Interval, i2: Interval) returns (r: real)
requires overlap(i1, i2)
ensures contains(i1, r) && contains(i2, r)
{
if i1.lo >= i2.lo {
r := i1.lo;
} else {
r := i2.lo;
}
}
/* One extension you might try is adding is an operation to check if an interval
* is contained in another and proving that correct. Or, try implementing a
* similar library for 2D rectangles. */
|
330 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_mathematical objects verification_examples_library.dfy | /*
A simple state machine modeling checking out and returning books in a library.
*/
// Status will track where one book is
datatype Status = Shelf | Patron(name: string)
datatype Book = Book(title: string)
// The state of the whole library is just the status of every book owned by the
// library.
datatype Variables = Variables(library: map<Book, Status>)
{
// New syntax (member function): the curly braces below the datatype introduce
// a set of _member functions_, which can be called as v.f(), just like Java,
// C++, or Rust methods. Just like in Java or C++, the body can use the `this`
// keyword to refer to an implicit argument of type Variables.
ghost predicate WellFormed()
{
// New syntax (x in m for maps): maps have a domain and we can write x in m
// to say x is in the domain of m (similarly, `x !in m` is a more readable
// version of `!(x in m)`). As with sequences where indices need to be in
// bounds, to write `m[x]` you'll need to show that `x in m` holds.
//
// What we're saying here is that the empty-titled book is not owned by the
// library.
forall b: Book :: b.title == "" ==> b !in this.library
}
}
ghost predicate Init(v: Variables)
{
&& v.WellFormed()
&& forall b :: b in v.library ==> v.library[b].Shelf?
}
// The transitions of the library state machine.
datatype Step = Checkout(b: Book, to: string) | Return(b: Book)
ghost predicate CheckoutStep(v: Variables, v': Variables, step: Step)
requires step.Checkout?
{
&& v.WellFormed()
&& step.b in v.library
&& v.library[step.b].Shelf?
// New syntax (datatype update): here we define the new Variables from the old
// one by updating one field: v.(library := ...). This is much like a sequence
// update. In fact, we also introduce a map update `v.library[step.b := ...]`
// which works in pretty much the same way.
&& v' == v.(library := v.library[step.b := Patron(step.to)])
}
ghost predicate ReturnStep(v: Variables, v': Variables, step: Step)
requires step.Return?
{
&& v.WellFormed()
&& step.b in v.library
&& v.library[step.b].Patron?
&& v' == v.(library := v.library[step.b := Shelf])
}
ghost predicate NextStep(v: Variables, v': Variables, step: Step)
{
match step {
case Checkout(_, _) => CheckoutStep(v, v', step)
case Return(_) => ReturnStep(v, v', step)
}
}
ghost predicate Next(v: Variables, v': Variables)
{
exists step :: NextStep(v, v', step)
}
lemma NextStepDeterministicGivenStep(v:Variables, v':Variables, step: Step)
requires NextStep(v, v', step)
ensures forall v'' | NextStep(v, v'', step) :: v' == v''
{}
/*
In this lemma we'll write a concrete sequence of states which forms a (short)
execution of this state machine, and prove that it really is an execution.
This can be a good sanity check on the definitions (for example, to make sure
that it's at least possible to take every transition).
*/
lemma ExampleExec() {
var e := [
Variables(library := map[Book("Snow Crash") := Shelf, Book("The Stand") := Shelf]),
Variables(library := map[Book("Snow Crash") := Patron("Jon"), Book("The Stand") := Shelf]),
Variables(library := map[Book("Snow Crash") := Patron("Jon"), Book("The Stand") := Patron("Tej")]),
Variables(library := map[Book("Snow Crash") := Shelf, Book("The Stand") := Patron("Tej")])
];
// Next we'll prove that e is a valid execution.
assert Init(e[0]);
// These steps will be witnesses to help prove Next between every pair of Variables.
var steps := [
Checkout(Book("Snow Crash"), "Jon"),
Checkout(Book("The Stand"), "Tej"),
Return(Book("Snow Crash"))
];
assert forall n: nat | n < |e|-1 :: NextStep(e[n], e[n+1], steps[n]);
assert forall n: nat | n < |e|-1 :: Next(e[n], e[n+1]);
}
| /*
A simple state machine modeling checking out and returning books in a library.
*/
// Status will track where one book is
datatype Status = Shelf | Patron(name: string)
datatype Book = Book(title: string)
// The state of the whole library is just the status of every book owned by the
// library.
datatype Variables = Variables(library: map<Book, Status>)
{
// New syntax (member function): the curly braces below the datatype introduce
// a set of _member functions_, which can be called as v.f(), just like Java,
// C++, or Rust methods. Just like in Java or C++, the body can use the `this`
// keyword to refer to an implicit argument of type Variables.
ghost predicate WellFormed()
{
// New syntax (x in m for maps): maps have a domain and we can write x in m
// to say x is in the domain of m (similarly, `x !in m` is a more readable
// version of `!(x in m)`). As with sequences where indices need to be in
// bounds, to write `m[x]` you'll need to show that `x in m` holds.
//
// What we're saying here is that the empty-titled book is not owned by the
// library.
forall b: Book :: b.title == "" ==> b !in this.library
}
}
ghost predicate Init(v: Variables)
{
&& v.WellFormed()
&& forall b :: b in v.library ==> v.library[b].Shelf?
}
// The transitions of the library state machine.
datatype Step = Checkout(b: Book, to: string) | Return(b: Book)
ghost predicate CheckoutStep(v: Variables, v': Variables, step: Step)
requires step.Checkout?
{
&& v.WellFormed()
&& step.b in v.library
&& v.library[step.b].Shelf?
// New syntax (datatype update): here we define the new Variables from the old
// one by updating one field: v.(library := ...). This is much like a sequence
// update. In fact, we also introduce a map update `v.library[step.b := ...]`
// which works in pretty much the same way.
&& v' == v.(library := v.library[step.b := Patron(step.to)])
}
ghost predicate ReturnStep(v: Variables, v': Variables, step: Step)
requires step.Return?
{
&& v.WellFormed()
&& step.b in v.library
&& v.library[step.b].Patron?
&& v' == v.(library := v.library[step.b := Shelf])
}
ghost predicate NextStep(v: Variables, v': Variables, step: Step)
{
match step {
case Checkout(_, _) => CheckoutStep(v, v', step)
case Return(_) => ReturnStep(v, v', step)
}
}
ghost predicate Next(v: Variables, v': Variables)
{
exists step :: NextStep(v, v', step)
}
lemma NextStepDeterministicGivenStep(v:Variables, v':Variables, step: Step)
requires NextStep(v, v', step)
ensures forall v'' | NextStep(v, v'', step) :: v' == v''
{}
/*
In this lemma we'll write a concrete sequence of states which forms a (short)
execution of this state machine, and prove that it really is an execution.
This can be a good sanity check on the definitions (for example, to make sure
that it's at least possible to take every transition).
*/
lemma ExampleExec() {
var e := [
Variables(library := map[Book("Snow Crash") := Shelf, Book("The Stand") := Shelf]),
Variables(library := map[Book("Snow Crash") := Patron("Jon"), Book("The Stand") := Shelf]),
Variables(library := map[Book("Snow Crash") := Patron("Jon"), Book("The Stand") := Patron("Tej")]),
Variables(library := map[Book("Snow Crash") := Shelf, Book("The Stand") := Patron("Tej")])
];
// Next we'll prove that e is a valid execution.
// These steps will be witnesses to help prove Next between every pair of Variables.
var steps := [
Checkout(Book("Snow Crash"), "Jon"),
Checkout(Book("The Stand"), "Tej"),
Return(Book("Snow Crash"))
];
}
|
331 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_mathematical objects verification_examples_logic.dfy | /* Review of logical connectives and properties of first-order logic. */
/* We'll be using boolean logic both to define protocols and to state their
* properties, so it helps if you have an understanding of what the connectives
* of logic mean and have a little fluency with manipulating them. */
/* The first section of "An Introduction to Abstract Mathematics" by Neil
* Donaldson and Alessandra Pantano might be helpful:
* https://www.math.uci.edu/~ndonalds/math13/notes.pdf
*/
/* The core of logic is the _proposition_. For us, a proposition like `2 < 3` is
* going to be a boolean, with the interpretation that the proposition is true,
* well, if the boolean is true, and false if not. That proposition is clearly
* true.
*/
lemma ExampleProposition()
{
assert 2 < 3;
}
/* Another example: `7 - 3 == 3` is clearly false, but it's still a
* proposition.
*/
lemma SomethingFalse()
{
// you'll get an error if you uncomment this line
// assert 7 - 3 == 3;
}
/* On the other hand something like `7 * false < 8` isn't a
* proposition at all since it has a type error - we won't have to worry too
* much about these because Dafny will quickly and easily catch such mistakes.
*/
lemma SomethingNonsensical()
{
// you'll get an error if you uncomment this line
//
// unlike the above, it will be a type-checking error and not a verification
// failure
// assert 7 * false < 8;
}
/* In Dafny, we can write lemmas with arguments, which are logical variables (of
* the appropriate types). From here on we'll shift to stating logical properties
* as ensures clauses of lemmas, the typical way they'd be packaged in Dafny. */
lemma AdditionCommutes(n: int, m: int)
ensures n + m == m + n
{
// The proof of this lemma goes here. In this case (and in many others), no
// additional assistance is needed so an empty proof suffices.
//
// In Dafny, we won't talk much about proofs on their own - in a course on
// logic you might go over logical rules or proof trees - because Dafny is
// going to have all the power you need to prove things (as long as they're true!).
}
/* Let's start by going over the simplest logical connectives: && ("and") and ||
* ("or"). In these examples think of the input booleans as being arbitrary
* predicates, except that by the time we've passed them to these lemmas their
* represented as just a truth value. */
lemma ProveAndFromBoth(p1: bool, p2: bool)
requires p1
requires p2
ensures p1 && p2
{}
lemma FromAndProveRight(p1: bool, p2: bool)
requires p1 && p2
ensures p2
{}
lemma ProveOrFromLeft(p1: bool, p2: bool)
requires p1
ensures p1 || p2
{}
/* Let's also see _negation_ written `!p`, boolean negation. Asserting or
* ensuring `!p` is the way we prove it's false. */
lemma DoubleNegation(p: bool)
requires p
ensures !!p
{}
lemma LawOfExcludedMiddle(p: bool)
ensures p || !p
{}
/* Now we'll introduce boolean implication, `p ==> q`, read as "if p, then q". In "p
* ==> q" we'll sometimes refer to "p" as a hypothesis and "q" as a conclusion.
* Here are some alternative English logical
* statements and how they map to implication:
*
* "p if q" means "q ==> p"
* "p only if q" means "p ==> q" (this one can be tricky!)
* "p implies q" means "p ==> q"
*/
/* Note that p ==> q is itself a proposition! Here's its "truth table", showing
* all possible combinations of p and q and whether p ==> q is true: */
lemma ImplicationTruthTable()
ensures false ==> false
ensures false ==> true
ensures !(true ==> false)
ensures false ==> true
{}
/* One of the most famous rules of logic, which allows us to take an implication
* (already proven correct) and a proof of its hypothesis to derive its
* conclusion.
*
* Note that both parts are important! We can prove `false ==> 2 < 1` but will
* never be able to use ModusPonens on this to prove `2 < 1`. Well we could, but
* since this is obviously false it would mean we accidentally assumed false
* somewhere else - this is also called an _inconsistency_.
*/
lemma ModusPonens(p1: bool, p2: bool)
requires p1 ==> p2
requires p1
ensures p2
{}
/* We can write a lemma above as implications in ensures clauses, rather than
* using preconditions. The key difference is that calling `FromAndProveLeft(p1,
* p2)` for example will cause Dafny to immediately prove `p1 && p2`, whereas we
* can always call `AndProvesBoth(p1, p2)` and Dafny won't check anything
* (because the implications are true regardless of p1 and p2). */
lemma AndProvesBoth(p1: bool, p2: bool)
ensures p1 && p2 ==> p1
ensures p1 && p2 ==> p2
{}
/* Let's introduce one more logical connective: `p <==> q`, "p if and only if q"
* (also written "iff" and pronounced "if and only if"). This has the same truth
* value as `p == q`. The whole thing is sometimes called a "biconditional".
* This rule is a little like modus ponens but requiring the implication is
* stronger than needed. */
lemma ProveFromBiconditional(p1: bool, p2: bool)
requires p1
requires p1 <==> p2
ensures p2
{}
/* Simplifying and comprehending logical expressions is something you'll
* gradually get practice with. It can get quite complicated! */
lemma SomeEquivalences(p1: bool, p2: bool)
ensures ((p1 ==> p2) && p1) ==> p2
// !p2 ==> !p1 is called the "contrapositive" of p1 ==> p2. It has the same
// truth value.
ensures (p1 ==> p2) <==> (!p2 ==> !p1)
ensures !(p1 ==> !p2) <==> p1 && p2
ensures ((p1 ==> p2) && (!p1 ==> p2)) <==> p2
// you might want to think about this one:
ensures (!p1 || (p1 ==> p2)) <==> (p1 ==> p2)
{}
lemma SomeMoreEquivalences(p1: bool, p2: bool, p3: bool)
// note on parsing: <==> has the lowest priority, so all of these statements are
// equivalences at the top level
ensures (p1 && p2) && p3 <==> p1 && p2 && p3
// this is what chained implications mean
ensures p1 ==> p2 ==> p3 <==> p1 && p2 ==> p3
ensures p1 ==> (p2 ==> p3) <==> p1 && p2 ==> p3
{}
/* Quantifiers */
/* To express and state more interesting properties, we'll need quantifiers -
* that is, forall and exists. Dafny supports these as a way to write
* propositions, and they produce a boolean value just like the other logical
* connectives. */
lemma AdditionCommutesAsForall()
{
// (ignore the warning "No terms found to trigger on")
assert forall n: int, m: int :: n + m == m + n;
// Just to emphasize this is a proposition (a boolean) just like everything
// else we've seen. The big difference is that this forall is clearly not a
// boolean we could evaluate in the normal sense of running it to produce true
// or false - nonetheless Dafny can reason about it mathematically.
var does_addition_commute: bool := forall n: int, m: int :: n + m == m + n;
assert does_addition_commute == true;
}
/* In order to illustrate some properties of forall, we'll introduce some
* arbitrary _predicates_ over integers to put in our examples. By not putting a
* body we tell Dafny to define these terms, but not to assume anything about their
* values except that they are deterministic. */
predicate P(x: int)
predicate Q(x: int)
// This is a predicate over two integers, often called a relation. You might
// also hear propositions, predicates, and predicates over multiple values all
// called relations - propositions are just 0-arity and predicates are 1-arity.
predicate R(x: int, y: int)
/* One operation you'll eventually want some fluency in is the ability to negate
* logical expressions. Let's go through the rules. */
lemma SimplifyingNegations(p: bool, q: bool)
ensures !(p && q) <==> !p || !q
ensures !(p || q) <==> !p && !q
ensures !(p ==> q) <==> p && !q
ensures !!p <==> p
ensures !(forall x :: P(x)) <==> (exists x :: !P(x))
ensures !(exists x :: P(x)) <==> (forall x :: !P(x))
{}
/* Dafny supports a "where" clause in a forall. It's a shorthand for implication. */
lemma WhereIsJustImplies()
// we need parentheses around each side for this to have the desired meaning
ensures (forall x | P(x) :: Q(x)) <==> (forall x :: P(x) ==> Q(x))
{}
lemma NotForallWhere()
ensures !(forall x | P(x) :: Q(x)) <==> exists x :: P(x) && !Q(x)
{}
/* Dafny also supports a "where" clause in an exists, as a shorthand for &&. */
lemma ExistsWhereIsJustAnd()
// we need parentheses around each side for this to have the desired meaning
ensures (exists x | P(x) :: Q(x)) <==> (exists x :: P(x) && Q(x))
// Why this choice? It's so that the following property holds. Notice that for
// all the negation rules we reverse && and ||, and exists and forall; this
// preserves that _duality_ (a formal and pervasive concept in math and
// logic).
ensures !(forall x | P(x) :: Q(x)) <==> (exists x | P(x) :: !Q(x))
{}
| /* Review of logical connectives and properties of first-order logic. */
/* We'll be using boolean logic both to define protocols and to state their
* properties, so it helps if you have an understanding of what the connectives
* of logic mean and have a little fluency with manipulating them. */
/* The first section of "An Introduction to Abstract Mathematics" by Neil
* Donaldson and Alessandra Pantano might be helpful:
* https://www.math.uci.edu/~ndonalds/math13/notes.pdf
*/
/* The core of logic is the _proposition_. For us, a proposition like `2 < 3` is
* going to be a boolean, with the interpretation that the proposition is true,
* well, if the boolean is true, and false if not. That proposition is clearly
* true.
*/
lemma ExampleProposition()
{
}
/* Another example: `7 - 3 == 3` is clearly false, but it's still a
* proposition.
*/
lemma SomethingFalse()
{
// you'll get an error if you uncomment this line
// assert 7 - 3 == 3;
}
/* On the other hand something like `7 * false < 8` isn't a
* proposition at all since it has a type error - we won't have to worry too
* much about these because Dafny will quickly and easily catch such mistakes.
*/
lemma SomethingNonsensical()
{
// you'll get an error if you uncomment this line
//
// unlike the above, it will be a type-checking error and not a verification
// failure
// assert 7 * false < 8;
}
/* In Dafny, we can write lemmas with arguments, which are logical variables (of
* the appropriate types). From here on we'll shift to stating logical properties
* as ensures clauses of lemmas, the typical way they'd be packaged in Dafny. */
lemma AdditionCommutes(n: int, m: int)
ensures n + m == m + n
{
// The proof of this lemma goes here. In this case (and in many others), no
// additional assistance is needed so an empty proof suffices.
//
// In Dafny, we won't talk much about proofs on their own - in a course on
// logic you might go over logical rules or proof trees - because Dafny is
// going to have all the power you need to prove things (as long as they're true!).
}
/* Let's start by going over the simplest logical connectives: && ("and") and ||
* ("or"). In these examples think of the input booleans as being arbitrary
* predicates, except that by the time we've passed them to these lemmas their
* represented as just a truth value. */
lemma ProveAndFromBoth(p1: bool, p2: bool)
requires p1
requires p2
ensures p1 && p2
{}
lemma FromAndProveRight(p1: bool, p2: bool)
requires p1 && p2
ensures p2
{}
lemma ProveOrFromLeft(p1: bool, p2: bool)
requires p1
ensures p1 || p2
{}
/* Let's also see _negation_ written `!p`, boolean negation. Asserting or
* ensuring `!p` is the way we prove it's false. */
lemma DoubleNegation(p: bool)
requires p
ensures !!p
{}
lemma LawOfExcludedMiddle(p: bool)
ensures p || !p
{}
/* Now we'll introduce boolean implication, `p ==> q`, read as "if p, then q". In "p
* ==> q" we'll sometimes refer to "p" as a hypothesis and "q" as a conclusion.
* Here are some alternative English logical
* statements and how they map to implication:
*
* "p if q" means "q ==> p"
* "p only if q" means "p ==> q" (this one can be tricky!)
* "p implies q" means "p ==> q"
*/
/* Note that p ==> q is itself a proposition! Here's its "truth table", showing
* all possible combinations of p and q and whether p ==> q is true: */
lemma ImplicationTruthTable()
ensures false ==> false
ensures false ==> true
ensures !(true ==> false)
ensures false ==> true
{}
/* One of the most famous rules of logic, which allows us to take an implication
* (already proven correct) and a proof of its hypothesis to derive its
* conclusion.
*
* Note that both parts are important! We can prove `false ==> 2 < 1` but will
* never be able to use ModusPonens on this to prove `2 < 1`. Well we could, but
* since this is obviously false it would mean we accidentally assumed false
* somewhere else - this is also called an _inconsistency_.
*/
lemma ModusPonens(p1: bool, p2: bool)
requires p1 ==> p2
requires p1
ensures p2
{}
/* We can write a lemma above as implications in ensures clauses, rather than
* using preconditions. The key difference is that calling `FromAndProveLeft(p1,
* p2)` for example will cause Dafny to immediately prove `p1 && p2`, whereas we
* can always call `AndProvesBoth(p1, p2)` and Dafny won't check anything
* (because the implications are true regardless of p1 and p2). */
lemma AndProvesBoth(p1: bool, p2: bool)
ensures p1 && p2 ==> p1
ensures p1 && p2 ==> p2
{}
/* Let's introduce one more logical connective: `p <==> q`, "p if and only if q"
* (also written "iff" and pronounced "if and only if"). This has the same truth
* value as `p == q`. The whole thing is sometimes called a "biconditional".
* This rule is a little like modus ponens but requiring the implication is
* stronger than needed. */
lemma ProveFromBiconditional(p1: bool, p2: bool)
requires p1
requires p1 <==> p2
ensures p2
{}
/* Simplifying and comprehending logical expressions is something you'll
* gradually get practice with. It can get quite complicated! */
lemma SomeEquivalences(p1: bool, p2: bool)
ensures ((p1 ==> p2) && p1) ==> p2
// !p2 ==> !p1 is called the "contrapositive" of p1 ==> p2. It has the same
// truth value.
ensures (p1 ==> p2) <==> (!p2 ==> !p1)
ensures !(p1 ==> !p2) <==> p1 && p2
ensures ((p1 ==> p2) && (!p1 ==> p2)) <==> p2
// you might want to think about this one:
ensures (!p1 || (p1 ==> p2)) <==> (p1 ==> p2)
{}
lemma SomeMoreEquivalences(p1: bool, p2: bool, p3: bool)
// note on parsing: <==> has the lowest priority, so all of these statements are
// equivalences at the top level
ensures (p1 && p2) && p3 <==> p1 && p2 && p3
// this is what chained implications mean
ensures p1 ==> p2 ==> p3 <==> p1 && p2 ==> p3
ensures p1 ==> (p2 ==> p3) <==> p1 && p2 ==> p3
{}
/* Quantifiers */
/* To express and state more interesting properties, we'll need quantifiers -
* that is, forall and exists. Dafny supports these as a way to write
* propositions, and they produce a boolean value just like the other logical
* connectives. */
lemma AdditionCommutesAsForall()
{
// (ignore the warning "No terms found to trigger on")
// Just to emphasize this is a proposition (a boolean) just like everything
// else we've seen. The big difference is that this forall is clearly not a
// boolean we could evaluate in the normal sense of running it to produce true
// or false - nonetheless Dafny can reason about it mathematically.
var does_addition_commute: bool := forall n: int, m: int :: n + m == m + n;
}
/* In order to illustrate some properties of forall, we'll introduce some
* arbitrary _predicates_ over integers to put in our examples. By not putting a
* body we tell Dafny to define these terms, but not to assume anything about their
* values except that they are deterministic. */
predicate P(x: int)
predicate Q(x: int)
// This is a predicate over two integers, often called a relation. You might
// also hear propositions, predicates, and predicates over multiple values all
// called relations - propositions are just 0-arity and predicates are 1-arity.
predicate R(x: int, y: int)
/* One operation you'll eventually want some fluency in is the ability to negate
* logical expressions. Let's go through the rules. */
lemma SimplifyingNegations(p: bool, q: bool)
ensures !(p && q) <==> !p || !q
ensures !(p || q) <==> !p && !q
ensures !(p ==> q) <==> p && !q
ensures !!p <==> p
ensures !(forall x :: P(x)) <==> (exists x :: !P(x))
ensures !(exists x :: P(x)) <==> (forall x :: !P(x))
{}
/* Dafny supports a "where" clause in a forall. It's a shorthand for implication. */
lemma WhereIsJustImplies()
// we need parentheses around each side for this to have the desired meaning
ensures (forall x | P(x) :: Q(x)) <==> (forall x :: P(x) ==> Q(x))
{}
lemma NotForallWhere()
ensures !(forall x | P(x) :: Q(x)) <==> exists x :: P(x) && !Q(x)
{}
/* Dafny also supports a "where" clause in an exists, as a shorthand for &&. */
lemma ExistsWhereIsJustAnd()
// we need parentheses around each side for this to have the desired meaning
ensures (exists x | P(x) :: Q(x)) <==> (exists x :: P(x) && Q(x))
// Why this choice? It's so that the following property holds. Notice that for
// all the negation rules we reverse && and ||, and exists and forall; this
// preserves that _duality_ (a formal and pervasive concept in math and
// logic).
ensures !(forall x | P(x) :: Q(x)) <==> (exists x | P(x) :: !Q(x))
{}
|
332 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_pregel algorithms_skeleton_nondet-permutation.dfy | module Permutation
{
/**
* Given n >= 0, generate a permuation of {0,...,n-1} nondeterministically.
*/
method Generate(n: int) returns (perm: array<int>)
requires n >= 0
ensures perm != null
ensures perm.Length == n
ensures fresh(perm)
ensures isValid(perm, n)
{
var all := set x | 0 <= x < n;
var used := {};
perm := new int[n];
CardinalityLemma(n, all);
while used < all
invariant used <= all
invariant |used| <= |all|
invariant forall i | 0 <= i < |used| :: perm[i] in used
invariant distinct'(perm, |used|)
decreases |all| - |used|
{
CardinalityOrderingLemma(used, all);
var dst :| dst in all && dst !in used;
perm[|used|] := dst;
used := used + {dst};
}
assert used == all;
print perm;
}
predicate isValid(a: array<int>, n: nat)
requires a != null && a.Length == n
reads a
{
assume forall i | 0 <= i < n :: i in a[..];
distinct(a)
&& (forall i | 0 <= i < a.Length :: 0 <= a[i] < n)
&& (forall i | 0 <= i < n :: i in a[..])
}
predicate distinct(a: array<int>)
requires a != null
reads a
{
distinct'(a, a.Length)
}
predicate distinct'(a: array<int>, n: int)
requires a != null
requires a.Length >= n
reads a
{
forall i,j | 0 <= i < n && 0 <= j < n && i != j :: a[i] != a[j]
}
lemma CardinalityLemma (size: int, s: set<int>)
requires size >= 0
requires s == set x | 0 <= x < size
ensures size == |s|
{
if(size == 0) {
assert size == |(set x | 0 <= x < size)|;
} else {
CardinalityLemma(size - 1, s - {size - 1});
}
}
lemma CardinalityOrderingLemma<T> (s1: set<T>, s2: set<T>)
requires s1 < s2
ensures |s1| < |s2|
{
var e :| e in s2 - s1;
if s1 != s2 - {e} {
CardinalityOrderingLemma(s1, s2 - {e});
}
}
lemma SetDiffLemma<T> (s1: set<T>, s2: set<T>)
requires s1 < s2
ensures s2 - s1 != {}
{
var e :| e in s2 - s1;
if s2 - s1 != {e} {} // What does Dafny prove here???
}
}
| module Permutation
{
/**
* Given n >= 0, generate a permuation of {0,...,n-1} nondeterministically.
*/
method Generate(n: int) returns (perm: array<int>)
requires n >= 0
ensures perm != null
ensures perm.Length == n
ensures fresh(perm)
ensures isValid(perm, n)
{
var all := set x | 0 <= x < n;
var used := {};
perm := new int[n];
CardinalityLemma(n, all);
while used < all
{
CardinalityOrderingLemma(used, all);
var dst :| dst in all && dst !in used;
perm[|used|] := dst;
used := used + {dst};
}
print perm;
}
predicate isValid(a: array<int>, n: nat)
requires a != null && a.Length == n
reads a
{
assume forall i | 0 <= i < n :: i in a[..];
distinct(a)
&& (forall i | 0 <= i < a.Length :: 0 <= a[i] < n)
&& (forall i | 0 <= i < n :: i in a[..])
}
predicate distinct(a: array<int>)
requires a != null
reads a
{
distinct'(a, a.Length)
}
predicate distinct'(a: array<int>, n: int)
requires a != null
requires a.Length >= n
reads a
{
forall i,j | 0 <= i < n && 0 <= j < n && i != j :: a[i] != a[j]
}
lemma CardinalityLemma (size: int, s: set<int>)
requires size >= 0
requires s == set x | 0 <= x < size
ensures size == |s|
{
if(size == 0) {
} else {
CardinalityLemma(size - 1, s - {size - 1});
}
}
lemma CardinalityOrderingLemma<T> (s1: set<T>, s2: set<T>)
requires s1 < s2
ensures |s1| < |s2|
{
var e :| e in s2 - s1;
if s1 != s2 - {e} {
CardinalityOrderingLemma(s1, s2 - {e});
}
}
lemma SetDiffLemma<T> (s1: set<T>, s2: set<T>)
requires s1 < s2
ensures s2 - s1 != {}
{
var e :| e in s2 - s1;
if s2 - s1 != {e} {} // What does Dafny prove here???
}
}
|
333 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_vampire project_original_Searching.dfy | // Assuming Array is Object Blood
// Blood Array<int>
// index
method Find(blood: array<int>, key: int) returns (index: int)
requires blood != null
ensures 0 <= index ==> index < blood.Length && blood[index] == key
ensures index < 0 ==> forall k :: 0 <= k < blood.Length ==> blood[k] != key
{
index := 0;
while index < blood.Length
invariant 0 <= index <= blood.Length
invariant forall k :: 0 <= k < index ==> blood[k] != key
{
if blood[index] == key { return; }
index := index + 1;
}
index := -1;
}
| // Assuming Array is Object Blood
// Blood Array<int>
// index
method Find(blood: array<int>, key: int) returns (index: int)
requires blood != null
ensures 0 <= index ==> index < blood.Length && blood[index] == key
ensures index < 0 ==> forall k :: 0 <= k < blood.Length ==> blood[k] != key
{
index := 0;
while index < blood.Length
{
if blood[index] == key { return; }
index := index + 1;
}
index := -1;
}
|
334 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_variant examples_KatzManna.dfy | // RUN: %dafny /compile:0 /dprint:"%t.dprint" "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
method NinetyOne(x: int, ghost proveFunctionalPostcondition: bool) returns (z: int)
ensures proveFunctionalPostcondition ==> z == if x > 101 then x-10 else 91;
{
var y1 := x;
var y2 := 1;
while (true)
// the following two invariants are needed only to prove the postcondition
invariant proveFunctionalPostcondition ==> 100 < x ==> y1 == x;
invariant proveFunctionalPostcondition ==> x <= 100 < y1 && y2 == 1 ==> y1 == 101;
// the following two lines justify termination, as in the paper by Katz and Manna
invariant (y1 <= 111 && y2 >= 1) || (y1 == x && y2 == 1);
decreases -2*y1 + 21*y2 + 2*(if x < 111 then 111 else x);
{
if (y1 > 100) {
if (y2 == 1) {
break;
} else {
y1 := y1 - 10;
y2 := y2 - 1;
}
} else {
y1 := y1 + 11;
y2 := y2 + 1;
}
}
z := y1 - 10;
}
method Gcd(x1: int, x2: int)
requires 1 <= x1 && 1 <= x2;
{
var y1 := x1;
var y2 := x2;
while (y1 != y2)
invariant 1 <= y1 && 1 <= y2;
decreases y1 + y2;
{
while (y1 > y2)
invariant 1 <= y1 && 1 <= y2;
{
y1 := y1 - y2;
}
while (y2 > y1)
invariant 1 <= y1 && 1 <= y2;
{
y2 := y2 - y1;
}
}
}
method Determinant(X: array2<int>, M: int) returns (z: int)
requires 1 <= M;
requires X != null && M == X.Length0 && M == X.Length1;
modifies X;
{
var y := X[1-1,1-1];
var a := 1;
while (a != M)
invariant 1 <= a <= M;
{
var b := a + 1;
while (b != M+1)
invariant a+1 <= b <= M+1;
{
var c := M;
while (c != a)
invariant a <= c <= M;
{
assume X[a-1,a-1] != 0;
X[b-1, c-1] := X[b-1,c-1] - X[b-1,a-1] / X[a-1,a-1] * X[a-1,c-1];
c := c - 1;
}
b := b + 1;
}
a := a + 1;
y := y * X[a-1,a-1];
}
z := y;
}
| // RUN: %dafny /compile:0 /dprint:"%t.dprint" "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
method NinetyOne(x: int, ghost proveFunctionalPostcondition: bool) returns (z: int)
ensures proveFunctionalPostcondition ==> z == if x > 101 then x-10 else 91;
{
var y1 := x;
var y2 := 1;
while (true)
// the following two invariants are needed only to prove the postcondition
// the following two lines justify termination, as in the paper by Katz and Manna
{
if (y1 > 100) {
if (y2 == 1) {
break;
} else {
y1 := y1 - 10;
y2 := y2 - 1;
}
} else {
y1 := y1 + 11;
y2 := y2 + 1;
}
}
z := y1 - 10;
}
method Gcd(x1: int, x2: int)
requires 1 <= x1 && 1 <= x2;
{
var y1 := x1;
var y2 := x2;
while (y1 != y2)
{
while (y1 > y2)
{
y1 := y1 - y2;
}
while (y2 > y1)
{
y2 := y2 - y1;
}
}
}
method Determinant(X: array2<int>, M: int) returns (z: int)
requires 1 <= M;
requires X != null && M == X.Length0 && M == X.Length1;
modifies X;
{
var y := X[1-1,1-1];
var a := 1;
while (a != M)
{
var b := a + 1;
while (b != M+1)
{
var c := M;
while (c != a)
{
assume X[a-1,a-1] != 0;
X[b-1, c-1] := X[b-1,c-1] - X[b-1,a-1] / X[a-1,a-1] * X[a-1,c-1];
c := c - 1;
}
b := b + 1;
}
a := a + 1;
y := y * X[a-1,a-1];
}
z := y;
}
|
335 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_variant examples_SumOfCubes.dfy | // RUN: %dafny /compile:0 /dprint:"%t.dprint" "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
class SumOfCubes {
static function SumEmUp(n: int, m: int): int
requires 0 <= n && n <= m;
decreases m - n;
{
if m == n then 0 else n*n*n + SumEmUp(n+1, m)
}
static method Socu(n: int, m: int) returns (r: int)
requires 0 <= n && n <= m;
ensures r == SumEmUp(n, m);
{
var a := SocuFromZero(m);
var b := SocuFromZero(n);
r := a - b;
Lemma0(n, m);
}
static method SocuFromZero(k: int) returns (r: int)
requires 0 <= k;
ensures r == SumEmUp(0, k);
{
var g := Gauss(k);
r := g * g;
Lemma1(k);
}
ghost static method Lemma0(n: int, m: int)
requires 0 <= n && n <= m;
ensures SumEmUp(n, m) == SumEmUp(0, m) - SumEmUp(0, n);
{
var k := n;
while (k < m)
invariant n <= k && k <= m;
invariant SumEmDown(0, n) + SumEmDown(n, k) == SumEmDown(0, k);
{
k := k + 1;
}
Lemma3(0, n);
Lemma3(n, k);
Lemma3(0, k);
}
static function GSum(k: int): int
requires 0 <= k;
{
if k == 0 then 0 else GSum(k-1) + k-1
}
static method Gauss(k: int) returns (r: int)
requires 0 <= k;
ensures r == GSum(k);
{
r := k * (k - 1) / 2;
Lemma2(k);
}
ghost static method Lemma1(k: int)
requires 0 <= k;
ensures SumEmUp(0, k) == GSum(k) * GSum(k);
{
var i := 0;
while (i < k)
invariant i <= k;
invariant SumEmDown(0, i) == GSum(i) * GSum(i);
{
Lemma2(i);
i := i + 1;
}
Lemma3(0, k);
}
ghost static method Lemma2(k: int)
requires 0 <= k;
ensures 2 * GSum(k) == k * (k - 1);
{
var i := 0;
while (i < k)
invariant i <= k;
invariant 2 * GSum(i) == i * (i - 1);
{
i := i + 1;
}
}
static function SumEmDown(n: int, m: int): int
requires 0 <= n && n <= m;
{
if m == n then 0 else SumEmDown(n, m-1) + (m-1)*(m-1)*(m-1)
}
ghost static method Lemma3(n: int, m: int)
requires 0 <= n && n <= m;
ensures SumEmUp(n, m) == SumEmDown(n, m);
{
var k := n;
while (k < m)
invariant n <= k && k <= m;
invariant SumEmUp(n, m) == SumEmDown(n, k) + SumEmUp(k, m);
{
k := k + 1;
}
}
}
| // RUN: %dafny /compile:0 /dprint:"%t.dprint" "%s" > "%t"
// RUN: %diff "%s.expect" "%t"
class SumOfCubes {
static function SumEmUp(n: int, m: int): int
requires 0 <= n && n <= m;
{
if m == n then 0 else n*n*n + SumEmUp(n+1, m)
}
static method Socu(n: int, m: int) returns (r: int)
requires 0 <= n && n <= m;
ensures r == SumEmUp(n, m);
{
var a := SocuFromZero(m);
var b := SocuFromZero(n);
r := a - b;
Lemma0(n, m);
}
static method SocuFromZero(k: int) returns (r: int)
requires 0 <= k;
ensures r == SumEmUp(0, k);
{
var g := Gauss(k);
r := g * g;
Lemma1(k);
}
ghost static method Lemma0(n: int, m: int)
requires 0 <= n && n <= m;
ensures SumEmUp(n, m) == SumEmUp(0, m) - SumEmUp(0, n);
{
var k := n;
while (k < m)
{
k := k + 1;
}
Lemma3(0, n);
Lemma3(n, k);
Lemma3(0, k);
}
static function GSum(k: int): int
requires 0 <= k;
{
if k == 0 then 0 else GSum(k-1) + k-1
}
static method Gauss(k: int) returns (r: int)
requires 0 <= k;
ensures r == GSum(k);
{
r := k * (k - 1) / 2;
Lemma2(k);
}
ghost static method Lemma1(k: int)
requires 0 <= k;
ensures SumEmUp(0, k) == GSum(k) * GSum(k);
{
var i := 0;
while (i < k)
{
Lemma2(i);
i := i + 1;
}
Lemma3(0, k);
}
ghost static method Lemma2(k: int)
requires 0 <= k;
ensures 2 * GSum(k) == k * (k - 1);
{
var i := 0;
while (i < k)
{
i := i + 1;
}
}
static function SumEmDown(n: int, m: int): int
requires 0 <= n && n <= m;
{
if m == n then 0 else SumEmDown(n, m-1) + (m-1)*(m-1)*(m-1)
}
ghost static method Lemma3(n: int, m: int)
requires 0 <= n && n <= m;
ensures SumEmUp(n, m) == SumEmDown(n, m);
{
var k := n;
while (k < m)
{
k := k + 1;
}
}
}
|
336 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_verified algorithms_inductive_props.dfy | // This file demonstrates how to "close" a critical "gap" between definitions
// between Dafny and Coq.
// In general, most commonly-used "building blocks" in Coq can be mapped to Dafny:
// [Coq] [Dafny]
// --------------------------------------------------------------------
// Inductive (Set) datatype
// Definition function/predicate
// Fixpoint function/predicate (with `decreases`)
// Theorem & Proof lemma
// Type (Set, e.g. `list nat`) still a type (e.g. `seq<nat>`)
// Type (Prop, e.g. `1+1==2`) encode in `requires` or `ensures`
// N/A (at least NOT built-in) method (imperative programming)
//
// Inductive (Prop) ??? (discussed in this file)
// Dafny's way to define Coq's `Fixpoint` predicate:
ghost predicate even(n: nat) {
match n {
case 0 => true
case 1 => false
case _ => even(n - 2)
}
}
// all below are automatically proved:
lemma a0() ensures even(4) {}
lemma a1() ensures !even(3) {}
lemma a2(n: nat) requires even(n) ensures even(n + 2) {}
lemma a3(n: nat) requires even(n + 2) ensures even(n) {}
// Dafny lacks syntax to define `Inductive` Prop like in Coq.
// We'll show two workarounds for this.
// Workaround 1: simulate with "rules"
datatype EvenRule =
| ev_0
| ev_SS(r: EvenRule)
{
ghost function apply(): nat {
match this {
case ev_0 => 0
case ev_SS(r) => r.apply() + 2
}
}
}
ghost predicate Even(n: nat) {
exists r: EvenRule :: r.apply() == n
}
// then we can prove by "constructing" or "destructing" just like in Coq:
lemma b0() ensures Even(4) {
assert ev_SS(ev_SS(ev_0)).apply() == 4;
}
lemma b1() ensures !Even(3) {
if r: EvenRule :| r.apply() == 3 {
assert r.ev_SS? && r.r.apply() == 1;
}
}
lemma b2(n: nat) requires Even(n) ensures Even(n + 2) {
var r: EvenRule :| r.apply() == n;
assert ev_SS(r).apply() == n + 2;
}
lemma b3(n: nat) requires Even(n + 2) ensures Even(n) {
var r: EvenRule :| r.apply() == n + 2;
assert r.ev_SS? && r.r.apply() == n;
}
// Workaround 2: using "higher-order" predicates
type P = nat -> bool
ghost predicate Ev(ev: P) {
&& ev(0)
&& (forall n: nat | ev(n) :: ev(n + 2))
}
// we explicitly say that `ev` is the "strictest" `P` that satisfies `Ev`:
ghost predicate Minimal(Ev: P -> bool, ev: P) {
&& Ev(ev)
&& (forall ev': P, n: nat | Ev(ev') :: ev(n) ==> ev'(n))
}
// In this approach, some lemmas are a bit tricky to prove...
lemma c0(ev: P) requires Minimal(Ev, ev) ensures ev(4) {
assert ev(2);
}
lemma c1(ev: P) requires Minimal(Ev, ev) ensures !ev(3) {
var cex := (n: nat) => ( // `cex` stands for "counterexample"
n != 1 && n != 3
);
assert Ev(cex);
}
lemma c2(ev: P, n: nat) requires Minimal(Ev, ev) && ev(n) ensures ev(n + 2) {}
lemma c3(ev: P, n: nat) requires Minimal(Ev, ev) && ev(n + 2) ensures ev(n) {
if !ev(n) {
var cex := (m: nat) => (
m != n + 2 && ev(m)
);
assert Ev(cex);
}
}
// Finally, we "circularly" prove the equivalence among these three:
lemma a_implies_b(n: nat) requires even(n) ensures Even(n) {
if n == 0 {
assert ev_0.apply() == 0;
} else {
a_implies_b(n - 2);
var r: EvenRule :| r.apply() == n - 2;
assert ev_SS(r).apply() == n;
}
}
lemma b_implies_c(ev: P, n: nat) requires Minimal(Ev, ev) && Even(n) ensures ev(n) {
var r: EvenRule :| r.apply() == n;
if r.ev_SS? {
assert r.r.apply() == n - 2;
b_implies_c(ev, n - 2);
}
}
lemma c_implies_a(ev: P, n: nat) requires Minimal(Ev, ev) && ev(n) ensures even(n) {
if n == 1 {
var cex := (m: nat) => (
m != 1
);
assert Ev(cex);
} else if n >= 2 {
c3(ev, n - 2);
c_implies_a(ev, n - 2);
}
}
| // This file demonstrates how to "close" a critical "gap" between definitions
// between Dafny and Coq.
// In general, most commonly-used "building blocks" in Coq can be mapped to Dafny:
// [Coq] [Dafny]
// --------------------------------------------------------------------
// Inductive (Set) datatype
// Definition function/predicate
// Fixpoint function/predicate (with `decreases`)
// Theorem & Proof lemma
// Type (Set, e.g. `list nat`) still a type (e.g. `seq<nat>`)
// Type (Prop, e.g. `1+1==2`) encode in `requires` or `ensures`
// N/A (at least NOT built-in) method (imperative programming)
//
// Inductive (Prop) ??? (discussed in this file)
// Dafny's way to define Coq's `Fixpoint` predicate:
ghost predicate even(n: nat) {
match n {
case 0 => true
case 1 => false
case _ => even(n - 2)
}
}
// all below are automatically proved:
lemma a0() ensures even(4) {}
lemma a1() ensures !even(3) {}
lemma a2(n: nat) requires even(n) ensures even(n + 2) {}
lemma a3(n: nat) requires even(n + 2) ensures even(n) {}
// Dafny lacks syntax to define `Inductive` Prop like in Coq.
// We'll show two workarounds for this.
// Workaround 1: simulate with "rules"
datatype EvenRule =
| ev_0
| ev_SS(r: EvenRule)
{
ghost function apply(): nat {
match this {
case ev_0 => 0
case ev_SS(r) => r.apply() + 2
}
}
}
ghost predicate Even(n: nat) {
exists r: EvenRule :: r.apply() == n
}
// then we can prove by "constructing" or "destructing" just like in Coq:
lemma b0() ensures Even(4) {
}
lemma b1() ensures !Even(3) {
if r: EvenRule :| r.apply() == 3 {
}
}
lemma b2(n: nat) requires Even(n) ensures Even(n + 2) {
var r: EvenRule :| r.apply() == n;
}
lemma b3(n: nat) requires Even(n + 2) ensures Even(n) {
var r: EvenRule :| r.apply() == n + 2;
}
// Workaround 2: using "higher-order" predicates
type P = nat -> bool
ghost predicate Ev(ev: P) {
&& ev(0)
&& (forall n: nat | ev(n) :: ev(n + 2))
}
// we explicitly say that `ev` is the "strictest" `P` that satisfies `Ev`:
ghost predicate Minimal(Ev: P -> bool, ev: P) {
&& Ev(ev)
&& (forall ev': P, n: nat | Ev(ev') :: ev(n) ==> ev'(n))
}
// In this approach, some lemmas are a bit tricky to prove...
lemma c0(ev: P) requires Minimal(Ev, ev) ensures ev(4) {
}
lemma c1(ev: P) requires Minimal(Ev, ev) ensures !ev(3) {
var cex := (n: nat) => ( // `cex` stands for "counterexample"
n != 1 && n != 3
);
}
lemma c2(ev: P, n: nat) requires Minimal(Ev, ev) && ev(n) ensures ev(n + 2) {}
lemma c3(ev: P, n: nat) requires Minimal(Ev, ev) && ev(n + 2) ensures ev(n) {
if !ev(n) {
var cex := (m: nat) => (
m != n + 2 && ev(m)
);
}
}
// Finally, we "circularly" prove the equivalence among these three:
lemma a_implies_b(n: nat) requires even(n) ensures Even(n) {
if n == 0 {
} else {
a_implies_b(n - 2);
var r: EvenRule :| r.apply() == n - 2;
}
}
lemma b_implies_c(ev: P, n: nat) requires Minimal(Ev, ev) && Even(n) ensures ev(n) {
var r: EvenRule :| r.apply() == n;
if r.ev_SS? {
b_implies_c(ev, n - 2);
}
}
lemma c_implies_a(ev: P, n: nat) requires Minimal(Ev, ev) && ev(n) ensures even(n) {
if n == 1 {
var cex := (m: nat) => (
m != 1
);
} else if n >= 2 {
c3(ev, n - 2);
c_implies_a(ev, n - 2);
}
}
|
337 | Program-Verification-Dataset_tmp_tmpgbdrlnu__Dafny_verified algorithms_lol_sort.dfy | // By `lol sort` here, I refer to a seemingly-broken sorting algorithm,
// which actually somehow manages to work perfectly:
//
// for i in 0..n
// for j in 0..n
// if i < j
// swap a[i], a[j]
//
// It is perhaps the simplest sorting algorithm to "memorize",
// even "symmetrically beautiful" as if `i` and `j` just played highly
// similar roles. And technically it's still O(n^2) time lol...
//
// Proving its correctness is tricky (interesting) though.
// Successfully verified with [Dafny 3.3.0.31104] in about 5 seconds.
// We define "valid permutation" using multiset:
predicate valid_permut(a: seq<int>, b: seq<int>)
requires |a| == |b|
{
multiset(a) == multiset(b)
}
// This is a swap-based sorting algorithm, so permutedness is trivial:
// note that: if i == j, the spec just says a[..] remains the same.
method swap(a: array<int>, i: int, j: int)
requires 0 <= i < a.Length && 0 <= j < a.Length
modifies a
ensures a[..] == old(a[..]) [i := old(a[j])] [j := old(a[i])]
ensures valid_permut(a[..], old(a[..]))
{
a[i], a[j] := a[j], a[i];
}
// We then define "sorted" (by increasing order):
predicate sorted(a: seq<int>)
{
forall i, j | 0 <= i <= j < |a| :: a[i] <= a[j]
}
// Now, the lol sort algorithm:
// (Some invariants were tricky to find, but Dafny was smart enough otherwise)
method lol_sort(a: array<int>)
modifies a
ensures valid_permut(a[..], old(a[..]))
ensures sorted(a[..])
{
for i := 0 to a.Length
invariant valid_permut(a[..], old(a[..]))
invariant sorted(a[..i])
{
for j := 0 to a.Length
invariant valid_permut(a[..], old(a[..]))
invariant j < i ==> forall k | 0 <= k < j :: a[k] <= a[i]
invariant j < i ==> sorted(a[..i])
invariant j >= i ==> sorted(a[..i+1])
{
if a[i] < a[j] {
swap(a, i, j);
}
}
}
}
method Main() {
var a := new int[] [3,1,4,1,5,9,2,6];
lol_sort(a);
print a[..];
// `expect` is a run-time assert, more suitable than `assert` on complicated testcases:
expect a[..] == [1,1,2,3,4,5,6,9];
var empty := new int[] [];
lol_sort(empty);
assert empty[..] == [];
}
| // By `lol sort` here, I refer to a seemingly-broken sorting algorithm,
// which actually somehow manages to work perfectly:
//
// for i in 0..n
// for j in 0..n
// if i < j
// swap a[i], a[j]
//
// It is perhaps the simplest sorting algorithm to "memorize",
// even "symmetrically beautiful" as if `i` and `j` just played highly
// similar roles. And technically it's still O(n^2) time lol...
//
// Proving its correctness is tricky (interesting) though.
// Successfully verified with [Dafny 3.3.0.31104] in about 5 seconds.
// We define "valid permutation" using multiset:
predicate valid_permut(a: seq<int>, b: seq<int>)
requires |a| == |b|
{
multiset(a) == multiset(b)
}
// This is a swap-based sorting algorithm, so permutedness is trivial:
// note that: if i == j, the spec just says a[..] remains the same.
method swap(a: array<int>, i: int, j: int)
requires 0 <= i < a.Length && 0 <= j < a.Length
modifies a
ensures a[..] == old(a[..]) [i := old(a[j])] [j := old(a[i])]
ensures valid_permut(a[..], old(a[..]))
{
a[i], a[j] := a[j], a[i];
}
// We then define "sorted" (by increasing order):
predicate sorted(a: seq<int>)
{
forall i, j | 0 <= i <= j < |a| :: a[i] <= a[j]
}
// Now, the lol sort algorithm:
// (Some invariants were tricky to find, but Dafny was smart enough otherwise)
method lol_sort(a: array<int>)
modifies a
ensures valid_permut(a[..], old(a[..]))
ensures sorted(a[..])
{
for i := 0 to a.Length
{
for j := 0 to a.Length
{
if a[i] < a[j] {
swap(a, i, j);
}
}
}
}
method Main() {
var a := new int[] [3,1,4,1,5,9,2,6];
lol_sort(a);
print a[..];
// `expect` is a run-time assert, more suitable than `assert` on complicated testcases:
expect a[..] == [1,1,2,3,4,5,6,9];
var empty := new int[] [];
lol_sort(empty);
}
|
338 | Programmverifikation-und-synthese_tmp_tmppurk6ime_PVS_Assignment_ex_04_Hoangkim_ex_04_Hoangkim.dfy | //Problem 01
method sumOdds(n: nat) returns (sum: nat)
requires n > 0;
ensures sum == n * n;
{
sum := 1;
var i := 0;
while i < n-1
invariant 0 <= i < n;
invariant sum == (i + 1) * (i + 1);
{
i := i + 1;
sum := sum + 2 * i + 1;
}
assert sum == n * n;
}
//problem02
//a)
method intDiv(n:int, d:int) returns (q:int, r:int)
requires n >= d && n >= 0 && d > 0 ;
ensures (d*q)+r == n && 0 <= q <= n/2 && 0 <= r < d;
//b)c)
method intDivImpl(n:int, d:int) returns (q:int, r:int)
requires n >= d && n >= 0 && d > 0;
ensures (d*q)+r == n && 0 <= q <= n/2 && 0 <= r < d;
{
q := 0;
r := n;
while r >= d
invariant r == n - q * d;
invariant d <= r;
r := r-1;
{
r := r - d;
q := q + 1;
}
assert n == (d * q) + r;
}
| //Problem 01
method sumOdds(n: nat) returns (sum: nat)
requires n > 0;
ensures sum == n * n;
{
sum := 1;
var i := 0;
while i < n-1
{
i := i + 1;
sum := sum + 2 * i + 1;
}
}
//problem02
//a)
method intDiv(n:int, d:int) returns (q:int, r:int)
requires n >= d && n >= 0 && d > 0 ;
ensures (d*q)+r == n && 0 <= q <= n/2 && 0 <= r < d;
//b)c)
method intDivImpl(n:int, d:int) returns (q:int, r:int)
requires n >= d && n >= 0 && d > 0;
ensures (d*q)+r == n && 0 <= q <= n/2 && 0 <= r < d;
{
q := 0;
r := n;
while r >= d
r := r-1;
{
r := r - d;
q := q + 1;
}
}
|
339 | Programmverifikation-und-synthese_tmp_tmppurk6ime_PVS_Assignment_ex_05_Hoangkim_ex_05_Hoangkim.dfy |
//Problem01
function fib(n: nat):nat
{
if n < 2 then n else fib(n-2)+fib(n-1)
}
method fibIter(n:nat) returns (a:nat)
requires n > 0
ensures a == fib(n)
{
a := 0;
var b,x := 1,0;
while x < n
invariant 0 <= x <= n
invariant a == fib(x)
invariant b == fib(x+1)
{
a,b := b,a+b;
//why a,b := b,a+b is okay
//but when I write a := b; //# Because this
// b := a+b; //# is not the same !!
//is error? //# {a = 1 , b = 2 } a := b ; b := a+b { b = 4 }, but
x := x+1; //# {a = 1 , b = 2 } a, b := b,a+b { b = 3 }
}
assert a == fib(n);
}
//# 2 pts
//Problem02
function fact(n:nat):nat
{if n==0 then 1 else n*fact(n-1)}
method factIter(n:nat) returns (a:nat)
requires n >= 0;
ensures a == fact(n)
{
a := 1;
var i := 1;
while i <= n
invariant 1 <= i <= n+1
invariant a == fact(i-1)
{
a := a * i;
i := i + 1;
}
assert a == fact(n);
}
//# 3 pts
//Problem03
function gcd(m: nat, n: nat): nat
requires m > 0 && n > 0
{
if m == n then m
else if m > n then gcd(m - n, n)
else gcd(m, n - m)
}
method gcdI(m: int, n: int) returns (g: int)
requires m > 0 && n > 0
ensures g == gcd(m, n);
{
var x: int;
g := m;
x := n;
while (g != x)
invariant x > 0;
invariant g > 0;
invariant gcd(g, x) == gcd(m, n);
decreases x+g;
{
if (g > x)
{
g := g - x;
}
else
{
x := x - g;
}
}
}
//# 3 pts
// # sum: 9 pts
|
//Problem01
function fib(n: nat):nat
{
if n < 2 then n else fib(n-2)+fib(n-1)
}
method fibIter(n:nat) returns (a:nat)
requires n > 0
ensures a == fib(n)
{
a := 0;
var b,x := 1,0;
while x < n
{
a,b := b,a+b;
//why a,b := b,a+b is okay
//but when I write a := b; //# Because this
// b := a+b; //# is not the same !!
//is error? //# {a = 1 , b = 2 } a := b ; b := a+b { b = 4 }, but
x := x+1; //# {a = 1 , b = 2 } a, b := b,a+b { b = 3 }
}
}
//# 2 pts
//Problem02
function fact(n:nat):nat
{if n==0 then 1 else n*fact(n-1)}
method factIter(n:nat) returns (a:nat)
requires n >= 0;
ensures a == fact(n)
{
a := 1;
var i := 1;
while i <= n
{
a := a * i;
i := i + 1;
}
}
//# 3 pts
//Problem03
function gcd(m: nat, n: nat): nat
requires m > 0 && n > 0
{
if m == n then m
else if m > n then gcd(m - n, n)
else gcd(m, n - m)
}
method gcdI(m: int, n: int) returns (g: int)
requires m > 0 && n > 0
ensures g == gcd(m, n);
{
var x: int;
g := m;
x := n;
while (g != x)
{
if (g > x)
{
g := g - x;
}
else
{
x := x - g;
}
}
}
//# 3 pts
// # sum: 9 pts
|
340 | Programmverifikation-und-synthese_tmp_tmppurk6ime_PVS_Assignment_ex_06_Hoangkim_ex06-solution.dfy | ghost function gcd(x:int,y:int):int
requires x > 0 && y > 0
{
if x==y then x
else if x > y then gcd(x-y,y)
else gcd(x,y-x)
}
method gcdI(m:int, n:int) returns (d:int)
requires m > 0 && n > 0
ensures d == gcd(m,n)
{
var x,y := m,n;
d := 1;
while x != y
decreases x+y
invariant x > 0 && y > 0
invariant gcd(x,y) == gcd(m,n)
{ if x > y { x := x-y; } else { y := y-x; }
}
d := x;
}
ghost function gcd'(x:int,y:int):int
requires x > 0 && y > 0
decreases x+y,y // x+y decreases or x+y remains unchanged while y decreases
{
if x==y then x
else if x > y then gcd'(x-y,y)
else gcd'(y,x)
}
| ghost function gcd(x:int,y:int):int
requires x > 0 && y > 0
{
if x==y then x
else if x > y then gcd(x-y,y)
else gcd(x,y-x)
}
method gcdI(m:int, n:int) returns (d:int)
requires m > 0 && n > 0
ensures d == gcd(m,n)
{
var x,y := m,n;
d := 1;
while x != y
{ if x > y { x := x-y; } else { y := y-x; }
}
d := x;
}
ghost function gcd'(x:int,y:int):int
requires x > 0 && y > 0
{
if x==y then x
else if x > y then gcd'(x-y,y)
else gcd'(y,x)
}
|
341 | Programmverifikation-und-synthese_tmp_tmppurk6ime_PVS_Assignment_ex_06_Hoangkim_ex_06_hoangkim.dfy |
//Problem01
//a)
ghost function gcd(x: int, y: int): int
requires x > 0 && y > 0
{
if x == y then x
else if x > y then gcd(x - y, y)
else gcd(x, y - x)
}
method gcdI(m: int, n: int) returns (d: int)
requires m > 0 && n > 0
ensures d == gcd(m, n);
{
var x: int;
d := m;
x := n;
while (d != x)
invariant x > 0;
invariant d > 0;
invariant gcd(d, x) == gcd(m, n);
decreases x+d;
{
if (d > x)
{
d := d - x;
}
else
{
x := x - d;
}
}
}
//b)
ghost function gcd'(x: int, y: int): int
requires x > 0 && y > 0
decreases if x > y then x else y
{
if x == y then x
else if x > y then gcd'(x - y, y)
else gcd(y, x)
}
|
//Problem01
//a)
ghost function gcd(x: int, y: int): int
requires x > 0 && y > 0
{
if x == y then x
else if x > y then gcd(x - y, y)
else gcd(x, y - x)
}
method gcdI(m: int, n: int) returns (d: int)
requires m > 0 && n > 0
ensures d == gcd(m, n);
{
var x: int;
d := m;
x := n;
while (d != x)
{
if (d > x)
{
d := d - x;
}
else
{
x := x - d;
}
}
}
//b)
ghost function gcd'(x: int, y: int): int
requires x > 0 && y > 0
{
if x == y then x
else if x > y then gcd'(x - y, y)
else gcd(y, x)
}
|
342 | Programmverifikation-und-synthese_tmp_tmppurk6ime_PVS_Assignment_ex_07_Hoangkim_ex07_Hoangkim.dfy | //Problem01
//a)
method swap(a: array<int>, i: nat, j: nat)
modifies a
requires a != null && a.Length > 0 && i < a.Length && j < a.Length
ensures a[i] == old(a[j])
ensures a[j] == old(a[i])
{
a[i], a[j] := a[j], a[i];
}
//b)
//Problem04
method FindMin(a: array<int>, lo: nat) returns (minIdx: nat)
requires a != null && a.Length > 0 && lo < a.Length
ensures lo <= minIdx < a.Length
ensures forall x :: lo <= x < a.Length ==> a[minIdx] <= a[x]
{
var j := lo;
minIdx := lo;
while j < a.Length
invariant lo <= j <= a.Length
invariant lo <= minIdx < a.Length
invariant forall k :: lo <= k < j ==> a[k] >= a[minIdx]
decreases a.Length - j
{
if(a[j] < a[minIdx]) { minIdx := j; }
j := j + 1;
}
}
//Problem02
ghost predicate sorted(a:seq<int>)
{
forall i | 0 < i < |a| :: a[i-1] <= a[i]
}
method selectionSort(a: array<int>)
modifies a
//ensures multiset(a[..]) == multiset(old(a[..]))
//ensures sorted(a[..])
{
var i := 0;
while(i < a.Length)
invariant 0 <= i <= a.Length
invariant forall k, l :: 0 <= k < i <= l < a.Length ==> a[k] <= a[l]
invariant sorted(a[..i])
{
var mx := FindMin(a, i);
//swap(a,i,mx);
a[i], a[mx] := a[mx], a[i];
i := i + 1;
}
}
//Problem03
| //Problem01
//a)
method swap(a: array<int>, i: nat, j: nat)
modifies a
requires a != null && a.Length > 0 && i < a.Length && j < a.Length
ensures a[i] == old(a[j])
ensures a[j] == old(a[i])
{
a[i], a[j] := a[j], a[i];
}
//b)
//Problem04
method FindMin(a: array<int>, lo: nat) returns (minIdx: nat)
requires a != null && a.Length > 0 && lo < a.Length
ensures lo <= minIdx < a.Length
ensures forall x :: lo <= x < a.Length ==> a[minIdx] <= a[x]
{
var j := lo;
minIdx := lo;
while j < a.Length
{
if(a[j] < a[minIdx]) { minIdx := j; }
j := j + 1;
}
}
//Problem02
ghost predicate sorted(a:seq<int>)
{
forall i | 0 < i < |a| :: a[i-1] <= a[i]
}
method selectionSort(a: array<int>)
modifies a
//ensures multiset(a[..]) == multiset(old(a[..]))
//ensures sorted(a[..])
{
var i := 0;
while(i < a.Length)
{
var mx := FindMin(a, i);
//swap(a,i,mx);
a[i], a[mx] := a[mx], a[i];
i := i + 1;
}
}
//Problem03
|
343 | Programmverifikation-und-synthese_tmp_tmppurk6ime_PVS_Assignment_ex_10_Hoangkim_ex10_hoangkim.dfy | //Problem01
method square0(n:nat) returns (sqn : nat)
ensures sqn == n*n
{
sqn := 0;
var i:= 0;
var x;
while i < n
invariant i <= n && sqn == i*i
{
x := 2*i+1;
sqn := sqn+x;
i := i+1;
}
}
/*
3 Verification conditions
 1. VC1: Precondition implies the loop invariant
n ∈ ℕ => sqn = 0*0 ∧ i = 0 ∧ x=? ∧ i≤n
n >= 0 => 0 = 0*0 ∧ i = 0 ∧ i≤n
n >= 0 => 0 = 0*0 ∧ 0 ≤ n
2. VC2: Loop invariant and loop guard preserve the loop invariant.
VC2: i < n ∧ i+1 ≤ n ∧ sqn = i * i ⇒ sqn = sqn + x ∧ i = i + 1 ∧ x = 2 * i + 1
3.VC3: Loop terminates, and the loop invariant implies the postcondition.
VC3: ¬(i < n) ∧ i ≤ n ∧ sqn = i * i ⇒ sqn = n * n
Simplified VC for square0
1. true, since 0 = 0 and n >= 0 => 0 ≤ n
2. true, i < n => i + 1 <= n
3. true, ¬(i < n) ∧ i ≤ n ∧ sqn = i * i ⇒ sqn = n * n since ¬(i < n) ∧ i ≤ n imply i = n
*/
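// A minimal sketch of the three VCs above, spelled out in havoc/assume/assert
// form (square0_vc_sketch is a hypothetical helper added for illustration, not
// part of the original exercise; it uses the same invariant i <= n && sqn == i*i):
method square0_vc_sketch(n: nat)
{
    var sqn := 0;
    var i := 0;
    assert i <= n && sqn == i*i;     // VC1: the invariant holds on entry
    sqn, i := *, *;                  // havoc the variables assigned by the loop
    assume i <= n && sqn == i*i;     // assume the invariant at the loop head
    if i < n {                       // one arbitrary loop iteration
        var x := 2*i+1;
        sqn := sqn+x;
        i := i+1;
        assert i <= n && sqn == i*i; // VC2: the invariant is preserved
        assume false;                // stop this branch here
    }
    assert sqn == n*n;               // VC3: invariant + negated guard imply the post
}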
method square1(n:nat) returns (sqn : nat)
ensures sqn == n*n
{
sqn := 0;
var i:= 0;
while i < n
invariant i <= n && sqn == i*i
{
var x := 2*i+1;
sqn := sqn+x;
i := i+1;
}
}
//Problem02
//As you can see below, Dafny claims that after executing the following method
//strange() we will have that 1=2;
method q(x:nat, y:nat) returns (z:nat)
requires y - x > 2
ensures x < z*z < y
method strange()
ensures 1==2
{
var x := 4;
var c:nat := q(x,2*x);
}
/*(a). Do you have an explanation for this behaviour?
Answer:
the method strange() doesn't have any input or output. This method initializes
variable x with value 4. Then it calculates variable c as a result of calling
method 'q' with x as the first argument and 2*x as the second. The method q has
no body, so Dafny simply assumes its postcondition x < c*c < 2*x after the call;
for x == 4 no natural number c satisfies it, so any postcondition follows.
We can change ensures in strange() to false and it's still verified
*/
/*(b) {true} var x:nat := 4; var c := q(x,2*x); {1 = 2}
precondition of q: the difference between 'y' and 'x' must be greater than 2,
and the square of 'z' will be a value strictly between 'x' and 'y'
apply the Hoare rules step by step:
1. {true} as a precondition
2. we assign 4 to 'x', giving {4 = 4}
3. assign the value q(x, 2 * x) to c and substitute the postcondition of 'q' in place of 'c':
the postcondition of q is x < z*z < 2*x, so replacing c we have {x < z * z < 2 * x}
4. we now have {x < z*z < 2*x} => {1 = 2} as the remaining obligation;
{1 = 2} is always false, so the step only goes through because {x < z*z < 2*x} is itself unsatisfiable for x == 4
*/
//Problem 3
//Use what you know about the weakest preconditions/strongest postconditions to
//explain why the following code verifies:
method test0(){
var x:int := *;
assume x*x < 100;
assert x<= 9;
}
/*
WP: is a condition that, if satisfied before the execution of a program, guarantees the
satisfaction of a specified postcondition
SP: is a condition that must hold after the execution of a program, assuming a specified
precondition
The strongest postcondition for assert is x<=9
Analyze the code:
The strongest postcondition for the assert statement assert x <= 9; is x <= 9. This
postcondition asserts that the value of x should be less than or equal to 9 after the
execution of the program. To ensure this postcondition, we need to find a weakest precondition
(WP) that guarantees x <= 9 after executing the code.
The "assume" statement introduces a precondition.
It assumes that the square of x is less than 100. In other words, it assumes that x is
in the range (-10, 10), i.e. -9 <= x <= 9, since 10 * 10 = 100; hence x <= 9 holds.
*/
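// A minimal sketch of the point above (test0_wp is a hypothetical method, not
// part of the original exercise): writing the assume as a branch makes the
// weakest precondition explicit, since the assert is only checked under the
// hypothesis x*x < 100; the obligation is exactly x*x < 100 ==> x <= 9.
method test0_wp(){
    var x:int := *;
    if x*x < 100 {
        assert x <= 9;  // the same proof obligation as in test0 above
    }
}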
| //Problem01
method square0(n:nat) returns (sqn : nat)
ensures sqn == n*n
{
sqn := 0;
var i:= 0;
var x;
while i < n
{
x := 2*i+1;
sqn := sqn+x;
i := i+1;
}
}
/*
3 Verification conditions
 1. VC1: Precondition implies the loop invariant
n ∈ ℕ => sqn = 0*0 ∧ i = 0 ∧ x=? ∧ i≤n
n >= 0 => 0 = 0*0 ∧ i = 0 ∧ i≤n
n >= 0 => 0 = 0*0 ∧ 0 ≤ n
2. VC2: Loop invariant and loop guard preserve the loop invariant.
VC2: i < n ∧ i+1 ≤ n ∧ sqn = i * i ⇒ sqn = sqn + x ∧ i = i + 1 ∧ x = 2 * i + 1
3.VC3: Loop terminates, and the loop invariant implies the postcondition.
VC3: ¬(i < n) ∧ i ≤ n ∧ sqn = i * i ⇒ sqn = n * n
Simplified VC for square0
1. true, since 0 = 0 and n >= 0 => 0 ≤ n
2. true, i < n => i + 1 <= n
3. true, ¬(i < n) ∧ i ≤ n ∧ sqn = i * i ⇒ sqn = n * n since ¬(i < n) ∧ i ≤ n imply i = n
*/
method square1(n:nat) returns (sqn : nat)
ensures sqn == n*n
{
sqn := 0;
var i:= 0;
while i < n
{
var x := 2*i+1;
sqn := sqn+x;
i := i+1;
}
}
//Problem02
//As you can see below, Dafny claims that after executing the following method
//strange() we will have that 1=2;
method q(x:nat, y:nat) returns (z:nat)
requires y - x > 2
ensures x < z*z < y
method strange()
ensures 1==2
{
var x := 4;
var c:nat := q(x,2*x);
}
/*(a). Do you have an explanation for this behaviour?
Answer:
the method strange() doesn't have any input or output. This method initializes
variable x with value 4. Then it calculates variable c as a result of calling
method 'q' with x as the first argument and 2*x as the second. The method q has
no body, so Dafny simply assumes its postcondition x < c*c < 2*x after the call;
for x == 4 no natural number c satisfies it, so any postcondition follows.
We can change ensures in strange() to false and it's still verified
*/
/*(b) {true}var x:nat := 4; var c := q(x,2*x); {1 = 2 }
precond in strange(): difference between 'y' and 'x' muss be greater than 2,
square from 'z' will be a value between 'x' and 'y'
apply the Hoare rules step by step:
1. {true} as a precondition
2. we assign 4 to 'x' and having {4=4}
3. assign value q(x, 2 * x) to c, substitute the postcondition of 'q' in place of 'c'
post cond of q will be x < z*z < 2*x. Replacing c we having {x < z * z < 2 * x}
4. we having the statement {x < z*z < 2*x} => {1 = 2} as postcondtion
as we know the statment {1 = 2} is always false. true => false is always false
*/
//Problem 3
//Use what you know about the weakest preconditions/strongest postconditions to
//explain why the following code verifies:
method test0(){
var x:int := *;
assume x*x < 100;
}
/*
WP: is a condition that, if satisfied before the execution of a program, guarantees the
satisfaction of a specified postcondition
SP: is a condition that must hold after the execution of a program, assuming a specified
precondition
The strongest postcondition for assert is x<=9
Analyze the code:
The strongest postcondition for the assert statement assert x <= 9; is x <= 9. This
postcondition asserts that the value of x should be less than or equal to 9 after the
execution of the program. To ensure this postcondition, we need to find a weakest precondition
(WP) that guarantees x <= 9 after executing the code.
The "assume" statement introduces a precondition.
It assumes that the square of x is less than 100. In other words, it assumes that x is
in the range (-10, 10), i.e. -9 <= x <= 9, since 10 * 10 = 100; hence x <= 9 holds.
*/
|
344 | Programmverifikation-und-synthese_tmp_tmppurk6ime_example_DafnyIntro_01_Simple_Loops.dfy | // ****************************************************************************************
// DafnyIntro.dfy
// ****************************************************************************************
// We write a program to sum all numbers from 1 to n
//
// Gauss' formula states that 1 + 2 + 3 + ... + (n-1) + n == n*(n+1)/2
//
// We take this as a specification, thus in effect we use Dafny to prove Gauss' formula:
// In essence Dafny does an inductive proof. It needs help with a loop "invariant".
// This is a condition which is
// - true at the beginning of the loop
// - maintained with each passage through the loop body.
// These requirements correspond to an inductive proof
// - the invariant is the inductive hypothesis H(i)
// - it must be true for i=0
// - it must remain true when stepping from i to i+1,
// Here we use two invariants I1 and I2, which amounts to the same as using I1 && I2:
method Gauss(n:int) returns (sum:int)
requires n >= 0
ensures sum == n*(n+1)/2 //
{
sum := 0;
var i := 0;
while i < n
invariant sum == i*(i+1)/2
invariant i <= n
{
i := i+1;
sum := sum + i;
}
}
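// A small sketch of the induction step encoded by the invariant above
// (GaussStep is a hypothetical lemma, not in the original file): adding the
// next number i+1 to i*(i+1)/2 yields (i+1)*((i+1)+1)/2, because the two
// products below differ by exactly 2*(i+1).
lemma GaussStep(i: nat)
  ensures i*(i+1) + 2*(i+1) == (i+1)*(i+2)
{
  assert (i+1)*(i+2) == (i+1)*i + (i+1)*2;
}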
// As a second example, we add the first n odd numbers
// This yields n*n, i.e.
//
// 1 + 3 + 5 + 7 + 9 + 11 + ... 2n+1 == n*n
//
// Here is the proof using Dafny:
method sumOdds(n:nat) returns (sum:nat)
ensures sum == n*n;
{
sum := 0;
var i := 0;
while i < n
invariant sum == i*i // the inductive hypothesis
invariant i <= n
{
sum := sum + 2*i+1;
i := i+1; // the step from i to i+1
}
}
// This verifies, so the proof is complete !!
| // ****************************************************************************************
// DafnyIntro.dfy
// ****************************************************************************************
// We write a program to sum all numbers from 1 to n
//
// Gauss' formula states that 1 + 2 + 3 + ... + (n-1) + n == n*(n+1)/2
//
// We take this as a specification, thus in effect we use Dafny to prove Gauss' formula:
// In essence Dafny does an inductive proof. It needs help with a loop "invariant".
// This is a condition which is
// - true at the beginning of the loop
// - maintained with each passage through the loop body.
// These requirements correspond to an inductive proof
// - the invariant is the inductive hypothesis H(i)
// - it must be true for i=0
// - it must remain true when stepping from i to i+1,
// Here we use two invariants I1 and I2, which amounts to the same as using I1 && I2:
method Gauss(n:int) returns (sum:int)
requires n >= 0
ensures sum == n*(n+1)/2 //
{
sum := 0;
var i := 0;
while i < n
{
i := i+1;
sum := sum + i;
}
}
// As a second example, we add the first n odd numbers
// This yields n*n, i.e.
//
// 1 + 3 + 5 + 7 + 9 + 11 + ... 2n+1 == n*n
//
// Here is the proof using Dafny:
method sumOdds(n:nat) returns (sum:nat)
ensures sum == n*n;
{
sum := 0;
var i := 0;
while i < n
{
sum := sum + 2*i+1;
i := i+1; // the step from i to i+1
}
}
// This verifies, so the proof is complete !!
|
345 | ProjectosCVS_tmp_tmp_02_gmcw_Handout 1_CVS_handout1_55754_55780.dfy | /**
CVS 2021-22 Handout 1
Authors
Gonçalo Martins Lourenço nº55780
Joana Soares Faria nº55754
*/
// First Exercise
lemma peasantMultLemma(a:int, b:int)
requires b >= 0
ensures b % 2 == 0 ==> (a * b == 2 * a * b / 2)
ensures b % 2 == 1 ==> (a * b == a + 2 * a * (b - 1) / 2)
{
if (b % 2 == 0 && b > 0) {
peasantMultLemma(a, b - 2);
}
if (b % 2 == 1 && b > 1) {
peasantMultLemma(a, b - 2);
}
}
method peasantMult(a: int, b: int) returns (r: int)
requires b > 0
ensures r == a * b
{
r := 0;
var aa := a;
var bb := b;
while(bb > 0)
decreases bb
invariant 0 <= bb <= b
invariant r + aa * bb == a * b
{
// Use of lemma was not necessary for a successful verification
// peasantMultLemma(aa, bb);
if (bb % 2 == 0)
{
aa := 2 * aa;
bb := bb / 2;
} else if (bb % 2 == 1)
{
r := r + aa;
aa := 2 * aa;
bb := (bb-1) / 2;
}
}
}
//Second Exercise
method euclidianDiv(a: int,b : int) returns (q: int,r: int)
requires a >= 0
requires b > 0
ensures a == b * q + r
{
r := a;
q := 0;
while(r - b >= 0)
decreases r - b
invariant 0 <= r <= a
// invariant a == b * q + r
invariant r == a - b * q
{
r := r - b;
q := q + 1;
}
}
| /**
CVS 2021-22 Handout 1
Authors
Gonçalo Martins Lourenço nº55780
Joana Soares Faria nº55754
*/
// First Exercise
lemma peasantMultLemma(a:int, b:int)
requires b >= 0
ensures b % 2 == 0 ==> (a * b == 2 * a * b / 2)
ensures b % 2 == 1 ==> (a * b == a + 2 * a * (b - 1) / 2)
{
if (b % 2 == 0 && b > 0) {
peasantMultLemma(a, b - 2);
}
if (b % 2 == 1 && b > 1) {
peasantMultLemma(a, b - 2);
}
}
method peasantMult(a: int, b: int) returns (r: int)
requires b > 0
ensures r == a * b
{
r := 0;
var aa := a;
var bb := b;
while(bb > 0)
{
// Use of lemma was not necessary for a successful verification
// peasantMultLemma(aa, bb);
if (bb % 2 == 0)
{
aa := 2 * aa;
bb := bb / 2;
} else if (bb % 2 == 1)
{
r := r + aa;
aa := 2 * aa;
bb := (bb-1) / 2;
}
}
}
//Second Exercise
method euclidianDiv(a: int,b : int) returns (q: int,r: int)
requires a >= 0
requires b > 0
ensures a == b * q + r
{
r := a;
q := 0;
while(r - b >= 0)
// invariant a == b * q + r
{
r := r - b;
q := q + 1;
}
}
|
346 | QS_BoilerPlate1_tmp_tmpa29vtz9__Ex2.dfy | function sorted(s : seq<int>) : bool {
forall k1, k2 :: 0 <= k1 <= k2 < |s| ==> s[k1] <= s[k2]
}
// Ex1
method copyArr(a : array<int>, l : int, r : int) returns (ret : array<int>)
requires 0 <= l < r <= a.Length
ensures ret[..] == a[l..r]
{
var size := r - l;
ret := new int[size];
var i := 0;
while(i < size)
invariant a[..] == old(a[..])
invariant 0 <= i <= size
invariant ret[..i] == a[l..(l + i)]
decreases size - i
{
ret[i] := a[i + l];
i := i + 1;
}
return;
}
// Ex2
method mergeArr(a : array<int>, l : int, m : int, r : int)
requires 0 <= l < m < r <= a.Length
requires sorted(a[l..m]) && sorted(a[m..r])
ensures sorted(a[l..r])
ensures a[..l] == old(a[..l])
ensures a[r..] == old(a[r..])
modifies a
{
var left := copyArr(a, l, m);
var right := copyArr(a, m, r);
var i := 0;
var j := 0;
var cur := l;
ghost var old_arr := a[..];
while(cur < r)
decreases a.Length - cur
invariant 0 <= i <= left.Length
invariant 0 <= j <= right.Length
invariant l <= cur <= r
invariant cur == i + j + l
invariant a[..l] == old_arr[..l]
invariant a[r..] == old_arr[r..]
invariant sorted(a[l..cur])
invariant sorted(left[..])
invariant sorted(right[..])
invariant i < left.Length && cur > l ==> a[cur - 1] <= left[i]
invariant j < right.Length && cur > l ==> a[cur - 1] <= right[j]
{
if((i == left.Length && j < right.Length) || (j != right.Length && left[i] > right[j])) {
a[cur] := right[j];
j := j + 1;
}
else if((j == right.Length && i < left.Length) || (i != left.Length && left[i] <= right[j])) {
a[cur] := left[i];
i := i + 1;
}
cur := cur + 1;
}
return;
}
// Ex3
method sort(a : array<int>)
ensures sorted(a[..])
modifies a
{
if(a.Length == 0) { return; }
else { sortAux(a, 0, a.Length); }
}
method sortAux(a : array<int>, l : int, r : int)
ensures sorted(a[l..r])
ensures a[..l] == old(a[..l])
ensures a[r..] == old(a[r..])
requires 0 <= l < r <= a.Length
modifies a
decreases r - l
{
if(l >= (r - 1)) {return;}
else {
var m := l + (r - l) / 2;
sortAux(a, l, m);
sortAux(a, m, r);
mergeArr(a, l, m, r);
return;
}
}
| function sorted(s : seq<int>) : bool {
forall k1, k2 :: 0 <= k1 <= k2 < |s| ==> s[k1] <= s[k2]
}
// Ex1
method copyArr(a : array<int>, l : int, r : int) returns (ret : array<int>)
requires 0 <= l < r <= a.Length
ensures ret[..] == a[l..r]
{
var size := r - l;
ret := new int[size];
var i := 0;
while(i < size)
{
ret[i] := a[i + l];
i := i + 1;
}
return;
}
// Ex2
method mergeArr(a : array<int>, l : int, m : int, r : int)
requires 0 <= l < m < r <= a.Length
requires sorted(a[l..m]) && sorted(a[m..r])
ensures sorted(a[l..r])
ensures a[..l] == old(a[..l])
ensures a[r..] == old(a[r..])
modifies a
{
var left := copyArr(a, l, m);
var right := copyArr(a, m, r);
var i := 0;
var j := 0;
var cur := l;
ghost var old_arr := a[..];
while(cur < r)
{
if((i == left.Length && j < right.Length) || (j != right.Length && left[i] > right[j])) {
a[cur] := right[j];
j := j + 1;
}
else if((j == right.Length && i < left.Length) || (i != left.Length && left[i] <= right[j])) {
a[cur] := left[i];
i := i + 1;
}
cur := cur + 1;
}
return;
}
// Ex3
method sort(a : array<int>)
ensures sorted(a[..])
modifies a
{
if(a.Length == 0) { return; }
else { sortAux(a, 0, a.Length); }
}
method sortAux(a : array<int>, l : int, r : int)
ensures sorted(a[l..r])
ensures a[..l] == old(a[..l])
ensures a[r..] == old(a[r..])
requires 0 <= l < r <= a.Length
modifies a
{
if(l >= (r - 1)) {return;}
else {
var m := l + (r - l) / 2;
sortAux(a, l, m);
sortAux(a, m, r);
mergeArr(a, l, m, r);
return;
}
}
|
347 | SENG2011_tmp_tmpgk5jq85q_ass1_ex7.dfy | // successfully verifies
method BigFoot(step: nat) // DO NOT CHANGE
requires 0 < step <= 42;
{
var indx := 0; // DO NOT CHANGE
while indx<=42 // DO NOT CHANGE
invariant 0 <= indx <= step + 42 && indx % step == 0
decreases 42 - indx
{ indx := indx+step; } // DO NOT CHANGE
assert 0 <= indx <= step + 42 && indx % step == 0 && indx > 42;
}
| // successfully verifies
method BigFoot(step: nat) // DO NOT CHANGE
requires 0 < step <= 42;
{
var indx := 0; // DO NOT CHANGE
while indx<=42 // DO NOT CHANGE
{ indx := indx+step; } // DO NOT CHANGE
}
|
348 | SENG2011_tmp_tmpgk5jq85q_ass1_ex8.dfy | // successfully verifies
method GetEven(a: array<nat>)
requires true;
ensures forall i:int :: 0<=i<a.Length ==> a[i] % 2 == 0
modifies a
{
var i := 0;
while i < a.Length
invariant 0 <= i <= a.Length && forall j:int :: 0<=j<i ==> a[j] % 2 == 0
decreases a.Length - i
{
if a[i] % 2 != 0
{
a[i] := a[i] + 1;
}
i := i + 1;
}
}
| // successfully verifies
method GetEven(a: array<nat>)
requires true;
ensures forall i:int :: 0<=i<a.Length ==> a[i] % 2 == 0
modifies a
{
var i := 0;
while i < a.Length
{
if a[i] % 2 != 0
{
a[i] := a[i] + 1;
}
i := i + 1;
}
}
|
349 | SENG2011_tmp_tmpgk5jq85q_ass2_ex1.dfy | // method verifies
method StringSwap(s: string, i:nat, j:nat) returns (t: string)
requires i >= 0 && j >= 0 && |s| >= 0;
requires |s| > 0 ==> i < |s| && j < |s|;
ensures multiset(s[..]) == multiset(t[..]);
ensures |s| == |t|;
ensures |s| > 0 ==> forall k:nat :: k != i && k != j && k < |s| ==> t[k] == s[k]
ensures |s| > 0 ==> t[i] == s[j] && t[j] == s[i];
ensures |s| == 0 ==> t == s;
{
t := s;
if |s| == 0 {
return t;
}
t := t[i := s[j]];
t := t[j := s[i]];
}
method check() {
var a:string := "1scow2";
var b:string := StringSwap(a, 1, 5);
assert b == "12cows";
var c:string := "";
var d:string := StringSwap(c, 1, 2);
assert c == d;
}
// string == seq<Char>
//give se2011 ass2 ex1.dfy
| // method verifies
method StringSwap(s: string, i:nat, j:nat) returns (t: string)
requires i >= 0 && j >= 0 && |s| >= 0;
requires |s| > 0 ==> i < |s| && j < |s|;
ensures multiset(s[..]) == multiset(t[..]);
ensures |s| == |t|;
ensures |s| > 0 ==> forall k:nat :: k != i && k != j && k < |s| ==> t[k] == s[k]
ensures |s| > 0 ==> t[i] == s[j] && t[j] == s[i];
ensures |s| == 0 ==> t == s;
{
t := s;
if |s| == 0 {
return t;
}
t := t[i := s[j]];
t := t[j := s[i]];
}
method check() {
var a:string := "1scow2";
var b:string := StringSwap(a, 1, 5);
var c:string := "";
var d:string := StringSwap(c, 1, 2);
}
// string == seq<Char>
//give se2011 ass2 ex1.dfy
|
350 | SENG2011_tmp_tmpgk5jq85q_ass2_ex2.dfy | // verifies
// check that string between indexes low and high-1 are sorted
predicate Sorted(a: string, low:int, high:int)
requires 0 <= low <= high <= |a|
{
forall j, k :: low <= j < k < high ==> a[j] <= a[k]
}
method String3Sort(a: string) returns (b: string)
requires |a| == 3;
ensures Sorted(b, 0, |b|);
ensures |a| == |b|;
ensures multiset{b[0], b[1], b[2]} == multiset{a[0], a[1], a[2]};
{
b := a;
if (b[0] > b[1]) {
b := b[0 := b[1]][1 := b[0]];
}
if (b[1] > b[2]) {
b := b[1 := b[2]][2 := b[1]];
}
if (b[0] > b[1]) {
b := b[0 := b[1]][1 := b[0]];
}
}
method check() {
var a:string := "cba";
var b:string := String3Sort(a);
assert b=="abc";
var a1:string := "aaa";
var b1:string := String3Sort(a1);
assert b1=="aaa";
var a2:string := "abc";
var b2:string := String3Sort(a2);
assert b2=="abc";
var a3:string := "cab";
var b3:string := String3Sort(a3);
assert b3=="abc";
var a4:string := "bac";
var b4:string := String3Sort(a4);
assert b4=="abc";
var a5:string := "bba";
var b5:string := String3Sort(a5);
assert b5=="abb";
var a6:string := "aba";
var b6:string := String3Sort(a6);
assert b6=="aab";
var a7:string := "acb";
var b7:string := String3Sort(a7);
assert b7=="abc";
var a8:string := "bca";
var b8:string := String3Sort(a8);
assert b8=="abc";
var a9:string := "bab";
var b9:string := String3Sort(a9);
assert b9=="abb";
var a10:string := "abb";
var b10:string := String3Sort(a10);
assert b10=="abb";
}
| // verifies
// check that string between indexes low and high-1 are sorted
predicate Sorted(a: string, low:int, high:int)
requires 0 <= low <= high <= |a|
{
forall j, k :: low <= j < k < high ==> a[j] <= a[k]
}
method String3Sort(a: string) returns (b: string)
requires |a| == 3;
ensures Sorted(b, 0, |b|);
ensures |a| == |b|;
ensures multiset{b[0], b[1], b[2]} == multiset{a[0], a[1], a[2]};
{
b := a;
if (b[0] > b[1]) {
b := b[0 := b[1]][1 := b[0]];
}
if (b[1] > b[2]) {
b := b[1 := b[2]][2 := b[1]];
}
if (b[0] > b[1]) {
b := b[0 := b[1]][1 := b[0]];
}
}
method check() {
var a:string := "cba";
var b:string := String3Sort(a);
var a1:string := "aaa";
var b1:string := String3Sort(a1);
var a2:string := "abc";
var b2:string := String3Sort(a2);
var a3:string := "cab";
var b3:string := String3Sort(a3);
var a4:string := "bac";
var b4:string := String3Sort(a4);
var a5:string := "bba";
var b5:string := String3Sort(a5);
var a6:string := "aba";
var b6:string := String3Sort(a6);
var a7:string := "acb";
var b7:string := String3Sort(a7);
var a8:string := "bca";
var b8:string := String3Sort(a8);
var a9:string := "bab";
var b9:string := String3Sort(a9);
var a10:string := "abb";
var b10:string := String3Sort(a10);
}
|
351 | SENG2011_tmp_tmpgk5jq85q_ass2_ex3.dfy | // verifies
// all bs are before all as which are before all ds
predicate sortedbad(s:string)
{
// all b's are before all a's and d's
forall i,j :: 0 <= i < |s| && 0 <= j < |s| && s[i] == 'b' && (s[j] == 'a' || s[j] == 'd') ==> i < j &&
// all a's are after all b's
forall i,j :: 0 <= i < |s| && 0 <= j < |s| && s[i] == 'a' && s[j] == 'b' ==> i > j &&
// all a's are before all d's
forall i,j :: 0 <= i < |s| && 0 <= j < |s| && s[i] == 'a' && s[j] == 'd' ==> i < j &&
  // all d's are after all b's and a's
forall i,j :: 0 <= i < |s| && 0 <= j < |s| && s[i] == 'd' && (s[j] == 'a' || s[j] == 'b') ==> i > j
}
method BadSort(a: string) returns (b: string)
requires forall k :: 0 <= k < |a| ==> a[k] == 'b' || a[k] == 'a' || a[k] == 'd';
ensures sortedbad(b);
ensures multiset(a[..]) == multiset(b[..]);
ensures |a| == |b|;
{
b := a;
var next := 0;
var white := 0;
var blue := |b|; // colours between next and blue unsorted
while (next != blue) // if next==blue, no colours left to sort
invariant forall k :: 0 <= k < |b| ==> b[k] == 'b' || b[k] == 'a' || b[k] == 'd';
invariant 0 <= white <= next <= blue <= |b|;
// ensure next, white, blue are correct
invariant forall i :: 0 <= i < white ==> b[i] == 'b';
invariant forall i :: white <= i < next ==> b[i] == 'a';
invariant forall i :: blue <= i < |b| ==> b[i] == 'd';
// all b's are before all a's and d's
invariant forall i,j :: 0 <= i < next && 0 <= j < next && b[i] == 'b' && (b[j] == 'a' || b[j] == 'd') ==> i < j
// all a's are after all b's
invariant forall i,j :: 0 <= i < next && 0 <= j < next && b[i] == 'a' && b[j] == 'b' ==> i > j
// all a's are before all d's
invariant forall i,j :: 0 <= i < next && 0 <= j < next && b[i] == 'a' && b[j] == 'd' ==> i < j
  // all d's are after all b's and a's
invariant forall i,j :: 0 <= i < next && 0 <= j < next && b[i] == 'd' && (b[j] == 'a' || b[j] == 'b') ==> i > j
invariant multiset(b[..]) == multiset(a[..]);
invariant |a| == |b|;
{
if b[next] == 'b' {
var tmp := b[next];
b := b[next := b[white]];
b := b[white := tmp];
next := next + 1;
white := white + 1;
} else if b[next] == 'a' {
next := next + 1;
} else if b[next] == 'd'{
blue := blue - 1;
var tmp := b[next];
b := b[next := b[blue]];
b := b[blue := tmp];
}
}
}
method check() {
var f:string := "dabdabdab";
var g:string := BadSort(f);
assert multiset(f)==multiset(g);
assert sortedbad(g);
/*
f := "dba"; // testcase1
g := BadSort(f);
assert g=="bad";
f := "aaaaaaaa"; // testcase 2
g := BadSort(f);
assert g=="aaaaaaaa";
*/
/*
var a:string := "dabdabdab";
var b:string := BadSort(a);
assert multiset(a) == multiset(b);
assert b == "bbbaaaddd";
// apparently not possible ot verify this
*/
}
| // verifies
// all bs are before all as which are before all ds
predicate sortedbad(s:string)
{
// all b's are before all a's and d's
forall i,j :: 0 <= i < |s| && 0 <= j < |s| && s[i] == 'b' && (s[j] == 'a' || s[j] == 'd') ==> i < j &&
// all a's are after all b's
forall i,j :: 0 <= i < |s| && 0 <= j < |s| && s[i] == 'a' && s[j] == 'b' ==> i > j &&
// all a's are before all d's
forall i,j :: 0 <= i < |s| && 0 <= j < |s| && s[i] == 'a' && s[j] == 'd' ==> i < j &&
  // all d's are after all b's and a's
forall i,j :: 0 <= i < |s| && 0 <= j < |s| && s[i] == 'd' && (s[j] == 'a' || s[j] == 'b') ==> i > j
}
method BadSort(a: string) returns (b: string)
requires forall k :: 0 <= k < |a| ==> a[k] == 'b' || a[k] == 'a' || a[k] == 'd';
ensures sortedbad(b);
ensures multiset(a[..]) == multiset(b[..]);
ensures |a| == |b|;
{
b := a;
var next := 0;
var white := 0;
var blue := |b|; // colours between next and blue unsorted
while (next != blue) // if next==blue, no colours left to sort
// ensure next, white, blue are correct
// all b's are before all a's and d's
// all a's are after all b's
// all a's are before all d's
  // all d's are after all b's and a's
{
if b[next] == 'b' {
var tmp := b[next];
b := b[next := b[white]];
b := b[white := tmp];
next := next + 1;
white := white + 1;
} else if b[next] == 'a' {
next := next + 1;
} else if b[next] == 'd'{
blue := blue - 1;
var tmp := b[next];
b := b[next := b[blue]];
b := b[blue := tmp];
}
}
}
method check() {
var f:string := "dabdabdab";
var g:string := BadSort(f);
/*
f := "dba"; // testcase1
g := BadSort(f);
f := "aaaaaaaa"; // testcase 2
g := BadSort(f);
*/
/*
var a:string := "dabdabdab";
var b:string := BadSort(a);
// apparently not possible ot verify this
*/
}
|
352 | SENG2011_tmp_tmpgk5jq85q_ass2_ex5.dfy | // verifies
function expo(x:int, n:nat): int
requires n >= 0;
{
if (n == 0) then 1
else x * expo(x, n - 1)
}
lemma {:induction false} Expon23(n: nat)
requires n >= 0;
ensures ((expo(2, 3 * n) - expo(3, n))) % 5 == 0;
{
if (n == 0) {
assert (expo(2, 3 * 0) - expo(3, 0)) % 5 == 0;
} else if (n == 1) {
assert (expo(2, 3 * 1) - expo(3, 1)) % 5 == 0;
} else {
var i:nat := n;
var j:nat := n;
        // assume the claim holds for n - 1 (via the recursive call below)
        // and prove it for n
Expon23(n - 1);
assert (expo(2, 3 * (i - 1)) - expo(3, i - 1)) % 5 == 0;
assert (expo(2, (3 * i) - 3) - expo(3, (n - 1))) % 5 == 0;
//assert expo(2, 2 + 3) == expo(2, 2) * expo(2, 3);
assert expo(2, i - 0) == expo(2, i);
assert expo(2, i - 1) == expo(2, i) / expo(2, 1);
//assert expo(2, i - 2) == expo(2, i) / expo(2, 2);
//assert expo(2, i - 3) == expo(2, i) / expo(2, 3); // training
assert expo(2, (1 * i) - 0) == expo(2, (1 * i));
assert expo(2, (2 * i) - 1) == expo(2, (2 * i)) / expo(2, 1);
assert expo(2, (3 * 1) - 3) == expo(2, (3 * 1)) / expo(2, 3);
assert expo(2, (3 * i) - 0) == expo(2, (3 * i));
assert expo(2, (3 * i) - 1) == expo(2, (3 * i)) / expo(2, 1);
assert expo(2, (3 * i) - 2) == expo(2, (3 * i)) / expo(2, 2);
assert expo(2, (3 * i) - 3) == expo(2, (3 * i)) / expo(2, 3);
assert expo(3, (i - 1)) == expo(3, i) / expo (3, 1);
assert expo(2, (3 * i) - 3) - expo(3, (i - 1)) == expo(2, (3 * i)) / expo(2,3) - expo(3, i) / expo (3, 1);
assert expo(2, 3) % 5 == expo(3, 1);
assert (expo(2, (3 * i)) * 6) % 5 == expo(2, (3 * i)) % 5;
assert (expo(2, (3 * i)) * expo(2, 3)) % 5 == (expo(2, (3 * i)) * expo(3, 1)) % 5;
assert (expo(2, (3 * i)) * expo(2,3) - expo(3, i) * expo (3, 1)) % 5 == (expo(2, (3 * i)) * expo(3, 1) - expo(3, i) * expo (3, 1)) % 5;
assert (expo(2, (3 * i)) * expo(3, 1) - expo(3, i) * expo (3, 1)) % 5 == (expo(3, 1) * (expo(2, (3 * i)) - expo(3, i))) % 5;
assert (expo(2, (3 * i)) - expo(3, i)) % 5 == 0;
}
}
method check() {
assert expo(2, 3) == 8;
assert expo(-2, 3) == -8;
assert expo(3, 0) == 1;
assert expo(0, 0) == 1;
assert expo(10, 2) == 100;
}
| // verifies
function expo(x:int, n:nat): int
requires n >= 0;
{
if (n == 0) then 1
else x * expo(x, n - 1)
}
lemma {:induction false} Expon23(n: nat)
requires n >= 0;
ensures ((expo(2, 3 * n) - expo(3, n))) % 5 == 0;
{
if (n == 0) {
} else if (n == 1) {
} else {
var i:nat := n;
var j:nat := n;
        // assume the claim holds for n - 1 (via the recursive call below)
        // and prove it for n
Expon23(n - 1);
//assert expo(2, 2 + 3) == expo(2, 2) * expo(2, 3);
//assert expo(2, i - 2) == expo(2, i) / expo(2, 2);
//assert expo(2, i - 3) == expo(2, i) / expo(2, 3); // training
}
}
method check() {
}
|
353 | SENG2011_tmp_tmpgk5jq85q_exam_ex2.dfy | method Getmini(a:array<int>) returns(mini:nat)
requires a.Length > 0
ensures 0 <= mini < a.Length // mini is an index of a
ensures forall x :: 0 <= x < a.Length ==> a[mini] <= a[x] // a[mini] is the minimum value
ensures forall x :: 0 <= x < mini ==> a[mini] < a[x] // a[mini] is the first min
{
// find mini
var min:int := a[0];
var i:int := 0;
while i < a.Length
invariant 0 <= i <= a.Length
invariant forall x :: 0 <= x < i ==> min <= a[x] // min is the smallest so far
invariant min in a[..] // min is always in a
{
if a[i] < min {
min := a[i];
}
i := i + 1;
}
//assert min in a[..]; // min is in a -> it will be found by this loop
    // find first occurrence
var k:int := 0;
while k < a.Length
invariant 0 <= k <= a.Length
invariant forall x :: 0 <= x < k ==> min < a[x]
{
if a[k] == min {
return k;
}
k := k + 1;
}
}
/*
method check() {
var data := new int[][9,5,42,5,5]; // minimum 5 first at index 1
var mini := Getmini(data);
//print mini;
assert mini==1;
}
*/
| method Getmini(a:array<int>) returns(mini:nat)
requires a.Length > 0
ensures 0 <= mini < a.Length // mini is an index of a
ensures forall x :: 0 <= x < a.Length ==> a[mini] <= a[x] // a[mini] is the minimum value
ensures forall x :: 0 <= x < mini ==> a[mini] < a[x] // a[mini] is the first min
{
// find mini
var min:int := a[0];
var i:int := 0;
while i < a.Length
{
if a[i] < min {
min := a[i];
}
i := i + 1;
}
//assert min in a[..]; // min is in a -> it will be found by this loop
    // find first occurrence
var k:int := 0;
while k < a.Length
{
if a[k] == min {
return k;
}
k := k + 1;
}
}
/*
method check() {
var data := new int[][9,5,42,5,5]; // minimum 5 first at index 1
var mini := Getmini(data);
//print mini;
}
*/
|
354 | SENG2011_tmp_tmpgk5jq85q_exam_ex3.dfy | method Symmetric(a: array<int>) returns (flag: bool)
ensures flag == true ==> forall x :: 0 <= x < a.Length ==> a[x] == a[a.Length - x - 1]
ensures flag == false ==> exists x :: 0 <= x < a.Length && a[x] != a[a.Length - x - 1]
{
// empty == symmetrical
if a.Length == 0 {
return true;
}
var i:int := 0;
while i < a.Length
invariant 0 <= i <= a.Length // probably only need to check to halfway but this works as well
invariant forall x :: 0 <= x < i ==> a[x] == a[a.Length - x - 1]
{
if a[i] != a[a.Length - i - 1] {
return false;
}
i := i + 1;
}
return true;
}
/*
method Main() {
var data1 := new int[][1,2,3,2,1];
var f1 := Symmetric(data1);
assert f1;
var data2 := new int[][1,2];
var f2 := Symmetric(data2);
assert !f2;
//print f2;
}
*/
| method Symmetric(a: array<int>) returns (flag: bool)
ensures flag == true ==> forall x :: 0 <= x < a.Length ==> a[x] == a[a.Length - x - 1]
ensures flag == false ==> exists x :: 0 <= x < a.Length && a[x] != a[a.Length - x - 1]
{
// empty == symmetrical
if a.Length == 0 {
return true;
}
var i:int := 0;
while i < a.Length
{
if a[i] != a[a.Length - i - 1] {
return false;
}
i := i + 1;
}
return true;
}
/*
method Main() {
var data1 := new int[][1,2,3,2,1];
var f1 := Symmetric(data1);
var data2 := new int[][1,2];
var f2 := Symmetric(data2);
//print f2;
}
*/
|
355 | SENG2011_tmp_tmpgk5jq85q_exam_ex4.dfy | lemma {:induction false} Divby2(n: nat)
ensures (n*(n-1))%2 == 0
{
if n == 0 {
assert (1*(1-1))%2 == 0; // base case
} else {
Divby2(n - 1); // proved in case n - 1
assert (n-1)*(n-2) == n*n -3*n + 2; // expanded case n - 1
}
}
| lemma {:induction false} Divby2(n: nat)
ensures (n*(n-1))%2 == 0
{
if n == 0 {
} else {
Divby2(n - 1); // proved in case n - 1
}
}
|
356 | SENG2011_tmp_tmpgk5jq85q_flex_ex1.dfy | // sums from index 0 -> i - 1
function sumcheck(s: array<int>, i: int): int
requires 0 <= i <= s.Length
reads s
{
if i == 0 then 0
else s[i - 1] + sumcheck(s, i - 1)
}
// returns sum of array
method sum(s: array<int>) returns (a:int)
requires s.Length > 0
ensures sumcheck(s, s.Length) == a
{
a := 0;
var i:int := 0;
while i < s.Length
invariant 0 <= i <= s.Length && a == sumcheck(s, i)
{
a := a + s[i];
i := i + 1;
}
}
method Main() {
var a: array<int> := new int[4];
a[0] := 1;
a[1] := 3;
a[2] := 3;
a[3] := 2;
assert a[..] == [1,3,3,2];
var s:= sum(a);
assert a[0] == 1 && a[1] == 3 && a[2] == 3 && a[3] == 2;
assert s == sumcheck(a, a.Length);
print "\nThe sum of all elements in [1,3,3,2] is ";
print s;
}
| // sums from index 0 -> i - 1
function sumcheck(s: array<int>, i: int): int
requires 0 <= i <= s.Length
reads s
{
if i == 0 then 0
else s[i - 1] + sumcheck(s, i - 1)
}
// returns sum of array
method sum(s: array<int>) returns (a:int)
requires s.Length > 0
ensures sumcheck(s, s.Length) == a
{
a := 0;
var i:int := 0;
while i < s.Length
{
a := a + s[i];
i := i + 1;
}
}
method Main() {
var a: array<int> := new int[4];
a[0] := 1;
a[1] := 3;
a[2] := 3;
a[3] := 2;
var s:= sum(a);
print "\nThe sum of all elements in [1,3,3,2] is ";
print s;
}
|
357 | SENG2011_tmp_tmpgk5jq85q_flex_ex2.dfy | function maxcheck(s: array<nat>, i: int, max: int): int
requires 0 <= i <= s.Length
reads s
{
if i == 0 then max
else if s[i - 1] > max then maxcheck(s, i - 1, s[i - 1])
else maxcheck(s, i - 1, max)
}
method max(s: array<nat>) returns (a:int)
requires s.Length > 0
ensures forall x :: 0 <= x < s.Length ==> a >= s[x]
ensures a in s[..]
{
a := s[0];
var i:int := 0;
while i < s.Length
invariant 0 <= i <= s.Length
invariant forall x :: 0 <= x < i ==> a >= s[x]
invariant a in s[..]
{
if (s[i] > a) {
a := s[i];
}
i := i + 1;
}
}
method Checker() {
var a := new nat[][1,2,3,50,5,51];
// ghost var a := [1,2,3];
var n := max(a);
// assert a[..] == [1,2,3];
assert n == 51;
// assert MAXIMUM(1,2) == 2;
// assert ret_max(a,a.Length-1) == 12;
// assert ret_max(a,a.Length-1) == x+3;
}
| function maxcheck(s: array<nat>, i: int, max: int): int
requires 0 <= i <= s.Length
reads s
{
if i == 0 then max
else if s[i - 1] > max then maxcheck(s, i - 1, s[i - 1])
else maxcheck(s, i - 1, max)
}
method max(s: array<nat>) returns (a:int)
requires s.Length > 0
ensures forall x :: 0 <= x < s.Length ==> a >= s[x]
ensures a in s[..]
{
a := s[0];
var i:int := 0;
while i < s.Length
{
if (s[i] > a) {
a := s[i];
}
i := i + 1;
}
}
method Checker() {
var a := new nat[][1,2,3,50,5,51];
// ghost var a := [1,2,3];
var n := max(a);
// assert a[..] == [1,2,3];
// assert MAXIMUM(1,2) == 2;
// assert ret_max(a,a.Length-1) == 12;
// assert ret_max(a,a.Length-1) == x+3;
}
|
358 | SENG2011_tmp_tmpgk5jq85q_flex_ex5.dfy | method firste(a: array<char>) returns (c:int)
ensures -1 <= c < a.Length
ensures 0 <= c < a.Length ==> a[c] == 'e' && forall x :: 0 <= x < c ==> a[x] != 'e'
ensures c == -1 ==> forall x :: 0 <= x < a.Length ==> a[x] != 'e'
{
var i:int := 0;
while i < a.Length
invariant 0 <= i <= a.Length
invariant forall x :: 0 <= x < i ==> a[x] != 'e'
{
if a[i] == 'e' {
return i;
}
i := i + 1;
}
return -1;
}
method Main(){
var a := new char[6]['c','h','e','e','s','e'];
var p := firste(a);
print p;
//assert p == 2;
}
| method firste(a: array<char>) returns (c:int)
ensures -1 <= c < a.Length
ensures 0 <= c < a.Length ==> a[c] == 'e' && forall x :: 0 <= x < c ==> a[x] != 'e'
ensures c == -1 ==> forall x :: 0 <= x < a.Length ==> a[x] != 'e'
{
var i:int := 0;
while i < a.Length
{
if a[i] == 'e' {
return i;
}
i := i + 1;
}
return -1;
}
method Main(){
var a := new char[6]['c','h','e','e','s','e'];
var p := firste(a);
print p;
//assert p == 2;
}
|
359 | SENG2011_tmp_tmpgk5jq85q_p1.dfy | method Reverse(a: array<char>) returns (b: array<char>)
requires a.Length > 0
ensures a.Length == b.Length
ensures forall x :: 0 <= x < a.Length ==> b[x] == a[a.Length - x - 1]
{
// copy array a to new array b
b := new char[a.Length];
var k := 0;
while (k < a.Length)
invariant 0 <= k <= a.Length;
invariant forall x :: 0 <= x < k ==> b[x] == a[a.Length - x - 1]
decreases a.Length - k
{
b[k] := a[a.Length - 1 - k];
k := k + 1;
}
/*
var i:int := 0;
while i < a.Length
invariant a.Length == b.Length
invariant 0 <= i <= a.Length
invariant 0 <= i <= b.Length
//invariant multiset(a[..]) == multiset(b[..])
invariant forall x :: 0 <= x < i ==> b[x] == a[a.Length - x - 1]
decreases a.Length - i
{
b[i] := a[a.Length - 1 - i];
i := i + 1;
}
*/
}
method Main()
{
var a := new char[8];
a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7] := 'd', 'e', 's', 'r', 'e', 'v', 'e', 'r';
var b := Reverse(a);
assert b[..] == [ 'r', 'e', 'v', 'e', 'r', 's', 'e', 'd' ];
print b[..];
a := new char[1];
a[0] := '!';
b := Reverse(a);
assert b[..] == [ '!' ];
print b[..], '\n';
}
| method Reverse(a: array<char>) returns (b: array<char>)
requires a.Length > 0
ensures a.Length == b.Length
ensures forall x :: 0 <= x < a.Length ==> b[x] == a[a.Length - x - 1]
{
// copy array a to new array b
b := new char[a.Length];
var k := 0;
while (k < a.Length)
{
b[k] := a[a.Length - 1 - k];
k := k + 1;
}
/*
var i:int := 0;
while i < a.Length
//invariant multiset(a[..]) == multiset(b[..])
{
b[i] := a[a.Length - 1 - i];
i := i + 1;
}
*/
}
method Main()
{
var a := new char[8];
a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7] := 'd', 'e', 's', 'r', 'e', 'v', 'e', 'r';
var b := Reverse(a);
print b[..];
a := new char[1];
a[0] := '!';
b := Reverse(a);
print b[..], '\n';
}
|
360 | SENG2011_tmp_tmpgk5jq85q_p2.dfy | method AbsIt(s: array<int>) modifies s;
//requires
ensures forall x :: 0 <= x < s.Length ==> old(s[x]) < 0 ==> s[x] == -old(s[x])
ensures forall x :: 0 <= x < s.Length ==> old(s[x]) >= 0 ==> s[x] == old(s[x])
{
var i:int := 0;
while i < s.Length
invariant 0 <= i <= s.Length
//invariant forall x :: 0 <= x < i ==> s[x] >= 0
//invariant forall x :: 0 <= x < i ==> old(s[x]) < 0 ==> s[x] == -old(s[x])
//invariant forall x :: 0 <= x < i ==> old(s[x]) >= 0 ==> s[x] == old(s[x])
invariant forall k :: 0 <= k < i ==> old(s[k]) < 0 ==> s[k] == -old(s[k])// negatives are abs'ed
invariant forall k :: 0 <= k < i ==> old(s[k]) >= 0 ==> s[k] == old(s[k]) // positives left alone
invariant forall k:: i <= k < s.Length ==> old(s[k]) == s[k] // not yet touched
{
if (s[i] < 0) {
s[i] := -s[i];
}
i := i + 1;
}
}
| method AbsIt(s: array<int>) modifies s;
//requires
ensures forall x :: 0 <= x < s.Length ==> old(s[x]) < 0 ==> s[x] == -old(s[x])
ensures forall x :: 0 <= x < s.Length ==> old(s[x]) >= 0 ==> s[x] == old(s[x])
{
var i:int := 0;
while i < s.Length
//invariant forall x :: 0 <= x < i ==> s[x] >= 0
//invariant forall x :: 0 <= x < i ==> old(s[x]) < 0 ==> s[x] == -old(s[x])
//invariant forall x :: 0 <= x < i ==> old(s[x]) >= 0 ==> s[x] == old(s[x])
{
if (s[i] < 0) {
s[i] := -s[i];
}
i := i + 1;
}
}
|
361 | SiLemma_tmp_tmpfxtryv2w_utils.dfy | module Utils {
lemma AllBelowBoundSize(bound: nat)
ensures
var below := set n : nat | n < bound :: n;
|below| == bound
decreases bound
{
if bound == 0 {
} else {
AllBelowBoundSize(bound-1);
var belowminus := set n : nat | n < bound-1 :: n;
var below := set n : nat | n < bound :: n;
assert below == belowminus + {bound-1};
}
}
lemma SizeOfContainedSet(a: set<nat>, b: set<nat>)
requires forall n: nat :: n in a ==> n in b
ensures |a| <= |b|
decreases |a|
{
if |a| == 0 {
} else {
var y :| y in a;
var new_a := a - {y};
var new_b := b - {y};
SizeOfContainedSet(new_a, new_b);
}
}
lemma BoundedSetSize(bound: nat, values: set<nat>)
requires forall n :: n in values ==> n < bound
ensures |values| <= bound
{
var all_below_bound := set n : nat | n < bound :: n;
AllBelowBoundSize(bound);
assert |all_below_bound| == bound;
assert forall n :: n in values ==> n in all_below_bound;
SizeOfContainedSet(values, all_below_bound);
}
lemma MappedSetSize<T, U>(s: set<T>, f: T->U, t: set<U>)
requires forall n: T, m: T :: m != n ==> f(n) != f(m)
requires t == set n | n in s :: f(n)
ensures |s| == |t|
decreases |s|
{
var t := set n | n in s :: f(n);
if |s| == 0 {
} else {
var y :| y in s;
var new_s := s - {y};
var new_t := t - {f(y)};
assert new_t == set n | n in new_s :: f(n);
MappedSetSize(new_s, f, new_t);
}
}
lemma SetSizes<T>(a: set<T>, b: set<T>, c: set<T>)
requires c == a + b
requires forall t: T :: t in a ==> t !in b
requires forall t: T :: t in b ==> t !in a
ensures |c| == |a| + |b|
{
}
}
| module Utils {
lemma AllBelowBoundSize(bound: nat)
ensures
var below := set n : nat | n < bound :: n;
|below| == bound
{
if bound == 0 {
} else {
AllBelowBoundSize(bound-1);
var belowminus := set n : nat | n < bound-1 :: n;
var below := set n : nat | n < bound :: n;
}
}
lemma SizeOfContainedSet(a: set<nat>, b: set<nat>)
requires forall n: nat :: n in a ==> n in b
ensures |a| <= |b|
{
if |a| == 0 {
} else {
var y :| y in a;
var new_a := a - {y};
var new_b := b - {y};
SizeOfContainedSet(new_a, new_b);
}
}
lemma BoundedSetSize(bound: nat, values: set<nat>)
requires forall n :: n in values ==> n < bound
ensures |values| <= bound
{
var all_below_bound := set n : nat | n < bound :: n;
AllBelowBoundSize(bound);
SizeOfContainedSet(values, all_below_bound);
}
lemma MappedSetSize<T, U>(s: set<T>, f: T->U, t: set<U>)
requires forall n: T, m: T :: m != n ==> f(n) != f(m)
requires t == set n | n in s :: f(n)
ensures |s| == |t|
{
var t := set n | n in s :: f(n);
if |s| == 0 {
} else {
var y :| y in s;
var new_s := s - {y};
var new_t := t - {f(y)};
MappedSetSize(new_s, f, new_t);
}
}
lemma SetSizes<T>(a: set<T>, b: set<T>, c: set<T>)
requires c == a + b
requires forall t: T :: t in a ==> t !in b
requires forall t: T :: t in b ==> t !in a
ensures |c| == |a| + |b|
{
}
}
|
362 | Simulink-To_dafny_tmp_tmpbcuesj2t_Tank.dfy | datatype Valve = ON | OFF
class Pipe{
var v1: Valve; //outlet valve
var v2: Valve; //inlet Valve
var v3: Valve; //outlet valve
var in_flowv1: int; //flow in valve v1
    var in_flowv2: int; //flow in valve v2
var in_flowv3: int; //flow in valve v3
constructor()
{
this.v1:= OFF;
this.v2:= ON;
}
}
class Tank
{
var pipe: Pipe;
var height: int;
constructor()
{
pipe := new Pipe();
}
}
method checkRegulation(tank: Tank)
//requires tank.pipe.v1==OFF && tank.pipe.v2==ON && (tank.pipe.v3==OFF || tank.pipe.v2==ON)
ensures (tank.height>10 && tank.pipe.v1==OFF && tank.pipe.v3==ON && tank.pipe.v2==old(tank.pipe.v2))
|| (tank.height <8 && tank.pipe.v1== OFF && tank.pipe.v2== ON && tank.pipe.v3==old(tank.pipe.v3))
|| ((tank.pipe.in_flowv3 >5 || tank.pipe.in_flowv1 >5 ) && tank.pipe.v2==OFF && tank.pipe.v3==old(tank.pipe.v3) && tank.pipe.v1==old(tank.pipe.v1))
modifies tank.pipe;
{
if(tank.height >10)
{
tank.pipe.v1:= OFF;
tank.pipe.v3:= ON;
assert((tank.height>10 && tank.pipe.v1==OFF && tank.pipe.v3==ON && tank.pipe.v2==old(tank.pipe.v2)));
}
else if(tank.height <8)
{
tank.pipe.v1:= OFF;
tank.pipe.v2:= ON;
assert((tank.height <8 && tank.pipe.v1== OFF && tank.pipe.v2== ON && tank.pipe.v3==old(tank.pipe.v3)));
}
assume(((tank.pipe.in_flowv3 >5 || tank.pipe.in_flowv1 >5 ) && tank.pipe.v2==OFF && tank.pipe.v3==old(tank.pipe.v3) && tank.pipe.v1==old(tank.pipe.v1)));
/*else if(tank.pipe.in_flowv3 >5 || tank.pipe.in_flowv1> 5)
{
tank.pipe.v2:= OFF;
assume(((tank.pipe.in_flowv3 >5 || tank.pipe.in_flowv1 >5 ) && tank.pipe.v2==OFF && tank.pipe.v3==old(tank.pipe.v3) && tank.pipe.v1==old(tank.pipe.v1)));
} */
}
| datatype Valve = ON | OFF
class Pipe{
var v1: Valve; //outlet valve
var v2: Valve; //inlet Valve
var v3: Valve; //outlet valve
var in_flowv1: int; //flow in valve v1
    var in_flowv2: int; //flow in valve v2
var in_flowv3: int; //flow in valve v3
constructor()
{
this.v1:= OFF;
this.v2:= ON;
}
}
class Tank
{
var pipe: Pipe;
var height: int;
constructor()
{
pipe := new Pipe();
}
}
method checkRegulation(tank: Tank)
//requires tank.pipe.v1==OFF && tank.pipe.v2==ON && (tank.pipe.v3==OFF || tank.pipe.v2==ON)
ensures (tank.height>10 && tank.pipe.v1==OFF && tank.pipe.v3==ON && tank.pipe.v2==old(tank.pipe.v2))
|| (tank.height <8 && tank.pipe.v1== OFF && tank.pipe.v2== ON && tank.pipe.v3==old(tank.pipe.v3))
|| ((tank.pipe.in_flowv3 >5 || tank.pipe.in_flowv1 >5 ) && tank.pipe.v2==OFF && tank.pipe.v3==old(tank.pipe.v3) && tank.pipe.v1==old(tank.pipe.v1))
modifies tank.pipe;
{
if(tank.height >10)
{
tank.pipe.v1:= OFF;
tank.pipe.v3:= ON;
}
else if(tank.height <8)
{
tank.pipe.v1:= OFF;
tank.pipe.v2:= ON;
}
assume(((tank.pipe.in_flowv3 >5 || tank.pipe.in_flowv1 >5 ) && tank.pipe.v2==OFF && tank.pipe.v3==old(tank.pipe.v3) && tank.pipe.v1==old(tank.pipe.v1)));
/*else if(tank.pipe.in_flowv3 >5 || tank.pipe.in_flowv1> 5)
{
tank.pipe.v2:= OFF;
assume(((tank.pipe.in_flowv3 >5 || tank.pipe.in_flowv1 >5 ) && tank.pipe.v2==OFF && tank.pipe.v3==old(tank.pipe.v3) && tank.pipe.v1==old(tank.pipe.v1)));
} */
}
|
363 | Software-Verification_tmp_tmpv4ueky2d_Best Time to Buy and Sell Stock_best_time_to_buy_and_sell_stock.dfy | method best_time_to_buy_and_sell_stock(prices: array<int>) returns (max_profit: int)
requires 1 <= prices.Length <= 100000
requires forall i :: 0 <= i < prices.Length ==> 0 <= prices[i] <= 10000
ensures forall i, j :: 0 <= i < j < prices.Length ==> max_profit >= prices[j] - prices[i]
{
var min_price := 10001;
max_profit := 0;
var i := 0;
while (i < prices.Length)
invariant 0 <= i <= prices.Length
invariant forall j :: 0 <= j < i ==> min_price <= prices[j]
invariant forall j, k :: 0 <= j < k < i ==> max_profit >= prices[k] - prices[j]
{
var price := prices[i];
if (price < min_price)
{
min_price := price;
}
if (price - min_price > max_profit) {
max_profit := price - min_price;
}
i := i + 1;
}
}
| method best_time_to_buy_and_sell_stock(prices: array<int>) returns (max_profit: int)
requires 1 <= prices.Length <= 100000
requires forall i :: 0 <= i < prices.Length ==> 0 <= prices[i] <= 10000
ensures forall i, j :: 0 <= i < j < prices.Length ==> max_profit >= prices[j] - prices[i]
{
var min_price := 10001;
max_profit := 0;
var i := 0;
while (i < prices.Length)
{
var price := prices[i];
if (price < min_price)
{
min_price := price;
}
if (price - min_price > max_profit) {
max_profit := price - min_price;
}
i := i + 1;
}
}
|
364 | Software-Verification_tmp_tmpv4ueky2d_Contains Duplicate_contains_duplicate.dfy | method contains_duplicate(nums: seq<int>) returns (result: bool)
requires 1 <= |nums| <= 100000
requires forall i :: 0 <= i < |nums| ==> -1000000000 <= nums[i] <= 1000000000
ensures result <==> distinct(nums)
{
var i := 0;
var s: set<int> := {};
while (i < |nums|)
invariant i <= |nums|
invariant forall j :: j in nums[..i] <==> j in s
invariant distinct(nums[..i])
{
var num := nums[i];
if (num in s)
{
return false;
}
s := s + {num};
i := i + 1;
}
return true;
}
predicate distinct(nums: seq<int>) {
forall i, j :: 0 <= i < j < |nums| ==> nums[i] != nums[j]
}
| method contains_duplicate(nums: seq<int>) returns (result: bool)
requires 1 <= |nums| <= 100000
requires forall i :: 0 <= i < |nums| ==> -1000000000 <= nums[i] <= 1000000000
ensures result <==> distinct(nums)
{
var i := 0;
var s: set<int> := {};
while (i < |nums|)
{
var num := nums[i];
if (num in s)
{
return false;
}
s := s + {num};
i := i + 1;
}
return true;
}
predicate distinct(nums: seq<int>) {
forall i, j :: 0 <= i < j < |nums| ==> nums[i] != nums[j]
}
|
365 | Software-Verification_tmp_tmpv4ueky2d_Counting Bits_counting_bits.dfy | method counting_bits(n: int) returns (result: array<int>)
requires 0 <= n <= 100000
ensures result.Length == n + 1
ensures forall i :: 1 <= i < n + 1 ==> result[i] == result[i / 2] + i % 2
{
result := new int[n + 1](i => 0);
var i := 1;
while (i < n + 1)
invariant 1 <= i <= n + 1
invariant forall j :: 1 <= j < i ==> result[j] == result[j / 2] + j % 2
{
result[i] := result[i / 2] + i % 2;
i := i + 1;
}
}
| method counting_bits(n: int) returns (result: array<int>)
requires 0 <= n <= 100000
ensures result.Length == n + 1
ensures forall i :: 1 <= i < n + 1 ==> result[i] == result[i / 2] + i % 2
{
result := new int[n + 1](i => 0);
var i := 1;
while (i < n + 1)
{
result[i] := result[i / 2] + i % 2;
i := i + 1;
}
}
|
366 | Software-Verification_tmp_tmpv4ueky2d_Longest Increasing Subsequence_longest_increasing_subsequence.dfy | method longest_increasing_subsequence(nums: array<int>) returns (max: int)
requires 1 <= nums.Length <= 2500
requires forall i :: 0 <= i < nums.Length ==> -10000 <= nums[i] <= 10000
// TODO: modify the ensures clause so that max is indeed equal to the longest increasing subsequence
ensures max >= 1
{
var length := nums.Length;
if (length == 1)
{
return 1;
}
max := 1;
var dp := new int[length](_ => 1);
var i := 1;
while (i < length)
modifies dp
invariant 1 <= i <= length
invariant max >= 1
{
var j := 0;
while (j < i)
invariant 0 <= j <= i
{
if (nums[j] < nums[i])
{
dp[i] := find_max(dp[i], dp[j] + 1);
}
j := j + 1;
}
max := find_max(max, dp[i]);
i := i + 1;
}
}
// Function
function find_max(x: int, y: int): int
{
if x > y then x
else y
}
| method longest_increasing_subsequence(nums: array<int>) returns (max: int)
requires 1 <= nums.Length <= 2500
requires forall i :: 0 <= i < nums.Length ==> -10000 <= nums[i] <= 10000
// TODO: modify the ensures clause so that max is indeed equal to the longest increasing subsequence
ensures max >= 1
{
var length := nums.Length;
if (length == 1)
{
return 1;
}
max := 1;
var dp := new int[length](_ => 1);
var i := 1;
while (i < length)
modifies dp
{
var j := 0;
while (j < i)
{
if (nums[j] < nums[i])
{
dp[i] := find_max(dp[i], dp[j] + 1);
}
j := j + 1;
}
max := find_max(max, dp[i]);
i := i + 1;
}
}
// Function
function find_max(x: int, y: int): int
{
if x > y then x
else y
}
|
367 | Software-Verification_tmp_tmpv4ueky2d_Non-overlapping Intervals_non_overlapping_intervals.dfy | method non_overlapping_intervals(intervals: array2<int>) returns (count: int)
modifies intervals
requires 1 <= intervals.Length0 <= 100000
requires intervals.Length1 == 2
requires forall i :: 0 <= i < intervals.Length0 ==> -50000 <= intervals[i, 0] <= 50000
requires forall i :: 0 <= i < intervals.Length0 ==> -50000 <= intervals[i, 1] <= 50000
// TODO: modify the ensures clause so that count is indeed equal to the minimum number of intervals we need to remove to make the rest of the intervals non-overlapping.
ensures count >= 0
{
var row := intervals.Length0;
if (row == 0)
{
return 0;
}
bubble_sort(intervals);
var i := 1;
count := 1;
var end := intervals[0, 1];
while (i < row)
invariant 1 <= i <= row
invariant 1 <= count <= i
invariant intervals[0, 1] <= end <= intervals[i - 1, 1]
{
if (intervals[i, 0] >= end)
{
count := count + 1;
end := intervals[i, 1];
}
i := i + 1;
}
return row - count;
}
// Bubble Sort
method bubble_sort(a: array2<int>)
modifies a
requires a.Length1 == 2
ensures sorted(a, 0, a.Length0 - 1)
{
var i := a.Length0 - 1;
while (i > 0)
invariant i < 0 ==> a.Length0 == 0
invariant sorted(a, i, a.Length0 - 1)
invariant partitioned(a, i)
{
var j := 0;
while (j < i)
invariant 0 < i < a.Length0 && 0 <= j <= i
invariant sorted(a, i, a.Length0 - 1)
invariant partitioned(a, i)
invariant forall k :: 0 <= k <= j ==> a[k, 1] <= a[j, 1]
{
if (a[j, 1] > a[j + 1, 1])
{
a[j, 1], a[j + 1, 1] := a[j + 1, 1], a[j, 1];
}
j := j + 1;
}
i := i -1;
}
}
// Predicates for Bubble Sort
predicate sorted(a: array2<int>, l: int, u: int)
reads a
requires a.Length1 == 2
{
forall i, j :: 0 <= l <= i <= j <= u < a.Length0 ==> a[i, 1] <= a[j, 1]
}
predicate partitioned(a: array2<int>, i: int)
reads a
requires a.Length1 == 2
{
forall k, k' :: 0 <= k <= i < k' < a.Length0 ==> a[k, 1] <= a[k', 1]
}
| method non_overlapping_intervals(intervals: array2<int>) returns (count: int)
modifies intervals
requires 1 <= intervals.Length0 <= 100000
requires intervals.Length1 == 2
requires forall i :: 0 <= i < intervals.Length0 ==> -50000 <= intervals[i, 0] <= 50000
requires forall i :: 0 <= i < intervals.Length0 ==> -50000 <= intervals[i, 1] <= 50000
// TODO: modify the ensures clause so that count is indeed equal to the minimum number of intervals we need to remove to make the rest of the intervals non-overlapping.
ensures count >= 0
{
var row := intervals.Length0;
if (row == 0)
{
return 0;
}
bubble_sort(intervals);
var i := 1;
count := 1;
var end := intervals[0, 1];
while (i < row)
{
if (intervals[i, 0] >= end)
{
count := count + 1;
end := intervals[i, 1];
}
i := i + 1;
}
return row - count;
}
// Bubble Sort
method bubble_sort(a: array2<int>)
modifies a
requires a.Length1 == 2
ensures sorted(a, 0, a.Length0 - 1)
{
var i := a.Length0 - 1;
while (i > 0)
{
var j := 0;
while (j < i)
{
if (a[j, 1] > a[j + 1, 1])
{
a[j, 1], a[j + 1, 1] := a[j + 1, 1], a[j, 1];
}
j := j + 1;
}
i := i -1;
}
}
// Predicates for Bubble Sort
predicate sorted(a: array2<int>, l: int, u: int)
reads a
requires a.Length1 == 2
{
forall i, j :: 0 <= l <= i <= j <= u < a.Length0 ==> a[i, 1] <= a[j, 1]
}
predicate partitioned(a: array2<int>, i: int)
reads a
requires a.Length1 == 2
{
forall k, k' :: 0 <= k <= i < k' < a.Length0 ==> a[k, 1] <= a[k', 1]
}
|
368 | Software-Verification_tmp_tmpv4ueky2d_Remove Duplicates from Sorted Array_remove_duplicates_from_sorted_array.dfy | method remove_duplicates_from_sorted_array(nums: seq<int>) returns (result: seq<int>)
requires is_sorted(nums)
requires 1 <= |nums| <= 30000
requires forall i :: 0 <= i < |nums| ==> -100 <= nums[i] <= 100
ensures is_sorted_and_distinct(result)
ensures forall i :: i in nums <==> i in result
{
var previous := nums[0];
result := [nums[0]];
var i := 1;
while (i < |nums|)
invariant 0 <= i <= |nums|
invariant |result| >= 1;
invariant previous in nums[0..i];
invariant previous == result[|result| - 1];
invariant is_sorted_and_distinct(result)
invariant forall j :: j in nums[0..i] <==> j in result
{
if (previous != nums[i])
{
result := result + [nums[i]];
previous := nums[i];
}
i := i + 1;
}
}
// Helper predicate
predicate is_sorted(nums: seq<int>)
{
forall i, j :: 0 <= i < j < |nums| ==> nums[i] <= nums[j]
}
predicate is_sorted_and_distinct(nums: seq<int>)
{
forall i, j :: 0 <= i < j < |nums| ==> nums[i] < nums[j]
}
| method remove_duplicates_from_sorted_array(nums: seq<int>) returns (result: seq<int>)
requires is_sorted(nums)
requires 1 <= |nums| <= 30000
requires forall i :: 0 <= i < |nums| ==> -100 <= nums[i] <= 100
ensures is_sorted_and_distinct(result)
ensures forall i :: i in nums <==> i in result
{
var previous := nums[0];
result := [nums[0]];
var i := 1;
while (i < |nums|)
{
if (previous != nums[i])
{
result := result + [nums[i]];
previous := nums[i];
}
i := i + 1;
}
}
// Helper predicate
predicate is_sorted(nums: seq<int>)
{
forall i, j :: 0 <= i < j < |nums| ==> nums[i] <= nums[j]
}
predicate is_sorted_and_distinct(nums: seq<int>)
{
forall i, j :: 0 <= i < j < |nums| ==> nums[i] < nums[j]
}
|
369 | Software-Verification_tmp_tmpv4ueky2d_Remove Element_remove_element.dfy | method remove_element(nums: array<int>, val: int) returns (i: int)
modifies nums
requires 0 <= nums.Length <= 100
requires forall i :: 0 <= i < nums.Length ==> 0 <= nums[i] <= 50
requires 0 <= val <= 100
ensures forall j :: 0 < j < i < nums.Length ==> nums[j] != val
{
i := 0;
var end := nums.Length - 1;
while i <= end
invariant 0 <= i <= nums.Length
invariant end < nums.Length
invariant forall k :: 0 <= k < i ==> nums[k] != val
{
if (nums[i] == val)
{
if (nums[end] == val)
{
end := end - 1;
}
else {
nums[i] := nums[end];
i := i + 1;
end := end - 1;
}
}
else {
i := i + 1;
}
}
}
| method remove_element(nums: array<int>, val: int) returns (i: int)
modifies nums
requires 0 <= nums.Length <= 100
requires forall i :: 0 <= i < nums.Length ==> 0 <= nums[i] <= 50
requires 0 <= val <= 100
ensures forall j :: 0 < j < i < nums.Length ==> nums[j] != val
{
i := 0;
var end := nums.Length - 1;
while i <= end
{
if (nums[i] == val)
{
if (nums[end] == val)
{
end := end - 1;
}
else {
nums[i] := nums[end];
i := i + 1;
end := end - 1;
}
}
else {
i := i + 1;
}
}
}
|
370 | Software-Verification_tmp_tmpv4ueky2d_Valid Anagram_valid_anagram.dfy | method is_anagram(s: string, t: string) returns (result: bool)
requires |s| == |t|
ensures (multiset(s) == multiset(t)) == result
{
result := is_equal(multiset(s), multiset(t));
}
method is_equal(s: multiset<char>, t: multiset<char>) returns (result: bool)
ensures (s == t) <==> result
{
var s_removed := multiset{};
var s_remaining := s;
while (|s_remaining| > 0)
invariant s_remaining == s - s_removed
invariant forall removed :: removed in s_removed ==> (removed in s &&
removed in t &&
s[removed] == t[removed])
{
var remaining :| remaining in s_remaining;
if !(remaining in s &&
remaining in t &&
s[remaining] == t[remaining]) {
return false;
}
var temp := multiset{};
s_removed := s_removed + temp[remaining := s[remaining]];
s_remaining := s_remaining - temp[remaining := s[remaining]];
}
var t_removed := multiset{};
var t_remaining := t;
while (|t_remaining| > 0)
invariant t_remaining == t - t_removed
invariant forall removed :: removed in t_removed ==> (removed in s &&
removed in t &&
s[removed] == t[removed])
{
var remaining :| remaining in t_remaining;
if !(remaining in s &&
remaining in t &&
s[remaining] == t[remaining]) {
return false;
}
var temp := multiset{};
t_removed := t_removed + temp[remaining := t[remaining]];
t_remaining := t_remaining - temp[remaining := t[remaining]];
}
return true;
}
| method is_anagram(s: string, t: string) returns (result: bool)
requires |s| == |t|
ensures (multiset(s) == multiset(t)) == result
{
result := is_equal(multiset(s), multiset(t));
}
method is_equal(s: multiset<char>, t: multiset<char>) returns (result: bool)
ensures (s == t) <==> result
{
var s_removed := multiset{};
var s_remaining := s;
while (|s_remaining| > 0)
{
var remaining :| remaining in s_remaining;
if !(remaining in s &&
remaining in t &&
s[remaining] == t[remaining]) {
return false;
}
var temp := multiset{};
s_removed := s_removed + temp[remaining := s[remaining]];
s_remaining := s_remaining - temp[remaining := s[remaining]];
}
var t_removed := multiset{};
var t_remaining := t;
while (|t_remaining| > 0)
{
var remaining :| remaining in t_remaining;
if !(remaining in s &&
remaining in t &&
s[remaining] == t[remaining]) {
return false;
}
var temp := multiset{};
t_removed := t_removed + temp[remaining := t[remaining]];
t_remaining := t_remaining - temp[remaining := t[remaining]];
}
return true;
}
|
371 | Software-Verification_tmp_tmpv4ueky2d_Valid Palindrome_valid_panlindrome.dfy | method isPalindrome(s: array<char>) returns (result: bool)
requires 1<= s.Length <= 200000
ensures result <==> (forall i:: 0 <= i < s.Length / 2 ==> s[i] == s[s.Length - 1 - i])
{
var length := s.Length;
var i := 0;
while i < length / 2
invariant 0 <= i <= length
invariant forall j:: 0 <= j < i ==> s[j] == s[length - 1 - j]
{
if s[i] != s[length - 1 - i]
{
return false;
}
i := i + 1;
}
return true;
}
| method isPalindrome(s: array<char>) returns (result: bool)
requires 1<= s.Length <= 200000
ensures result <==> (forall i:: 0 <= i < s.Length / 2 ==> s[i] == s[s.Length - 1 - i])
{
var length := s.Length;
var i := 0;
while i < length / 2
{
if s[i] != s[length - 1 - i]
{
return false;
}
i := i + 1;
}
return true;
}
|
372 | Software-building-and-verification-Projects_tmp_tmp5tm1srrn_CVS-projeto_aula1.dfy | method factImp(n: int) returns (r: int)
{
r := 1;
var m := n;
while (m > 0) {
r := r*m;
m := m-1;
}
}
function power(n: int, m: nat) : int {
if m==0 then 1 else n*power(n,m-1)
}
function pow(n: int, m: nat,r: int) : int {
if m==0 then r else pow(n,m-1,r*n)
}
function powerAlt(n: int,m: nat) : int {
pow(n,m,1)
}
// 3
function equivalentes(n: int,m: nat,r: int) : int
ensures power(n,m) == pow(n,m,r)
lemma l1(n: int,m: nat, r: int)
ensures equivalentes(n,m, r) == powerAlt(n,m)
// 4.
function fact(n: nat) : nat
{
if n==0 then 1 else n*fact(n-1)
}
function factAcc(n: nat,a: int) : int
decreases n
{
if (n == 0) then a else factAcc(n-1,n*a)
}
function factAlt(n: nat) : int { factAcc(n,1) }
lemma factAcc_correct(n: nat,a: int)
ensures factAcc(n,a) == fact(n)*a
lemma equiv(n: nat)
ensures fact(n) == factAlt(n) {
factAcc_correct(n, 1);
assert factAcc(n, 1) == fact(n)*1;
assert factAlt(n) == factAcc(n, 1);
assert fact(n) == fact(n)*1;
}
// 5. a)
function mystery1(n: nat,m: nat) : nat
decreases n, m;
ensures mystery1(n,m) == n+m
{ if n==0 then m else mystery1(n-1,m+1) }
// 5. b)
function mystery2(n: nat,m: nat) : nat
decreases m
ensures mystery2(n,m) == n+m
{ if m==0 then n else mystery2(n+1,m-1) }
// 5. c)
function mystery3(n: nat,m: nat) : nat
ensures mystery3(n,m) == n*m
{ if n==0 then 0 else mystery1(m,mystery3(n-1,m)) }
// 5. d)
function mystery4(n: nat,m: nat) : nat
ensures mystery4(n,m) == power(n,m)
{ if m==0 then 1 else mystery3(n,mystery4(n,m-1)) }
// 6
// 8
// 9
// 10
// 11
| method factImp(n: int) returns (r: int)
{
r := 1;
var m := n;
while (m > 0) {
r := r*m;
m := m-1;
}
}
function power(n: int, m: nat) : int {
if m==0 then 1 else n*power(n,m-1)
}
function pow(n: int, m: nat,r: int) : int {
if m==0 then r else pow(n,m-1,r*n)
}
function powerAlt(n: int,m: nat) : int {
pow(n,m,1)
}
// 3
function equivalentes(n: int,m: nat,r: int) : int
ensures power(n,m) == pow(n,m,r)
lemma l1(n: int,m: nat, r: int)
ensures equivalentes(n,m, r) == powerAlt(n,m)
// 4.
function fact(n: nat) : nat
{
if n==0 then 1 else n*fact(n-1)
}
function factAcc(n: nat,a: int) : int
{
if (n == 0) then a else factAcc(n-1,n*a)
}
function factAlt(n: nat) : int { factAcc(n,1) }
lemma factAcc_correct(n: nat,a: int)
ensures factAcc(n,a) == fact(n)*a
lemma equiv(n: nat)
ensures fact(n) == factAlt(n) {
factAcc_correct(n, 1);
}
// 5. a)
function mystery1(n: nat,m: nat) : nat
ensures mystery1(n,m) == n+m
{ if n==0 then m else mystery1(n-1,m+1) }
// 5. b)
function mystery2(n: nat,m: nat) : nat
ensures mystery2(n,m) == n+m
{ if m==0 then n else mystery2(n+1,m-1) }
// 5. c)
function mystery3(n: nat,m: nat) : nat
ensures mystery3(n,m) == n*m
{ if n==0 then 0 else mystery1(m,mystery3(n-1,m)) }
// 5. d)
function mystery4(n: nat,m: nat) : nat
ensures mystery4(n,m) == power(n,m)
{ if m==0 then 1 else mystery3(n,mystery4(n,m-1)) }
// 6
// 8
// 9
// 10
// 11
|
373 | Software-building-and-verification-Projects_tmp_tmp5tm1srrn_CVS-projeto_aula2.dfy | //PRE-CONDITIONS -> REQUIRES
//POST-CONDITIONS -> ENSURES
method max(a: int, b: int) returns (z: int)
requires true
ensures z >= a || z >= b
{
if a > b {
z :=a;
}
else {
z := b;
}
}
method Main() {
var x;
assert true;
x:=max(23,50);
assert x>=50 || x>=23;
}
// 3
method mystery1(n: nat,m: nat) returns (res: nat)
ensures n+m == res
{
if (n==0) {
return m;
}
else {
var aux := mystery1 (n-1,m);
return 1+aux;
}
}
method mystery2(n: nat,m: nat) returns (res: nat)
ensures n*m == res
{
if (n==0) {
return 0;
}
else {
var aux := mystery2(n-1,m);
var aux2 := mystery1(m,aux);
return aux2;
}
}
// 5a
method m1(x: int,y: int) returns (z: int)
requires 0 < x < y
ensures z >= 0 && z < y && z != x
{
if (x > 0 && y > 0 && y > x) {
z := x-1;
}
}
// 5b
method m2(x: nat) returns (y: int)
requires x <= -1
ensures y > x && y < x
{
if (x <= -1) {
y := x+1;
}
}
// 5c
// it can return false while they are not equal
//
method m3(x: int,y: int) returns (z: bool)
ensures z ==> x==y
{
if (x == y) {
z := true;
}
else {
z := false;
}
}
// 5d
method m4(x: int,y: int) returns (z: bool)
ensures z ==> x==y && x==y ==> z
{
if (x == y) {
z := true;
}
else {
z := false;
}
}
| //PRE-CONDITIONS -> REQUIRES
//POST-CONDITIONS -> ENSURES
method max(a: int, b: int) returns (z: int)
requires true
ensures z >= a || z >= b
{
if a > b {
z :=a;
}
else {
z := b;
}
}
method Main() {
var x;
x:=max(23,50);
}
// 3
method mystery1(n: nat,m: nat) returns (res: nat)
ensures n+m == res
{
if (n==0) {
return m;
}
else {
var aux := mystery1 (n-1,m);
return 1+aux;
}
}
method mystery2(n: nat,m: nat) returns (res: nat)
ensures n*m == res
{
if (n==0) {
return 0;
}
else {
var aux := mystery2(n-1,m);
var aux2 := mystery1(m,aux);
return aux2;
}
}
// 5a
method m1(x: int,y: int) returns (z: int)
requires 0 < x < y
ensures z >= 0 && z < y && z != x
{
if (x > 0 && y > 0 && y > x) {
z := x-1;
}
}
// 5b
method m2(x: nat) returns (y: int)
requires x <= -1
ensures y > x && y < x
{
if (x <= -1) {
y := x+1;
}
}
// 5c
// it can return false while they are not equal
//
method m3(x: int,y: int) returns (z: bool)
ensures z ==> x==y
{
if (x == y) {
z := true;
}
else {
z := false;
}
}
// 5d
method m4(x: int,y: int) returns (z: bool)
ensures z ==> x==y && x==y ==> z
{
if (x == y) {
z := true;
}
else {
z := false;
}
}
|
374 | Software-building-and-verification-Projects_tmp_tmp5tm1srrn_CVS-projeto_aula3.dfy | function fib(n : nat) : nat
{
if (n==0) then 1 else
if (n==1) then 1 else fib(n-1)+fib(n-2)
}
method Fib(n : nat) returns (r:nat)
ensures r == fib(n)
{
if (n == 0) {
return 1;
}
var next:= 2;
r:=1;
var i := 1;
while (i < n)
invariant next == fib(i+1)
invariant r == fib(i)
invariant 1 <= i <= n
{
var tmp := next;
next := next + r;
r := tmp;
i := i + 1;
}
assert r == fib(n);
return r;
}
// 2.
datatype List<T> = Nil | Cons(head: T, tail: List<T>)
function add(l : List<int>) : int {
match l
case Nil => 0
case Cons(x,xs) => x + add(xs)
}
method addImp(l : List<int>) returns (r: int)
ensures r == add(l)
{
r := 0;
var ll := l;
while (ll != Nil)
decreases ll
invariant r==add(l) - add(ll)
{
r := r + ll.head;
ll := ll.tail;
}
assert r == add(l);
}
// 3.
method maxArray(arr : array<int>) returns (max: int)
requires arr.Length > 0
ensures forall i: int :: 0 <= i < arr.Length ==> arr[i] <= max
ensures exists x::0 <= x < arr.Length && arr[x] == max
{
max := arr[0];
var index := 1;
while(index < arr.Length)
invariant 0 <= index <= arr.Length
invariant forall i: int :: 0 <= i < index ==> arr[i] <= max
invariant exists x::0 <= x < arr.Length && arr[x] == max
{
if (arr[index] > max) {
max := arr[index];
}
index := index + 1;
}
}
// 5.
method maxArrayReverse(arr : array<int>) returns (max: int)
requires arr.Length > 0
ensures forall i: int :: 0 <= i < arr.Length ==> arr[i] <= max
ensures exists x::0 <= x < arr.Length && arr[x] == max
{
var ind := arr.Length - 1;
max := arr[ind];
while ind > 0
invariant 0 <= ind <= arr.Length
invariant forall i: int :: ind <= i < arr.Length ==> arr[i] <= max
invariant exists x::0 <= x < arr.Length && arr[x] == max
{
if (arr[ind - 1] > max) {
max := arr[ind - 1];
}
ind := ind - 1;
}
}
// 6
function sum(n: nat) : nat
{
if (n == 0) then 0 else n + sum(n-1)
}
method sumBackwards(n: nat) returns (r: nat)
ensures r == sum(n)
{
var i := n;
r := 0;
while i > 0
invariant 0 <= i <= n
invariant r == sum(n) - sum(i)
{
r := r + i;
i := i - 1;
}
}
| function fib(n : nat) : nat
{
if (n==0) then 1 else
if (n==1) then 1 else fib(n-1)+fib(n-2)
}
method Fib(n : nat) returns (r:nat)
ensures r == fib(n)
{
if (n == 0) {
return 1;
}
var next:= 2;
r:=1;
var i := 1;
while (i < n)
{
var tmp := next;
next := next + r;
r := tmp;
i := i + 1;
}
return r;
}
// 2.
datatype List<T> = Nil | Cons(head: T, tail: List<T>)
function add(l : List<int>) : int {
match l
case Nil => 0
case Cons(x,xs) => x + add(xs)
}
method addImp(l : List<int>) returns (r: int)
ensures r == add(l)
{
r := 0;
var ll := l;
while (ll != Nil)
{
r := r + ll.head;
ll := ll.tail;
}
}
// 3.
method maxArray(arr : array<int>) returns (max: int)
requires arr.Length > 0
ensures forall i: int :: 0 <= i < arr.Length ==> arr[i] <= max
ensures exists x::0 <= x < arr.Length && arr[x] == max
{
max := arr[0];
var index := 1;
while(index < arr.Length)
{
if (arr[index] > max) {
max := arr[index];
}
index := index + 1;
}
}
// 5.
method maxArrayReverse(arr : array<int>) returns (max: int)
requires arr.Length > 0
ensures forall i: int :: 0 <= i < arr.Length ==> arr[i] <= max
ensures exists x::0 <= x < arr.Length && arr[x] == max
{
var ind := arr.Length - 1;
max := arr[ind];
while ind > 0
{
if (arr[ind - 1] > max) {
max := arr[ind - 1];
}
ind := ind - 1;
}
}
// 6
function sum(n: nat) : nat
{
if (n == 0) then 0 else n + sum(n-1)
}
method sumBackwards(n: nat) returns (r: nat)
ensures r == sum(n)
{
var i := n;
r := 0;
while i > 0
{
r := r + i;
i := i - 1;
}
}
|
375 | Software-building-and-verification-Projects_tmp_tmp5tm1srrn_CVS-projeto_aula5.dfy | /*Ex1 Given the leaky specification of class Set found in Appendix ??, use the techniques from
class (the use of ghost state and dynamic frames) so that the specification no longer leaks
the internal representation. Produce client code that correctly connects to your revised
Set class. */
class Set {
var store:array<int>;
var nelems: int;
ghost var Repr : set<object>
ghost var elems : set<int>
ghost predicate RepInv()
reads this, Repr
{
this in Repr && store in Repr &&
0 < store.Length
&& 0 <= nelems <= store.Length
&& (forall i :: 0 <= i < nelems ==> store[i] in elems)
&& (forall x :: x in elems ==> exists i :: 0 <= i < nelems && store[i] == x)
}
// the construction operation
constructor(n: int)
requires 0 < n
ensures RepInv()
ensures fresh(Repr-{this})
{
store := new int[n];
Repr := {this,store};
elems := {};
nelems := 0;
}
// returns the number of elements in the set
function size():int
requires RepInv()
ensures RepInv()
reads Repr
{ nelems }
// returns the maximum number of elements in the set
function maxSize():int
requires RepInv()
ensures RepInv()
reads Repr
{ store.Length }
// checks if the element given is in the set
method contains(v:int) returns (b:bool)
requires RepInv()
ensures RepInv()
ensures b <==> v in elems
{
var i := find(v);
return i >= 0;
}
// adds a new element to the set if space available
method add(v:int)
requires RepInv()
requires size() < maxSize()
ensures RepInv()
modifies this,Repr
ensures fresh(Repr - old(Repr))
{
var f:int := find(v);
if (f < 0) {
store[nelems] := v;
elems := elems + {v};
assert forall i:: 0 <= i < nelems ==> old(store[i]) == store[i];
nelems := nelems + 1;
}
}
    // private method that should not be in the public interface
method find(x:int) returns (r:int)
requires RepInv()
ensures RepInv()
ensures r < 0 ==> x !in elems
ensures r >=0 ==> x in elems;
{
var i:int := 0;
while (i<nelems)
decreases nelems-i
invariant 0 <= i <= nelems;
invariant forall j::(0<=j< i) ==> x != store[j];
{
if (store[i]==x) { return i; }
i := i + 1;
}
return -1;
}
method Main()
{
var s := new Set(10);
if (s.size() < s.maxSize()) {
s.add(2);
var b := s.contains(2);
if (s.size() < s.maxSize()) {
s.add(3);
}
}
}
}
/*2. Using the corrected version of Set as a baseline, implement a PositiveSet class that
enforces the invariant that all numbers in the set are strictly positive. */
class PositiveSet {
var store:array<int>;
var nelems: int;
ghost var Repr : set<object>
ghost var elems : set<int>
ghost predicate RepInv()
reads this, Repr
{
this in Repr && store in Repr &&
0 < store.Length
&& 0 <= nelems <= store.Length
&& (forall i :: 0 <= i < nelems ==> store[i] in elems)
&& (forall x :: x in elems ==> exists i :: 0 <= i < nelems && store[i] == x)
&& (forall x :: x in elems ==> x > 0)
}
// the construction operation
constructor(n: int)
requires 0 < n
ensures RepInv()
ensures fresh(Repr-{this})
{
store := new int[n];
Repr := {this,store};
elems := {};
nelems := 0;
}
// returns the number of elements in the set
function size():int
requires RepInv()
ensures RepInv()
reads Repr
{ nelems }
// returns the maximum number of elements in the set
function maxSize():int
requires RepInv()
ensures RepInv()
reads Repr
{ store.Length }
// checks if the element given is in the set
method contains(v:int) returns (b:bool)
requires RepInv()
ensures RepInv()
ensures b <==> v in elems
{
var i := find(v);
return i >= 0;
}
// adds a new element to the set if space available
method add(v:int)
requires RepInv()
requires size() < maxSize()
ensures RepInv()
modifies this,Repr
ensures fresh(Repr - old(Repr))
{
if(v > 0) {
var f:int := find(v);
if (f < 0) {
store[nelems] := v;
elems := elems + {v};
assert forall i:: 0 <= i < nelems ==> old(store[i]) == store[i];
nelems := nelems + 1;
}
}
}
    // private method that should not be in the public interface
method find(x:int) returns (r:int)
requires RepInv()
ensures RepInv()
ensures r < 0 ==> x !in elems
ensures r >=0 ==> x in elems;
{
var i:int := 0;
while (i<nelems)
decreases nelems-i
invariant 0 <= i <= nelems;
invariant forall j::(0<=j< i) ==> x != store[j];
{
if (store[i]==x) { return i; }
i := i + 1;
}
return -1;
}
method Main()
{
var s := new PositiveSet(10);
if (s.size() < s.maxSize()) {
s.add(2);
var b := s.contains(2);
if (s.size() < s.maxSize()) {
s.add(3);
}
}
}
}
/*
* Implement a savings account.
* A savings account is actually made up of two balances.
*
* One is the checking balance, here account owner can deposit and withdraw
* money at will. There is only one restriction on withdrawing. In a regular
* bank account, the account owner can make withdrawals as long as he has the
* balance for it, i.e., the user cannot withdraw more money than the user has.
* In a savings account, the checking balance can go negative as long as it does
* not surpass half of what is saved in the savings balance. Consider the
* following example:
*
* Savings = 10
* Checking = 0
* Operation 1: withdraw 10 This operation is not valid. Given that the
 * user only has $$10, his checking account
* can only decrease down to $$-5 (10/2).
*
* Operation 2: withdraw 2 Despite the fact that the checking balance of
 * the user is zero, he still has
* money in his savings account, therefore, this
* operation is valid, and the result would be
* something like:
* Savings = 10;
* Checking = -2
*
* Regarding depositing money in the savings balance (save), this operation has
 * one small restriction. It is only possible to save money to the savings
* balance when the user is not in debt; i.e. to save money into savings, the
* checking must be non-negative.
*
* Given the states:
* STATE 1 STATE 2
* Savings = 10 Savings = 10
* Checking = -5 Checking = 0
*
* and the operation save($$60000000000), the operation is valid when executed
* in STATE 2 but not in STATE 1.
*
* Finally, when withdrawing from the savings balance, an operation we will
* call rescue, the amount the user can withdraw depends on the negativity of
* the user’s checking account. For instance:
*
* Savings: 12
* Checking: -5
*
 * In this case, the user could withdraw at most two double dollars ($$). If the
* user withdrew more than that, the balance of the checking account would
* go beyond the -50% of the savings account; big no no.
*
*/
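/*
 * Added illustration (not part of the original exercise text): a worked
 * trace of the rules above, assuming the guarded updates implemented in
 * the SavingsAccount class below.
 *
 *   start: savings = 0, checking = 0
 *   deposit(10) -> checking = 10
 *   save(10) -> savings = 10 (allowed, checking is non-negative)
 *   withdraw(12) -> checking = -2 (allowed, since -2 >= -10/2 = -5)
 *   rescue(4) -> savings = 6 (allowed, since -2 >= -(10-4)/2 = -3)
 *   a further rescue(4) would be rejected, since -2 < -(6-4)/2 = -1
 */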
class SavingsAccount {
var cbalance: int;
var sbalance: int;
ghost var Repr:set<object>;
ghost predicate RepInv()
reads this,Repr
{
this in Repr
&& cbalance >= -sbalance/2
}
ghost predicate PositiveChecking()
reads this,Repr
{
cbalance >= 0
}
constructor()
ensures fresh(Repr-{this})
ensures RepInv()
{
Repr := {this};
cbalance := 0;
sbalance := 0;
}
method deposit(amount:int)
requires amount > 0
requires RepInv()
ensures RepInv()
modifies Repr
{
cbalance := cbalance + amount;
}
method withdraw(amount:int)
requires amount > 0
requires RepInv()
ensures RepInv()
modifies Repr
{
if(cbalance-amount >= -sbalance/2)
{
cbalance := cbalance - amount;
}
}
method save(amount: int)
requires amount > 0
requires PositiveChecking()
requires RepInv()
ensures RepInv()
modifies Repr
{
if(cbalance >= 0)
{
sbalance := sbalance + amount;
}
}
method rescue(amount: int)
requires amount > 0
requires RepInv()
ensures RepInv()
modifies Repr
{
if(cbalance >= -(sbalance-amount)/2)
{
sbalance := sbalance - amount;
}
}
}
/*Ex 4 Change your specification and implementation of the ASet ADT to include a growing
array of integer values. */
class GrowingSet {
var store:array<int>;
var nelems: int;
ghost var Repr : set<object>
ghost var elems : set<int>
ghost predicate RepInv()
reads this, Repr
{
this in Repr && store in Repr &&
0 < store.Length
&& 0 <= nelems <= store.Length
&& (forall i :: 0 <= i < nelems ==> store[i] in elems)
&& (forall x :: x in elems ==> exists i :: 0 <= i < nelems && store[i] == x)
}
// the construction operation
constructor(n: int)
requires 0 < n
ensures RepInv()
ensures fresh(Repr-{this})
{
store := new int[n];
Repr := {this,store};
elems := {};
nelems := 0;
}
// returns the number of elements in the set
function size():int
requires RepInv()
ensures RepInv()
reads Repr
{ nelems }
// returns the maximum number of elements in the set
function maxSize():int
requires RepInv()
ensures RepInv()
reads Repr
{ store.Length }
// checks if the element given is in the set
method contains(v:int) returns (b:bool)
requires RepInv()
ensures RepInv()
ensures b <==> v in elems
{
var i := find(v);
return i >= 0;
}
// adds a new element to the set if space available
method add(v:int)
requires RepInv()
ensures RepInv()
modifies Repr
ensures fresh(Repr - old(Repr))
{
var f:int := find(v);
assert forall i:: 0 <= i < nelems ==> old(store[i]) == store[i];
if (f < 0) {
if(nelems == store.Length) {
var tmp := new int[store.Length * 2];
var i:= 0;
while i < store.Length
invariant 0 <= i <= store.Length < tmp.Length
invariant forall j :: 0 <= j < i ==> old(store[j]) == tmp[j]
modifies tmp
{
tmp[i] := store[i];
i := i + 1;
}
Repr := Repr - {store} + {tmp};
store := tmp;
}
store[nelems] := v;
elems := elems + {v};
assert forall i:: 0 <= i < nelems ==> old(store[i]) == store[i];
nelems := nelems + 1;
}
}
    // private method that should not be in the public interface
method find(x:int) returns (r:int)
requires RepInv()
ensures RepInv()
ensures r < 0 ==> x !in elems
ensures r >=0 ==> x in elems;
{
var i:int := 0;
while (i<nelems)
decreases nelems-i
invariant 0 <= i <= nelems;
invariant forall j::(0<=j< i) ==> x != store[j];
{
if (store[i]==x) { return i; }
i := i + 1;
}
return -1;
}
method Main()
{
var s := new GrowingSet(10);
if (s.size() < s.maxSize()) {
s.add(2);
var b := s.contains(2);
if (s.size() < s.maxSize()) {
s.add(3);
}
}
}
}
| /*Ex1 Given the leaky specification of class Set found in Appendix ??, use the techniques from
class (the use of ghost state and dynamic frames) so that the specification no longer leaks
the internal representation. Produce client code that correctly connects to your revised
Set class. */
class Set {
var store:array<int>;
var nelems: int;
ghost var Repr : set<object>
ghost var elems : set<int>
ghost predicate RepInv()
reads this, Repr
{
this in Repr && store in Repr &&
0 < store.Length
&& 0 <= nelems <= store.Length
&& (forall i :: 0 <= i < nelems ==> store[i] in elems)
&& (forall x :: x in elems ==> exists i :: 0 <= i < nelems && store[i] == x)
}
// the construction operation
constructor(n: int)
requires 0 < n
ensures RepInv()
ensures fresh(Repr-{this})
{
store := new int[n];
Repr := {this,store};
elems := {};
nelems := 0;
}
// returns the number of elements in the set
function size():int
requires RepInv()
ensures RepInv()
reads Repr
{ nelems }
// returns the maximum number of elements in the set
function maxSize():int
requires RepInv()
ensures RepInv()
reads Repr
{ store.Length }
// checks if the element given is in the set
method contains(v:int) returns (b:bool)
requires RepInv()
ensures RepInv()
ensures b <==> v in elems
{
var i := find(v);
return i >= 0;
}
// adds a new element to the set if space available
method add(v:int)
requires RepInv()
requires size() < maxSize()
ensures RepInv()
modifies this,Repr
ensures fresh(Repr - old(Repr))
{
var f:int := find(v);
if (f < 0) {
store[nelems] := v;
elems := elems + {v};
nelems := nelems + 1;
}
}
    // private method that should not be in the public interface
method find(x:int) returns (r:int)
requires RepInv()
ensures RepInv()
ensures r < 0 ==> x !in elems
ensures r >=0 ==> x in elems;
{
var i:int := 0;
while (i<nelems)
{
if (store[i]==x) { return i; }
i := i + 1;
}
return -1;
}
method Main()
{
var s := new Set(10);
if (s.size() < s.maxSize()) {
s.add(2);
var b := s.contains(2);
if (s.size() < s.maxSize()) {
s.add(3);
}
}
}
}
/*2. Using the corrected version of Set as a baseline, implement a PositiveSet class that
enforces the invariant that all numbers in the set are strictly positive. */
class PositiveSet {
var store:array<int>;
var nelems: int;
ghost var Repr : set<object>
ghost var elems : set<int>
ghost predicate RepInv()
reads this, Repr
{
this in Repr && store in Repr &&
0 < store.Length
&& 0 <= nelems <= store.Length
&& (forall i :: 0 <= i < nelems ==> store[i] in elems)
&& (forall x :: x in elems ==> exists i :: 0 <= i < nelems && store[i] == x)
&& (forall x :: x in elems ==> x > 0)
}
// the construction operation
constructor(n: int)
requires 0 < n
ensures RepInv()
ensures fresh(Repr-{this})
{
store := new int[n];
Repr := {this,store};
elems := {};
nelems := 0;
}
// returns the number of elements in the set
function size():int
requires RepInv()
ensures RepInv()
reads Repr
{ nelems }
// returns the maximum number of elements in the set
function maxSize():int
requires RepInv()
ensures RepInv()
reads Repr
{ store.Length }
// checks if the element given is in the set
method contains(v:int) returns (b:bool)
requires RepInv()
ensures RepInv()
ensures b <==> v in elems
{
var i := find(v);
return i >= 0;
}
// adds a new element to the set if space available
method add(v:int)
requires RepInv()
requires size() < maxSize()
ensures RepInv()
modifies this,Repr
ensures fresh(Repr - old(Repr))
{
if(v > 0) {
var f:int := find(v);
if (f < 0) {
store[nelems] := v;
elems := elems + {v};
nelems := nelems + 1;
}
}
}
    // private method that should not be in the public interface
method find(x:int) returns (r:int)
requires RepInv()
ensures RepInv()
ensures r < 0 ==> x !in elems
ensures r >=0 ==> x in elems;
{
var i:int := 0;
while (i<nelems)
{
if (store[i]==x) { return i; }
i := i + 1;
}
return -1;
}
method Main()
{
var s := new PositiveSet(10);
if (s.size() < s.maxSize()) {
s.add(2);
var b := s.contains(2);
if (s.size() < s.maxSize()) {
s.add(3);
}
}
}
}
/*
* Implement a savings account.
* A savings account is actually made up of two balances.
*
* One is the checking balance, here account owner can deposit and withdraw
* money at will. There is only one restriction on withdrawing. In a regular
* bank account, the account owner can make withdrawals as long as he has the
* balance for it, i.e., the user cannot withdraw more money than the user has.
* In a savings account, the checking balance can go negative as long as it does
* not surpass half of what is saved in the savings balance. Consider the
* following example:
*
* Savings = 10
* Checking = 0
* Operation 1: withdraw 10 This operation is not valid. Given that the
 * user only has $$10, his checking account
* can only decrease down to $$-5 (10/2).
*
* Operation 2: withdraw 2 Despite the fact that the checking balance of
 * the user is zero, he still has
* money in his savings account, therefore, this
* operation is valid, and the result would be
* something like:
* Savings = 10;
* Checking = -2
*
* Regarding depositing money in the savings balance (save), this operation has
 * one small restriction. It is only possible to save money to the savings
* balance when the user is not in debt; i.e. to save money into savings, the
* checking must be non-negative.
*
* Given the states:
* STATE 1 STATE 2
* Savings = 10 Savings = 10
* Checking = -5 Checking = 0
*
* and the operation save($$60000000000), the operation is valid when executed
* in STATE 2 but not in STATE 1.
*
* Finally, when withdrawing from the savings balance, an operation we will
* call rescue, the amount the user can withdraw depends on the negativity of
* the user’s checking account. For instance:
*
* Savings: 12
* Checking: -5
*
 * In this case, the user could withdraw at most two double dollars ($$). If the
* user withdrew more than that, the balance of the checking account would
* go beyond the -50% of the savings account; big no no.
*
*/
class SavingsAccount {
var cbalance: int;
var sbalance: int;
ghost var Repr:set<object>;
ghost predicate RepInv()
reads this,Repr
{
this in Repr
&& cbalance >= -sbalance/2
}
ghost predicate PositiveChecking()
reads this,Repr
{
cbalance >= 0
}
constructor()
ensures fresh(Repr-{this})
ensures RepInv()
{
Repr := {this};
cbalance := 0;
sbalance := 0;
}
method deposit(amount:int)
requires amount > 0
requires RepInv()
ensures RepInv()
modifies Repr
{
cbalance := cbalance + amount;
}
method withdraw(amount:int)
requires amount > 0
requires RepInv()
ensures RepInv()
modifies Repr
{
if(cbalance-amount >= -sbalance/2)
{
cbalance := cbalance - amount;
}
}
method save(amount: int)
requires amount > 0
requires PositiveChecking()
requires RepInv()
ensures RepInv()
modifies Repr
{
if(cbalance >= 0)
{
sbalance := sbalance + amount;
}
}
method rescue(amount: int)
requires amount > 0
requires RepInv()
ensures RepInv()
modifies Repr
{
if(cbalance >= -(sbalance-amount)/2)
{
sbalance := sbalance - amount;
}
}
}
/*Ex 4 Change your specification and implementation of the ASet ADT to include a growing
array of integer values. */
class GrowingSet {
var store:array<int>;
var nelems: int;
ghost var Repr : set<object>
ghost var elems : set<int>
ghost predicate RepInv()
reads this, Repr
{
this in Repr && store in Repr &&
0 < store.Length
&& 0 <= nelems <= store.Length
&& (forall i :: 0 <= i < nelems ==> store[i] in elems)
&& (forall x :: x in elems ==> exists i :: 0 <= i < nelems && store[i] == x)
}
// the construction operation
constructor(n: int)
requires 0 < n
ensures RepInv()
ensures fresh(Repr-{this})
{
store := new int[n];
Repr := {this,store};
elems := {};
nelems := 0;
}
// returns the number of elements in the set
function size():int
requires RepInv()
ensures RepInv()
reads Repr
{ nelems }
// returns the maximum number of elements in the set
function maxSize():int
requires RepInv()
ensures RepInv()
reads Repr
{ store.Length }
// checks if the element given is in the set
method contains(v:int) returns (b:bool)
requires RepInv()
ensures RepInv()
ensures b <==> v in elems
{
var i := find(v);
return i >= 0;
}
// adds a new element to the set if space available
method add(v:int)
requires RepInv()
ensures RepInv()
modifies Repr
ensures fresh(Repr - old(Repr))
{
var f:int := find(v);
if (f < 0) {
if(nelems == store.Length) {
var tmp := new int[store.Length * 2];
var i:= 0;
while i < store.Length
modifies tmp
{
tmp[i] := store[i];
i := i + 1;
}
Repr := Repr - {store} + {tmp};
store := tmp;
}
store[nelems] := v;
elems := elems + {v};
nelems := nelems + 1;
}
}
    // private method that should not be in the public interface
method find(x:int) returns (r:int)
requires RepInv()
ensures RepInv()
ensures r < 0 ==> x !in elems
ensures r >=0 ==> x in elems;
{
var i:int := 0;
while (i<nelems)
{
if (store[i]==x) { return i; }
i := i + 1;
}
return -1;
}
method Main()
{
var s := new GrowingSet(10);
if (s.size() < s.maxSize()) {
s.add(2);
var b := s.contains(2);
if (s.size() < s.maxSize()) {
s.add(3);
}
}
}
}
|
376 | Software-building-and-verification-Projects_tmp_tmp5tm1srrn_CVS-projeto_handout1.dfy | // 1 a)
// [ai, aj[
function sum(a: array<int>, i: int, j: int) : int
requires 0 <= i <= j <= a.Length
reads a
decreases j
{
if i == j then 0
else a[j-1] + sum(a, i, j-1)
}
// 1 b)
method query(a: array<int>, i: int, j: int) returns (res : int)
requires 0 <= i <= j <= a.Length
ensures res == sum(a, i, j)
{
res := 0;
var ind := j-1;
while ind >= i
invariant i-1 <= ind < j
invariant res == sum(a, i, j) - sum(a, i, ind+1)
decreases ind
{
res := res + a[ind];
ind := ind - 1;
}
}
// 1 c)
// a -> [1, 10, 3, −4, 5]
// c -> [0, 1, 11, 14, 10, 15]
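// Added worked instance (illustrative, not in the original handout): with the
// example arrays above, sum(a, 1, 3) = a[1] + a[2] = 10 + 3 = 13 and
// c[3] - c[1] = 14 - 1 = 13, which is exactly what queryFast returns below
// for i == 1 and j == 3.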
method queryFast(a: array<int>, c: array<int>, i: int, j: int) returns (r: int)
requires 0 <= i <= j <= a.Length
requires is_prefix_sum_for(a,c)
ensures r == sum(a, i, j)
{
var k := i;
proof(a, 0, j, k);
r := c[j] - c[i];
}
predicate is_prefix_sum_for (a: array<int>, c: array<int>)
reads c, a
{
a.Length + 1 == c.Length && forall i: int :: 0 <= i <= a.Length ==> c[i] == sum(a, 0, i)
}
lemma proof(a: array<int>, i: int, j: int, k:int)
requires 0 <= i <= k <= j <= a.Length
ensures sum(a, i, k) + sum(a, k, j) == sum(a, i, j)
// 2
datatype List<T> = Nil | Cons(head: T, tail: List<T>)
method from_array<T>(a: array<T>) returns (l: List<T>)
ensures forall i: int :: 0 <= i < a.Length ==> mem(a[i], l)
ensures forall x: T :: mem(x, l) ==> exists y: int :: 0 <= y < a.Length && a[y] == x
{
l := Nil;
var i := a.Length - 1;
while i >= 0
invariant 0 <= i+1 <= a.Length
invariant forall j: int :: i < j < a.Length ==> mem(a[j], l)
invariant forall x: T :: mem(x, l) ==> exists y: int :: i+1 <= y < a.Length && a[y] == x
decreases i
{
l := Cons(a[i], l);
i := i - 1;
}
}
function mem<T(==)> (x: T, l: List<T>) : bool
{
match l
case Nil => false
case Cons(h, t) => h == x || mem(x, t)
}
| // 1 a)
// [ai, aj[
function sum(a: array<int>, i: int, j: int) : int
requires 0 <= i <= j <= a.Length
reads a
{
if i == j then 0
else a[j-1] + sum(a, i, j-1)
}
// 1 b)
method query(a: array<int>, i: int, j: int) returns (res : int)
requires 0 <= i <= j <= a.Length
ensures res == sum(a, i, j)
{
res := 0;
var ind := j-1;
while ind >= i
{
res := res + a[ind];
ind := ind - 1;
}
}
// 1 c)
// a -> [1, 10, 3, −4, 5]
// c -> [0, 1, 11, 14, 10, 15]
method queryFast(a: array<int>, c: array<int>, i: int, j: int) returns (r: int)
requires 0 <= i <= j <= a.Length
requires is_prefix_sum_for(a,c)
ensures r == sum(a, i, j)
{
var k := i;
proof(a, 0, j, k);
r := c[j] - c[i];
}
predicate is_prefix_sum_for (a: array<int>, c: array<int>)
reads c, a
{
a.Length + 1 == c.Length && forall i: int :: 0 <= i <= a.Length ==> c[i] == sum(a, 0, i)
}
lemma proof(a: array<int>, i: int, j: int, k:int)
requires 0 <= i <= k <= j <= a.Length
ensures sum(a, i, k) + sum(a, k, j) == sum(a, i, j)
// 2
datatype List<T> = Nil | Cons(head: T, tail: List<T>)
method from_array<T>(a: array<T>) returns (l: List<T>)
ensures forall i: int :: 0 <= i < a.Length ==> mem(a[i], l)
ensures forall x: T :: mem(x, l) ==> exists y: int :: 0 <= y < a.Length && a[y] == x
{
l := Nil;
var i := a.Length - 1;
while i >= 0
{
l := Cons(a[i], l);
i := i - 1;
}
}
function mem<T(==)> (x: T, l: List<T>) : bool
{
match l
case Nil => false
case Cons(h, t) => h == x || mem(x, t)
}
|
377 | Software-building-and-verification-Projects_tmp_tmp5tm1srrn_CVS-projeto_handout2.dfy | datatype List<T> = Nil | Cons(head:T,tail:List<T>)
datatype Option<T> = None | Some(elem:T)
ghost function mem<T>(x:T,l:List<T>) : bool {
match l {
case Nil => false
case Cons(y,xs) => x==y || mem(x,xs)
}
}
ghost function length<T>(l:List<T>) : int {
match l {
case Nil => 0
case Cons(_,xs) => 1 + length(xs)
}
}
function list_find<K(==),V(!new)>(k:K,l:List<(K,V)>) : Option<V>
ensures match list_find(k,l) {
case None => forall v :: !mem((k,v),l)
case Some(v) => mem((k,v),l)
}
decreases l
{
match l {
case Nil => None
case Cons((k',v),xs) => if k==k' then Some(v) else list_find(k,xs)
}
}
function list_remove<K(==,!new),V(!new)>(k:K, l:List<(K,V)>) : List<(K,V)>
decreases l
ensures forall k',v :: mem((k',v),list_remove(k,l)) <==> (mem((k',v),l) && k != k')
{
match l {
case Nil => Nil
case Cons((k',v),xs) => if k==k' then list_remove(k,xs) else
Cons((k',v),list_remove(k,xs))
}
}
class Hashtable<K(==,!new),V(!new)> {
var size : int
var data : array<List<(K,V)>>
ghost var Repr : set<object>
ghost var elems : map<K,Option<V>>
ghost predicate RepInv()
reads this, Repr
{
this in Repr && data in Repr && data.Length > 0 &&
(forall i :: 0 <= i < data.Length ==> valid_hash(data, i)) &&
(forall k,v :: valid_data(k,v,elems,data))
}
ghost predicate valid_hash(data: array<List<(K,V)>>, i: int)
requires 0 <= i < data.Length
reads data
{
forall k,v :: mem((k,v), data[i]) ==> (bucket(k,data.Length) == i)
}
ghost predicate valid_data(k: K,v: V,elems: map<K, Option<V>>, data: array<List<(K,V)>>)
reads this, Repr, data
requires data.Length > 0
{
(k in elems && elems[k] == Some(v)) <==> mem((k,v), data[bucket(k, data.Length)])
}
function hash(key:K) : int
ensures hash(key) >= 0
function bucket(k: K, n: int) : int
requires n > 0
ensures 0 <= bucket(k, n) < n
{
hash(k) % n
}
constructor(n:int)
requires n > 0
ensures RepInv()
ensures fresh(Repr-{this})
ensures elems == map[]
ensures size == 0
{
size := 0;
data := new List<(K,V)>[n](i => Nil);
Repr := {this, data};
elems := map[];
}
method clear()
requires RepInv()
ensures RepInv()
ensures elems == map[]
ensures fresh(Repr - old(Repr))
modifies Repr
{
var i := 0;
while i < data.Length
invariant 0 <= i <= data.Length
invariant forall j :: 0 <= j < i ==> data[j] == Nil
modifies data
{
data[i] := Nil;
i := i + 1;
}
size := 0;
elems := map[];
}
method resize()
requires RepInv()
ensures RepInv()
ensures fresh(Repr - old(Repr))
ensures forall key :: key in old(elems) ==> key in elems
ensures forall k,v :: k in old(elems) && old(elems)[k] == Some(v) ==> k in elems && elems[k] == Some(v)
modifies Repr
{
var newData := new List<(K,V)>[data.Length * 2](i => Nil);
var i := 0;
var oldSize := data.Length;
var newSize := newData.Length;
assert forall i :: 0 <= i < data.Length ==> valid_hash(data,i);
while i < data.Length
modifies newData
invariant RepInv()
invariant 0 <= i <= data.Length
invariant newData != data
invariant old(data) == data
invariant old(size) == size
invariant Repr == old(Repr)
invariant 0 < oldSize == data.Length
invariant data.Length*2 == newData.Length == newSize
invariant forall j :: 0 <= j < newSize ==> valid_hash(newData, j)
invariant forall k,v :: (
if 0<= bucket(k, oldSize) < i then
valid_data(k,v,elems,newData)
else
!mem((k,v), newData[bucket(k, newSize)]))
{
assert valid_hash(data,i);
assert forall k,v :: (
if 0 <= bucket(k, oldSize) < i then
valid_data(k,v,elems,data)
else if bucket(k, oldSize) == i then
((k in elems && elems[k] == Some(v))
<==> mem((k,v), data[bucket(k,data.Length)]) || mem((k,v), newData[bucket(k, newSize)]))
else
!mem((k,v), newData[bucket(k, newSize)]));
rehash(data[i],newData,i,oldSize,newSize);
i := i + 1;
}
Repr := Repr - {data} + {newData};
data := newData;
}
method rehash(l: List<(K,V)>, newData: array<List<(K,V)>>,i: int, oldSize: int, newSize: int)
requires newData != data
requires 0 < oldSize == data.Length
requires newData.Length == 2 * oldSize == newSize
requires forall k,v :: mem((k,v), l) ==> bucket(k, oldSize) == i
requires forall j :: 0 <= j < newSize ==> valid_hash(newData, j)
requires forall k,v :: (
if 0 <= bucket(k, oldSize) < i then
valid_data(k,v,elems,newData)
else if bucket(k, oldSize) == i then
((k in elems && elems[k] == Some(v))
<==> mem((k,v), l) || mem((k,v),newData[bucket(k, newSize)]))
else
!mem((k,v),newData[bucket(k, newSize)]))
ensures forall j :: 0 <= j < newSize ==> valid_hash(newData, j)
ensures forall k,v ::
(if 0 <= bucket(k, oldSize) <= i then
valid_data(k,v,elems,newData)
else
!mem((k,v),newData[bucket(k, newSize)]))
modifies newData
decreases l
{
match l {
case Nil => return;
case Cons((k,v), r) => {
var b := bucket(k, newSize);
newData[b] := Cons((k,v), newData[b]);
rehash(r, newData, i, oldSize, newSize);
}
}
}
method find(k: K) returns (r: Option<V>)
requires RepInv()
ensures RepInv()
ensures match r
case None => (k !in elems || (k in elems && elems[k] == None))
case Some(v) => (k in elems && elems[k] == Some(v))
{
assert forall k, v :: valid_data(k,v,elems,data) && ((k in elems && elems[k] == Some(v)) <==> (mem((k,v),data[bucket(k,data.Length)])));
var idx := bucket(k, data.Length);
r := list_find(k, data[idx]);
assert match list_find(k,data[bucket(k, data.Length)])
case None => forall v :: idx == bucket(k,data.Length) && !mem((k,v),data[idx])
case Some(v) => mem((k,v),data[bucket(k,data.Length)]);
}
method remove(k: K)
requires RepInv()
ensures RepInv()
ensures fresh(Repr - old(Repr))
ensures k !in elems || elems[k] == None
ensures forall key :: key != k && key in old(elems) ==> key in elems && elems[key] == old(elems[key])
modifies Repr
{
assert forall i :: 0 <= i < data.Length ==> valid_hash(data, i);
assert forall k,v :: valid_data(k,v,elems,data);
var idx := bucket(k, data.Length);
var opt := list_find(k, data[idx]);
assert forall i :: 0 <= i < data.Length ==> valid_hash(data,i) && (forall k,v:: mem((k,v), data[i]) ==> (bucket(k,data.Length) == i));
match opt
case None =>
assert forall k,v :: valid_data(k,v,elems, data) && ((k in elems && elems[k] == Some(v)) <==> (mem((k,v), data[bucket(k, data.Length)])));
assert forall i :: 0 <= i < data.Length ==> valid_hash(data,i);
assert forall v :: !mem((k,v),data[bucket(k,data.Length)]);
case Some(v) =>
assert forall k,v :: valid_data(k,v,elems,data) && ((k in elems && elems[k] == Some(v)) <==> (mem((k,v),data[bucket(k,data.Length)])));
var idx := bucket(k, data.Length);
data[idx] := list_remove(k, data[idx]);
elems := elems[k := None];
size := size - 1;
}
method add(k:K,v:V)
requires RepInv()
ensures RepInv()
ensures fresh(Repr - old(Repr))
ensures k in elems && elems[k] == Some(v)
ensures forall key :: key != k && key in old(elems) ==> key in elems
modifies Repr
{
if(size >= data.Length * 3/4) {
resize();
}
remove(k);
assert forall i :: 0 <= i < data.Length ==> valid_hash(data, i);
var ind := bucket(k,data.Length);
assert forall i :: 0 <= i < data.Length ==> valid_hash(data, i) && (forall k,v:: mem((k,v), data[i]) ==> (bucket(k,data.Length) == i));
assert forall k,v :: valid_data(k,v,elems, data) && ((k in elems && elems[k] == Some(v)) <==> (mem((k,v), data[bucket(k, data.Length)])));
assert forall k,v :: mem((k,v), data[ind]) ==> (bucket(k,data.Length) == ind);
data[ind] := Cons((k,v), data[ind]);
elems := elems[k := Some(v)];
assert bucket(k,data.Length) == ind;
assert mem((k,v), data[bucket(k,data.Length)]);
size := size + 1;
assert k in elems && elems[k] == Some(v);
}
}
| datatype List<T> = Nil | Cons(head:T,tail:List<T>)
datatype Option<T> = None | Some(elem:T)
ghost function mem<T>(x:T,l:List<T>) : bool {
match l {
case Nil => false
case Cons(y,xs) => x==y || mem(x,xs)
}
}
ghost function length<T>(l:List<T>) : int {
match l {
case Nil => 0
case Cons(_,xs) => 1 + length(xs)
}
}
function list_find<K(==),V(!new)>(k:K,l:List<(K,V)>) : Option<V>
ensures match list_find(k,l) {
case None => forall v :: !mem((k,v),l)
case Some(v) => mem((k,v),l)
}
{
match l {
case Nil => None
case Cons((k',v),xs) => if k==k' then Some(v) else list_find(k,xs)
}
}
function list_remove<K(==,!new),V(!new)>(k:K, l:List<(K,V)>) : List<(K,V)>
ensures forall k',v :: mem((k',v),list_remove(k,l)) <==> (mem((k',v),l) && k != k')
{
match l {
case Nil => Nil
case Cons((k',v),xs) => if k==k' then list_remove(k,xs) else
Cons((k',v),list_remove(k,xs))
}
}
class Hashtable<K(==,!new),V(!new)> {
var size : int
var data : array<List<(K,V)>>
ghost var Repr : set<object>
ghost var elems : map<K,Option<V>>
ghost predicate RepInv()
reads this, Repr
{
this in Repr && data in Repr && data.Length > 0 &&
(forall i :: 0 <= i < data.Length ==> valid_hash(data, i)) &&
(forall k,v :: valid_data(k,v,elems,data))
}
ghost predicate valid_hash(data: array<List<(K,V)>>, i: int)
requires 0 <= i < data.Length
reads data
{
forall k,v :: mem((k,v), data[i]) ==> (bucket(k,data.Length) == i)
}
ghost predicate valid_data(k: K,v: V,elems: map<K, Option<V>>, data: array<List<(K,V)>>)
reads this, Repr, data
requires data.Length > 0
{
(k in elems && elems[k] == Some(v)) <==> mem((k,v), data[bucket(k, data.Length)])
}
function hash(key:K) : int
ensures hash(key) >= 0
function bucket(k: K, n: int) : int
requires n > 0
ensures 0 <= bucket(k, n) < n
{
hash(k) % n
}
constructor(n:int)
requires n > 0
ensures RepInv()
ensures fresh(Repr-{this})
ensures elems == map[]
ensures size == 0
{
size := 0;
data := new List<(K,V)>[n](i => Nil);
Repr := {this, data};
elems := map[];
}
method clear()
requires RepInv()
ensures RepInv()
ensures elems == map[]
ensures fresh(Repr - old(Repr))
modifies Repr
{
var i := 0;
while i < data.Length
modifies data
{
data[i] := Nil;
i := i + 1;
}
size := 0;
elems := map[];
}
method resize()
requires RepInv()
ensures RepInv()
ensures fresh(Repr - old(Repr))
ensures forall key :: key in old(elems) ==> key in elems
ensures forall k,v :: k in old(elems) && old(elems)[k] == Some(v) ==> k in elems && elems[k] == Some(v)
modifies Repr
{
var newData := new List<(K,V)>[data.Length * 2](i => Nil);
var i := 0;
var oldSize := data.Length;
var newSize := newData.Length;
while i < data.Length
modifies newData
{
rehash(data[i],newData,i,oldSize,newSize);
i := i + 1;
}
Repr := Repr - {data} + {newData};
data := newData;
}
method rehash(l: List<(K,V)>, newData: array<List<(K,V)>>,i: int, oldSize: int, newSize: int)
requires newData != data
requires 0 < oldSize == data.Length
requires newData.Length == 2 * oldSize == newSize
requires forall k,v :: mem((k,v), l) ==> bucket(k, oldSize) == i
requires forall j :: 0 <= j < newSize ==> valid_hash(newData, j)
requires forall k,v :: (
if 0 <= bucket(k, oldSize) < i then
valid_data(k,v,elems,newData)
else if bucket(k, oldSize) == i then
((k in elems && elems[k] == Some(v))
<==> mem((k,v), l) || mem((k,v),newData[bucket(k, newSize)]))
else
!mem((k,v),newData[bucket(k, newSize)]))
ensures forall j :: 0 <= j < newSize ==> valid_hash(newData, j)
ensures forall k,v ::
(if 0 <= bucket(k, oldSize) <= i then
valid_data(k,v,elems,newData)
else
!mem((k,v),newData[bucket(k, newSize)]))
modifies newData
{
match l {
case Nil => return;
case Cons((k,v), r) => {
var b := bucket(k, newSize);
newData[b] := Cons((k,v), newData[b]);
rehash(r, newData, i, oldSize, newSize);
}
}
}
method find(k: K) returns (r: Option<V>)
requires RepInv()
ensures RepInv()
ensures match r
case None => (k !in elems || (k in elems && elems[k] == None))
case Some(v) => (k in elems && elems[k] == Some(v))
{
var idx := bucket(k, data.Length);
r := list_find(k, data[idx]);
}
method remove(k: K)
requires RepInv()
ensures RepInv()
ensures fresh(Repr - old(Repr))
ensures k !in elems || elems[k] == None
ensures forall key :: key != k && key in old(elems) ==> key in elems && elems[key] == old(elems[key])
modifies Repr
{
var idx := bucket(k, data.Length);
var opt := list_find(k, data[idx]);
match opt
case None =>
case Some(v) =>
var idx := bucket(k, data.Length);
data[idx] := list_remove(k, data[idx]);
elems := elems[k := None];
size := size - 1;
}
method add(k:K,v:V)
requires RepInv()
ensures RepInv()
ensures fresh(Repr - old(Repr))
ensures k in elems && elems[k] == Some(v)
ensures forall key :: key != k && key in old(elems) ==> key in elems
modifies Repr
{
if(size >= data.Length * 3/4) {
resize();
}
remove(k);
var ind := bucket(k,data.Length);
data[ind] := Cons((k,v), data[ind]);
elems := elems[k := Some(v)];
size := size + 1;
}
}
|
378 | TFG_tmp_tmpbvsao41w_Algoritmos Dafny_div_ent_it.dfy | method div_ent_it(a: int, b: int) returns (c: int, r: int)
// Iterative integer division algorithm
// that computes the quotient and the remainder
requires a >= 0 && b > 0
ensures a == b*c + r && 0 <= r < b
{
c := 0; r := a ;
while (r >= b)
invariant a == b * c + r && r >= 0 && b > 0
decreases r
{
c := c + 1 ;
r := r - b ;
}
}
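// e.g. the call in Main, div_ent_it(6, 2), returns c == 3 and r == 0,
// since 6 == 2*3 + 0 and 0 <= 0 < 2.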
method Main()
{
var c, r := div_ent_it(6 , 2) ;
print "Cociente: ", c, ", Resto: ", r ;
}
| method div_ent_it(a: int, b: int) returns (c: int, r: int)
// Iterative integer division algorithm
// that computes the quotient and the remainder
requires a >= 0 && b > 0
ensures a == b*c + r && 0 <= r < b
{
c := 0; r := a ;
while (r >= b)
{
c := c + 1 ;
r := r - b ;
}
}
method Main()
{
var c, r := div_ent_it(6 , 2) ;
print "Cociente: ", c, ", Resto: ", r ;
}
|
379 | TFG_tmp_tmpbvsao41w_Algoritmos Dafny_suma_it.dfy | method suma_it(V: array<int>) returns (x: int)
// Iterative algorithm that computes the
// sum of the components of a vector
ensures x == suma_vector(V, 0)
{
var n := V.Length ;
x := 0 ;
while (n != 0)
invariant 0 <= n <= V.Length && x == suma_vector(V, n)
decreases n
{
x := x + V[n - 1] ;
n := n - 1 ;
}
}
function suma_vector(V: array<int>, n: nat): int
// x = V[n] + V[n + 1] + ... + V[N - 1]
// Auxiliary function for the sum of
// the components of a vector
requires 0 <= n <= V.Length
decreases V.Length - n
reads V
{
if (n == V.Length) then 0
else V[n] + suma_vector(V, n + 1)
}
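// e.g. for the vector v == [-1, 2, 5, -5, 8] used in Main,
// suma_vector(v, 0) == -1 + 2 + 5 + -5 + 8 == 9.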
method Main()
{
var v := new int[] [-1, 2, 5, -5, 8] ;
var w := new int[] [ 1, 0, 5, 5, 8] ;
var s1 := suma_it(v) ;
var s2 := suma_it(w) ;
print "La suma del vector v es: ", s1, "\n" ;
print "La suma del vector w es: ", s2 ;
}
| method suma_it(V: array<int>) returns (x: int)
// Iterative algorithm that computes the
// sum of the components of a vector
ensures x == suma_vector(V, 0)
{
var n := V.Length ;
x := 0 ;
while (n != 0)
{
x := x + V[n - 1] ;
n := n - 1 ;
}
}
function suma_vector(V: array<int>, n: nat): int
// x = V[n] + V[n + 1] + ... + V[N - 1]
// Auxiliary function for the sum of
// the components of a vector
requires 0 <= n <= V.Length
reads V
{
if (n == V.Length) then 0
else V[n] + suma_vector(V, n + 1)
}
method Main()
{
var v := new int[] [-1, 2, 5, -5, 8] ;
var w := new int[] [ 1, 0, 5, 5, 8] ;
var s1 := suma_it(v) ;
var s2 := suma_it(w) ;
print "La suma del vector v es: ", s1, "\n" ;
print "La suma del vector w es: ", s2 ;
}
|
380 | Trab1-Metodos-Formais_tmp_tmp_8fa4trr_circular-array.dfy | /*
Class CircularArray.
Names:
Arthur Sudbrack Ibarra,
Miguel Torres de Castro,
Felipe Grosze Nipper,
Willian Magnum Albeche,
Luiz Eduardo Mello dos Reis.
*/
class {:autocontracts} CircularArray {
/*
Implementation
*/
var arr: array<int>; // The array.
var start: nat; // The index of the first element.
var size: nat; // The number of elements in the queue.
/*
Abstraction.
*/
ghost const Capacity: nat; // The capacity of the queue. (WE WERE UNABLE TO MAKE THE SIZE OF THE ARRAY DYNAMIC).
ghost var Elements: seq<int>; // The elements in the array represented as a sequence.
/*
Class invariant.
*/
ghost predicate Valid()
{
0 <= start < arr.Length &&
0 <= size <= arr.Length &&
Capacity == arr.Length &&
Elements == if start + size <= arr.Length
then arr[start..start + size]
else arr[start..] + arr[..size - (arr.Length - start)]
}
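  // e.g. with arr.Length == 4, start == 3 and size == 2, the queue wraps
  // around and Elements == [arr[3], arr[0]].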
/*
Constructor.
*/
constructor EmptyQueue(capacity: nat)
requires capacity > 0
ensures Elements == []
ensures Capacity == capacity
{
arr := new int[capacity];
start := 0;
size := 0;
Capacity := capacity;
Elements := [];
}
/*
Enqueue Method
*/
method Enqueue(e: int)
requires !IsFull()
ensures Elements == old(Elements) + [e]
{
arr[(start + size) % arr.Length] := e;
size := size + 1;
Elements := Elements + [e];
}
/*
Dequeue method.
*/
method Dequeue() returns (e: int)
requires !IsEmpty()
ensures Elements == old(Elements)[1..]
ensures e == old(Elements)[0]
{
e := arr[start];
if start + 1 < arr.Length {
start := start + 1;
}
else {
start := 0;
}
size := size - 1;
Elements := Elements[1..];
}
/*
Contains predicate.
*/
predicate Contains(e: int)
ensures Contains(e) == (e in Elements)
{
if start + size < arr.Length then
e in arr[start..start + size]
else
e in arr[start..] + arr[..size - (arr.Length - start)]
}
/*
Size function.
*/
function Size(): nat
ensures Size() == |Elements|
{
size
}
/*
IsEmpty predicate.
*/
predicate IsEmpty()
ensures IsEmpty() <==> (|Elements| == 0)
{
size == 0
}
/*
IsFull predicate.
*/
predicate IsFull()
ensures IsFull() <==> |Elements| == Capacity
{
size == arr.Length
}
/*
GetAt method.
(Not requested in the assignment, but useful).
*/
method GetAt(i: nat) returns (e: int)
requires i < size
ensures e == Elements[i]
{
e := arr[(start + i) % arr.Length];
}
/*
AsSequence method.
(Auxiliary method for the Concatenate method)
*/
method AsSequence() returns (s: seq<int>)
ensures s == Elements
{
s := if start + size <= arr.Length
then arr[start..start + size]
else arr[start..] + arr[..size - (arr.Length - start)];
}
/*
Concatenate method.
*/
method Concatenate(q1: CircularArray) returns(q2: CircularArray)
requires q1.Valid()
requires q1 != this
ensures fresh(q2)
ensures q2.Capacity == Capacity + q1.Capacity
ensures q2.Elements == Elements + q1.Elements
{
q2 := new CircularArray.EmptyQueue(arr.Length + q1.arr.Length);
var s1 := AsSequence();
var s2 := q1.AsSequence();
var both := s1 + s2;
forall i | 0 <= i < size
{
q2.arr[i] := both[i];
}
q2.size := size + q1.size;
q2.start := 0;
q2.Elements := Elements + q1.Elements;
print q2.arr.Length;
print q2.size;
}
}
/*
Main method.
  Here the CircularArray class is demonstrated.
*/
method Main()
{
var q := new CircularArray.EmptyQueue(10); // Create a new queue.
assert q.IsEmpty(); // The queue must be empty.
q.Enqueue(1); // Enqueue the element 1.
assert !q.IsEmpty(); // The queue must now not be empty.
assert q.Size() == 1; // The queue must have size 1 after the enqueue.
assert q.Contains(1); // The queue must contain the element 1.
var e1 := q.GetAt(0); // Get the element at index 0.
assert e1 == 1; // The element at index 0 must be 1.
q.Enqueue(2); // Enqueue the element 2.
assert q.Size() == 2; // The queue must have size 2 after the enqueue.
assert q.Contains(2); // The queue must contain the element 2.
var e2 := q.GetAt(1); // Get the element at index 1.
assert e2 == 2; // The element at index 1 must be 2.
var e := q.Dequeue(); // Dequeue the element 1.
assert e == 1; // The dequeued element must be 1.
assert q.Size() == 1; // The queue must have size 1 after the dequeue.
assert !q.Contains(1); // The queue must NOT contain the element 1 anymore.
q.Enqueue(3); // Enqueue the element 3.
assert q.Size() == 2; // The queue must have size 2 after the enqueue.
assert q.Contains(3); // The queue must contain the element 3.
e := q.Dequeue(); // Dequeue the element 2.
assert e == 2; // The dequeued element must be 2.
assert q.Size() == 1; // The queue must have size 1 after the dequeue.
assert !q.Contains(2); // The queue must NOT contain the element 2 anymore.
e := q.Dequeue(); // Dequeue the element 3.
assert e == 3; // The dequeued element must be 3.
assert q.Size() == 0; // The queue must have size 0 after the dequeue.
assert !q.Contains(3); // The queue must NOT contain the element 3 anymore.
assert q.IsEmpty(); // The queue must now be empty.
assert q.Size() == 0; // The queue must now have size 0.
}
| /*
Class CircularArray.
Names:
Arthur Sudbrack Ibarra,
Miguel Torres de Castro,
Felipe Grosze Nipper,
Willian Magnum Albeche,
Luiz Eduardo Mello dos Reis.
*/
class {:autocontracts} CircularArray {
/*
Implementation
*/
var arr: array<int>; // The array.
var start: nat; // The index of the first element.
var size: nat; // The number of elements in the queue.
/*
Abstraction.
*/
ghost const Capacity: nat; // The capacity of the queue. (WE WERE UNABLE TO MAKE THE SIZE OF THE ARRAY DYNAMIC).
ghost var Elements: seq<int>; // The elements in the array represented as a sequence.
/*
Class invariant.
*/
ghost predicate Valid()
{
0 <= start < arr.Length &&
0 <= size <= arr.Length &&
Capacity == arr.Length &&
Elements == if start + size <= arr.Length
then arr[start..start + size]
else arr[start..] + arr[..size - (arr.Length - start)]
}
/*
Constructor.
*/
constructor EmptyQueue(capacity: nat)
requires capacity > 0
ensures Elements == []
ensures Capacity == capacity
{
arr := new int[capacity];
start := 0;
size := 0;
Capacity := capacity;
Elements := [];
}
/*
Enqueue Method
*/
method Enqueue(e: int)
requires !IsFull()
ensures Elements == old(Elements) + [e]
{
arr[(start + size) % arr.Length] := e;
size := size + 1;
Elements := Elements + [e];
}
/*
Dequeue method.
*/
method Dequeue() returns (e: int)
requires !IsEmpty()
ensures Elements == old(Elements)[1..]
ensures e == old(Elements)[0]
{
e := arr[start];
if start + 1 < arr.Length {
start := start + 1;
}
else {
start := 0;
}
size := size - 1;
Elements := Elements[1..];
}
/*
Contains predicate.
*/
predicate Contains(e: int)
ensures Contains(e) == (e in Elements)
{
if start + size < arr.Length then
e in arr[start..start + size]
else
e in arr[start..] + arr[..size - (arr.Length - start)]
}
/*
Size function.
*/
function Size(): nat
ensures Size() == |Elements|
{
size
}
/*
IsEmpty predicate.
*/
predicate IsEmpty()
ensures IsEmpty() <==> (|Elements| == 0)
{
size == 0
}
/*
IsFull predicate.
*/
predicate IsFull()
ensures IsFull() <==> |Elements| == Capacity
{
size == arr.Length
}
/*
GetAt method.
(Not requested in the assignment, but useful).
*/
method GetAt(i: nat) returns (e: int)
requires i < size
ensures e == Elements[i]
{
e := arr[(start + i) % arr.Length];
}
/*
AsSequence method.
(Auxiliary method for the Concatenate method)
*/
method AsSequence() returns (s: seq<int>)
ensures s == Elements
{
s := if start + size <= arr.Length
then arr[start..start + size]
else arr[start..] + arr[..size - (arr.Length - start)];
}
/*
Concatenate method.
*/
method Concatenate(q1: CircularArray) returns(q2: CircularArray)
requires q1.Valid()
requires q1 != this
ensures fresh(q2)
ensures q2.Capacity == Capacity + q1.Capacity
ensures q2.Elements == Elements + q1.Elements
{
q2 := new CircularArray.EmptyQueue(arr.Length + q1.arr.Length);
var s1 := AsSequence();
var s2 := q1.AsSequence();
var both := s1 + s2;
forall i | 0 <= i < size
{
q2.arr[i] := both[i];
}
q2.size := size + q1.size;
q2.start := 0;
q2.Elements := Elements + q1.Elements;
print q2.arr.Length;
print q2.size;
}
}
/*
Main method.
  Here the CircularArray class is demonstrated.
*/
method Main()
{
var q := new CircularArray.EmptyQueue(10); // Create a new queue.
q.Enqueue(1); // Enqueue the element 1.
var e1 := q.GetAt(0); // Get the element at index 0.
q.Enqueue(2); // Enqueue the element 2.
var e2 := q.GetAt(1); // Get the element at index 1.
var e := q.Dequeue(); // Dequeue the element 1.
q.Enqueue(3); // Enqueue the element 3.
e := q.Dequeue(); // Dequeue the element 2.
e := q.Dequeue(); // Dequeue the element 3.
}
|
381 | VerifiedMergeSortDafny_tmp_tmpva7qms1b_MergeSort.dfy | method mergeSimple(a1: seq<int>, a2: seq<int>, start: int, end: int, b: array<int>)
modifies b
requires sorted_seq(a1)
requires sorted_seq(a2)
requires 0 <= start <= end <= b.Length
requires |a1| + |a2| == end - start + 1
ensures sorted_slice(b, start, end)
{
var a1Pos := 0;
var a2Pos := 0;
var k := start;
while k < end
invariant (0 <= k && k <= end)
invariant sorted_slice(b, start, k)
invariant (|a1| - a1Pos) + (|a2| - a2Pos) == end - k + 1
invariant 0 <= a1Pos <= |a1|
invariant 0 <= a2Pos <= |a2|
invariant forall i :: start <= i < k && a1Pos < |a1| ==> b[i] <= a1[a1Pos]
invariant forall i :: start <= i < k && a2Pos < |a2| ==> b[i] <= a2[a2Pos]
{
if a1Pos < |a1| && a2Pos < |a2| && a1[a1Pos] <= a2[a2Pos] {
b[k] := a1[a1Pos];
a1Pos := a1Pos + 1;
} else if a1Pos < |a1| && a2Pos < |a2| && a1[a1Pos] > a2[a2Pos] {
b[k] := a2[a2Pos];
a2Pos := a2Pos + 1;
} else if a1Pos < |a1| {
assert(a2Pos >= |a2|);
b[k] := a1[a1Pos];
a1Pos := a1Pos + 1;
} else {
assert(a1Pos >= |a1|);
b[k] := a2[a2Pos];
a2Pos := a2Pos + 1;
}
k := k + 1;
}
}
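// merge performs the same two-pointer merge as mergeSimple, but additionally
// establishes that b[start..end] is a permutation (multiset equality) of a1 + a2.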
method merge(a1: seq<int>, a2: seq<int>, start: int, end: int, b: array<int>)
modifies b
requires sorted_seq(a1)
requires sorted_seq(a2)
requires end - start == |a1| + |a2|
requires 0 <= start < end < |a1| && end <= |a2| < b.Length
requires end < |a1| && end < |a2|
ensures sorted_slice(b, start, end)
requires b.Length == |a2| + |a1|
ensures merged(a1, a2, b, start, end)
{
assert forall xs : seq<int> :: xs[0..|xs|] == xs;
assert forall xs : seq<int>, a,b : int :: 0 <= a < b < |xs| ==> xs[a..b+1] == xs[a..b] + [xs[b]];
var a1Pos := 0;
var a2Pos := 0;
var k := start;
while k < end
invariant (0 <= k && k <= end)
invariant sorted_slice(b, start, k)
invariant (|a1| - a1Pos) + (|a2| - a2Pos) == end - k
invariant 0 <= a1Pos <= |a1|
invariant 0 <= a2Pos <= |a2|
invariant forall i :: start <= i < k && a1Pos < |a1| ==> b[i] <= a1[a1Pos]
invariant forall i :: start <= i < k && a2Pos < |a2| ==> b[i] <= a2[a2Pos]
invariant merged(a1[0..a1Pos], a2[0..a2Pos], b, start, k)
{
if a1Pos < |a1| && a2Pos < |a2| && a1[a1Pos] <= a2[a2Pos] {
b[k] := a1[a1Pos];
a1Pos := a1Pos + 1;
} else if a1Pos < |a1| && a2Pos < |a2| && a1[a1Pos] > a2[a2Pos] {
b[k] := a2[a2Pos];
a2Pos := a2Pos + 1;
} else if a1Pos < |a1| {
assert(a2Pos >= |a2|);
b[k] := a1[a1Pos];
a1Pos := a1Pos + 1;
} else {
assert(a1Pos >= |a1|);
b[k] := a2[a2Pos];
a2Pos := a2Pos + 1;
}
k := k + 1;
}
}
predicate merged(a1: seq<int>, a2: seq<int>, b: array<int>, start: int, end: int)
reads b
requires end - start == |a2| + |a1|
requires 0 <= start <= end <= b.Length
{
multiset(a1) + multiset(a2) == multiset(b[start..end])
}
predicate sorted_slice(a: array<int>, start: int, end: int)
requires 0 <= start <= end <= a.Length
reads a
{
forall i, j :: start <= i <= j < end ==> a[i] <= a[j]
}
predicate sorted_seq(a: seq<int>)
{
forall i, j :: 0 <= i <= j < |a| ==> a[i] <= a[j]
}
predicate sorted(a: array<int>)
reads a
{
forall i, j :: 0 <= i < j < a.Length ==> a[i] <= a[j]
}
| method mergeSimple(a1: seq<int>, a2: seq<int>, start: int, end: int, b: array<int>)
modifies b
requires sorted_seq(a1)
requires sorted_seq(a2)
requires 0 <= start <= end <= b.Length
requires |a1| + |a2| == end - start + 1
ensures sorted_slice(b, start, end)
{
var a1Pos := 0;
var a2Pos := 0;
var k := start;
while k < end
{
if a1Pos < |a1| && a2Pos < |a2| && a1[a1Pos] <= a2[a2Pos] {
b[k] := a1[a1Pos];
a1Pos := a1Pos + 1;
} else if a1Pos < |a1| && a2Pos < |a2| && a1[a1Pos] > a2[a2Pos] {
b[k] := a2[a2Pos];
a2Pos := a2Pos + 1;
} else if a1Pos < |a1| {
b[k] := a1[a1Pos];
a1Pos := a1Pos + 1;
} else {
b[k] := a2[a2Pos];
a2Pos := a2Pos + 1;
}
k := k + 1;
}
}
method merge(a1: seq<int>, a2: seq<int>, start: int, end: int, b: array<int>)
modifies b
requires sorted_seq(a1)
requires sorted_seq(a2)
requires end - start == |a1| + |a2|
requires 0 <= start < end < |a1| && end <= |a2| < b.Length
requires end < |a1| && end < |a2|
ensures sorted_slice(b, start, end)
requires b.Length == |a2| + |a1|
ensures merged(a1, a2, b, start, end)
{
var a1Pos := 0;
var a2Pos := 0;
var k := start;
while k < end
{
if a1Pos < |a1| && a2Pos < |a2| && a1[a1Pos] <= a2[a2Pos] {
b[k] := a1[a1Pos];
a1Pos := a1Pos + 1;
} else if a1Pos < |a1| && a2Pos < |a2| && a1[a1Pos] > a2[a2Pos] {
b[k] := a2[a2Pos];
a2Pos := a2Pos + 1;
} else if a1Pos < |a1| {
b[k] := a1[a1Pos];
a1Pos := a1Pos + 1;
} else {
b[k] := a2[a2Pos];
a2Pos := a2Pos + 1;
}
k := k + 1;
}
}
predicate merged(a1: seq<int>, a2: seq<int>, b: array<int>, start: int, end: int)
reads b
requires end - start == |a2| + |a1|
requires 0 <= start <= end <= b.Length
{
multiset(a1) + multiset(a2) == multiset(b[start..end])
}
predicate sorted_slice(a: array<int>, start: int, end: int)
requires 0 <= start <= end <= a.Length
reads a
{
forall i, j :: start <= i <= j < end ==> a[i] <= a[j]
}
predicate sorted_seq(a: seq<int>)
{
forall i, j :: 0 <= i <= j < |a| ==> a[i] <= a[j]
}
predicate sorted(a: array<int>)
reads a
{
forall i, j :: 0 <= i < j < a.Length ==> a[i] <= a[j]
}
|
382 | Workshop_tmp_tmp0cu11bdq_Lecture_Answers_max_array.dfy | // http://verifythus.cost-ic0701.org/common-example/arraymax-in-dafny
method max(a:array<int>) returns(max:int)
requires a != null;
ensures forall j :: j >= 0 && j < a.Length ==> max >= a[j]; //max is at least as large as every element in the array
ensures a.Length > 0 ==> exists j :: j >= 0 && j < a.Length && max == a[j]; //max is an element in the array
{
if (a.Length == 0) {
max := 0;
return;
}
max := a[0];
var i := 1;
while i < a.Length
invariant i <= a.Length //i is bounded by the array
    invariant forall j :: 0 <= j < i ==> max >= a[j] //max is at least as large as anything seen so far (indices below i)
invariant exists j :: 0 <= j < i && max==a[j] //max exists somewhere in the seen portion of the array
{
if a[i] > max
{
max := a[i];
}
i := i + 1;
}
}
| // http://verifythus.cost-ic0701.org/common-example/arraymax-in-dafny
method max(a:array<int>) returns(max:int)
requires a != null;
ensures forall j :: j >= 0 && j < a.Length ==> max >= a[j]; //max is at least as large as every element in the array
ensures a.Length > 0 ==> exists j :: j >= 0 && j < a.Length && max == a[j]; //max is an element in the array
{
if (a.Length == 0) {
max := 0;
return;
}
max := a[0];
var i := 1;
while i < a.Length
{
if a[i] > max
{
max := a[i];
}
i := i + 1;
}
}
|
383 | Workshop_tmp_tmp0cu11bdq_Lecture_Answers_selection_sort.dfy | //https://homepage.cs.uiowa.edu/~tinelli/classes/181/Fall21/Tools/Dafny/Examples/selection-sort.shtml
predicate sorted (a: array<int>)
requires a != null
reads a
{
sorted'(a, a.Length)
}
predicate sorted' (a: array<int>, i: int)
requires a != null
requires 0 <= i <= a.Length
reads a
{
forall k :: 0 < k < i ==> a[k-1] <= a[k]
}
// Selection sort on arrays
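// Each outer iteration selects the minimum of the unsorted suffix a[n..] and
// swaps it into position n, so the prefix a[..n] stays sorted and every element
// of the prefix is at most every element of the suffix.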
method SelectionSort(a: array<int>)
modifies a
ensures sorted(a)
//ensures multiset(old(a[..])) == multiset(a[..])
{
var n := 0;
while (n != a.Length)
invariant 0 <= n <= a.Length
    invariant forall i, j :: 0 <= i < n <= j < a.Length ==> a[i] <= a[j] //every value in the sorted section is no greater than any value in the unsorted section
invariant forall k1, k2 :: 0 <= k1 < k2 < n ==> a[k1] <= a[k2] //all values in the sorted section are sorted with respect to one another
{
var mindex := n;
var m := n + 1;
while (m != a.Length)
      invariant n <= m <= a.Length //m (search idx) stays within its valid range
      invariant n <= mindex < m <= a.Length // mindex stays within its valid range
      invariant forall i :: n <= i < m ==> a[mindex] <= a[i] //mindex indexes the current smallest value in the range [n, m)
{
if (a[m] < a[mindex]) {
mindex := m;
}
m := m + 1;
}
a[n], a[mindex] := a[mindex], a[n];
n := n + 1;
}
}
| //https://homepage.cs.uiowa.edu/~tinelli/classes/181/Fall21/Tools/Dafny/Examples/selection-sort.shtml
predicate sorted (a: array<int>)
requires a != null
reads a
{
sorted'(a, a.Length)
}
predicate sorted' (a: array<int>, i: int)
requires a != null
requires 0 <= i <= a.Length
reads a
{
forall k :: 0 < k < i ==> a[k-1] <= a[k]
}
// Selection sort on arrays
method SelectionSort(a: array<int>)
modifies a
ensures sorted(a)
//ensures multiset(old(a[..])) == multiset(a[..])
{
var n := 0;
while (n != a.Length)
{
var mindex := n;
var m := n + 1;
while (m != a.Length)
{
if (a[m] < a[mindex]) {
mindex := m;
}
m := m + 1;
}
a[n], a[mindex] := a[mindex], a[n];
n := n + 1;
}
}
|
384 | Workshop_tmp_tmp0cu11bdq_Lecture_Answers_sum_array.dfy | function sumTo( a:array<int>, n:int ) : int
requires a != null;
requires 0 <= n && n <= a.Length;
reads a;
{
if (n == 0) then 0 else sumTo(a, n-1) + a[n-1]
}
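// e.g. for a == [3, 1, 2], sumTo(a, 2) == a[0] + a[1] == 4
// and sumTo(a, a.Length) == 6.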
method sum_array( a: array<int>) returns (sum: int)
requires a != null;
ensures sum == sumTo(a, a.Length);
{
var i := 0;
sum := 0;
while (i < a.Length)
invariant 0 <= i <= a.Length;
invariant sum == sumTo(a, i);
{
sum := sum + a[i];
i := i + 1;
}
}
| function sumTo( a:array<int>, n:int ) : int
requires a != null;
requires 0 <= n && n <= a.Length;
reads a;
{
if (n == 0) then 0 else sumTo(a, n-1) + a[n-1]
}
method sum_array( a: array<int>) returns (sum: int)
requires a != null;
ensures sum == sumTo(a, a.Length);
{
var i := 0;
sum := 0;
while (i < a.Length)
{
sum := sum + a[i];
i := i + 1;
}
}
|
385 | Workshop_tmp_tmp0cu11bdq_Lecture_Answers_triangle_number.dfy | method TriangleNumber(N: int) returns (t: int)
requires N >= 0
ensures t == N * (N + 1) / 2
{
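  // Accumulates 1 + 2 + ... + N; e.g. N == 4 gives t == 10 == 4*5/2.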
t := 0;
var n := 0;
while n < N
invariant 0 <= n <= N
invariant t == n * (n + 1) / 2
decreases N - n;// can be left out because it is guessed correctly by Dafny
{
n:= n + 1;
t := t + n;
}
}
| method TriangleNumber(N: int) returns (t: int)
requires N >= 0
ensures t == N * (N + 1) / 2
{
t := 0;
var n := 0;
while n < N
{
n:= n + 1;
t := t + n;
}
}
|
386 | Workshop_tmp_tmp0cu11bdq_Workshop_Answers_Question5.dfy | method rev(a : array<int>)
requires a != null;
modifies a;
ensures forall k :: 0 <= k < a.Length ==> a[k] == old(a[(a.Length - 1) - k]);
{
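  // Swaps a[i] with a[a.Length - 1 - i], moving both indices towards the
  // middle; for odd lengths the middle element is left in place.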
var i := 0;
while (i < a.Length - 1 - i)
invariant 0 <= i <= a.Length/2;
    invariant forall k :: 0 <= k < i || a.Length - 1 - i < k <= a.Length - 1 ==> a[k] == old(a[a.Length - 1 - k]); //The already-reversed outer regions hold the values from the mirrored positions
    invariant forall k :: i <= k <= a.Length - 1 - i ==> a[k] == old(a[k]); // The not-yet-reversed middle region still holds the original values
{
a[i], a[a.Length - 1 - i] := a[a.Length - 1 - i], a[i];
i := i + 1;
}
}
| method rev(a : array<int>)
requires a != null;
modifies a;
ensures forall k :: 0 <= k < a.Length ==> a[k] == old(a[(a.Length - 1) - k]);
{
var i := 0;
while (i < a.Length - 1 - i)
{
a[i], a[a.Length - 1 - i] := a[a.Length - 1 - i], a[i];
i := i + 1;
}
}
|
387 | Workshop_tmp_tmp0cu11bdq_Workshop_Answers_Question6.dfy | method arrayUpToN(n: int) returns (a: array<int>)
requires n >= 0
ensures a.Length == n
ensures forall j :: 0 < j < n ==> a[j] >= 0
ensures forall j, k : int :: 0 <= j <= k < n ==> a[j] <= a[k]
{
var i := 0;
a := new int[n];
while i < n
invariant 0 <= i <= n
invariant forall k :: 0 <= k < i ==> a[k] >= 0
invariant forall k :: 0 <= k < i ==> a[k] == k
invariant forall j, k :: 0 <= j <= k < i ==> a[j] <= a[k]
{
a[i] := i;
i := i + 1;
}
}
| method arrayUpToN(n: int) returns (a: array<int>)
requires n >= 0
ensures a.Length == n
ensures forall j :: 0 < j < n ==> a[j] >= 0
ensures forall j, k : int :: 0 <= j <= k < n ==> a[j] <= a[k]
{
var i := 0;
a := new int[n];
while i < n
{
a[i] := i;
i := i + 1;
}
}
|
388 | WrappedEther.dfy | /*
* Copyright 2022 ConsenSys Software Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software dis-
* tributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
module Int {
const TWO_7 : int := 0x0_80
const TWO_8 : int := 0x1_00
const TWO_15 : int := 0x0_8000
const TWO_16 : int := 0x1_0000
const TWO_24 : int := 0x1_0000_00
const TWO_31 : int := 0x0_8000_0000
const TWO_32 : int := 0x1_0000_0000
const TWO_40 : int := 0x1_0000_0000_00
const TWO_48 : int := 0x1_0000_0000_0000
const TWO_56 : int := 0x1_0000_0000_0000_00
const TWO_63 : int := 0x0_8000_0000_0000_0000
const TWO_64 : int := 0x1_0000_0000_0000_0000
const TWO_127 : int := 0x0_8000_0000_0000_0000_0000_0000_0000_0000
const TWO_128 : int := 0x1_0000_0000_0000_0000_0000_0000_0000_0000
const TWO_160 : int := 0x1_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000
const TWO_255 : int := 0x0_8000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000
const TWO_256 : int := 0x1_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000
// Signed Integers
const MIN_I8 : int := -TWO_7
const MAX_I8 : int := TWO_7 - 1
const MIN_I16 : int := -TWO_15
const MAX_I16 : int := TWO_15 - 1
const MIN_I32 : int := -TWO_31
const MAX_I32 : int := TWO_31 - 1
const MIN_I64 : int := -TWO_63
const MAX_I64 : int := TWO_63 - 1
const MIN_I128 : int := -TWO_127
const MAX_I128 : int := TWO_127 - 1
const MIN_I256 : int := -TWO_255
const MAX_I256 : int := TWO_255 - 1
newtype{:nativeType "sbyte"} i8 = i:int | MIN_I8 <= i <= MAX_I8
newtype{:nativeType "short"} i16 = i:int | MIN_I16 <= i <= MAX_I16
newtype{:nativeType "int"} i32 = i:int | MIN_I32 <= i <= MAX_I32
newtype{:nativeType "long"} i64 = i:int | MIN_I64 <= i <= MAX_I64
newtype i128 = i:int | MIN_I128 <= i <= MAX_I128
newtype i256 = i:int | MIN_I256 <= i <= MAX_I256
// Unsigned Integers
const MAX_U8 : int := TWO_8 - 1
const MAX_U16 : int := TWO_16 - 1
const MAX_U24 : int := TWO_24 - 1
const MAX_U32 : int := TWO_32 - 1
const MAX_U40 : int := TWO_40 - 1
const MAX_U48 : int := TWO_48 - 1
const MAX_U56 : int := TWO_56 - 1
const MAX_U64 : int := TWO_64 - 1
const MAX_U128 : int := TWO_128 - 1
const MAX_U160: int := TWO_160 - 1
const MAX_U256: int := TWO_256 - 1
newtype{:nativeType "byte"} u8 = i:int | 0 <= i <= MAX_U8
newtype{:nativeType "ushort"} u16 = i:int | 0 <= i <= MAX_U16
newtype{:nativeType "uint"} u24 = i:int | 0 <= i <= MAX_U24
newtype{:nativeType "uint"} u32 = i:int | 0 <= i <= MAX_U32
newtype{:nativeType "ulong"} u40 = i:int | 0 <= i <= MAX_U40
newtype{:nativeType "ulong"} u48 = i:int | 0 <= i <= MAX_U48
newtype{:nativeType "ulong"} u56 = i:int | 0 <= i <= MAX_U56
newtype{:nativeType "ulong"} u64 = i:int | 0 <= i <= MAX_U64
newtype u128 = i:int | 0 <= i <= MAX_U128
newtype u160 = i:int | 0 <= i <= MAX_U160
newtype u256 = i:int | 0 <= i <= MAX_U256
    // Determine the maximum of two integers.
function Max(i1: int, i2: int) : int {
if i1 >= i2 then i1 else i2
}
    // Determine the minimum of two integers.
function Min(i1: int, i2: int) : int {
if i1 < i2 then i1 else i2
}
    // Round a given number (i) up to the nearest multiple of (r).
function RoundUp(i: int, r: nat) : int
requires r > 0 {
if (i % r) == 0 then i
else
((i/r)*r) + r
}
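    // e.g. RoundUp(10, 4) == 12, whilst RoundUp(8, 4) == 8.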
// Return the maximum value representable using exactly n unsigned bytes.
// This is essentially computing (2^n - 1). However, the point of doing it
// in this fashion is to avoid using Pow() as this is challenging for the
// verifier.
function MaxUnsignedN(n:nat) : (r:nat)
requires 1 <= n <= 32 {
match n
case 1 => MAX_U8
case 2 => MAX_U16
case 3 => MAX_U24
case 4 => MAX_U32
case 5 => MAX_U40
case 6 => MAX_U48
case 7 => MAX_U56
case 8 => MAX_U64
case 16 => MAX_U128
case 20 => MAX_U160
case 32 => MAX_U256
// Fall back case (for now)
case _ =>
Pow(2,n) - 1
}
// =========================================================
// Exponent
// =========================================================
/**
* Compute n^k.
*/
function Pow(n:nat, k:nat) : (r:nat)
// Following needed for some proofs
ensures n > 0 ==> r > 0 {
if k == 0 then 1
else if k == 1 then n
else
var p := k / 2;
var np := Pow(n,p);
if p*2 == k then np * np
else np * np * n
}
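    // Pow uses exponentiation by squaring, so the recursion depth is
    // logarithmic in k; e.g. Pow(3, 4) is computed as (3*3) * (3*3) == 81.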
// Simple lemma about POW.
lemma lemma_pow2(k:nat)
ensures Pow(2,k) > 0 {
if k == 0 {
assert Pow(2,k) == 1;
} else if k == 1 {
assert Pow(2,k) == 2;
} else {
lemma_pow2(k/2);
}
}
// =========================================================
// Non-Euclidean Division / Remainder
// =========================================================
// This provides a non-Euclidean division operator and is necessary
// because Dafny (unlike just about every other programming
// language) supports Euclidean division. This operator, therefore,
// always divides *towards* zero.
function Div(lhs: int, rhs: int) : int
requires rhs != 0 {
if lhs >= 0 then lhs / rhs
else
-((-lhs) / rhs)
}
// This provides a non-Euclidean Remainder operator and is necessary
// because Dafny (unlike just about every other programming
// language) supports Euclidean division. Observe that this is a
// true Remainder operator, and not a modulus operator. For
    // example, this means the result can be negative.
function Rem(lhs: int, rhs: int) : int
requires rhs != 0 {
if lhs >= 0 then (lhs % rhs)
else
var d := -((-lhs) / rhs);
lhs - (d * rhs)
}
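    // e.g. Div(-7, 2) == -3 and Rem(-7, 2) == -1, whereas Dafny's built-in
    // Euclidean operators give -7 / 2 == -4 and -7 % 2 == 1.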
}
/**
* Various helper methods related to unsigned 8bit integers.
*/
module U8 {
import opened Int
// Compute the log of a value at base 2 where the result is rounded down.
function Log2(v:u8) : (r:nat)
ensures r < 8 {
// Split 4 bits
if v <= 15 then
// Split 2 bits
if v <= 3 then
// Split 1 bit
if v <= 1 then 0 else 1
else
// Split 1 bit
if v <= 7 then 2 else 3
else
// Split 2 bits
if v <= 63 then
// Split 1 bit
if v <= 31 then 4 else 5
else
// Split 1 bit
if v <= 127 then 6 else 7
}
}
/**
* Various helper methods related to unsigned 16bit integers.
*/
module U16 {
import opened Int
import U8
// Read nth 8bit word (i.e. byte) out of this u16, where 0
// identifies the most significant byte.
function NthUint8(v:u16, k: nat) : u8
// Cannot read more than two words!
requires k < 2 {
if k == 0
then (v / (TWO_8 as u16)) as u8
else
(v % (TWO_8 as u16)) as u8
}
/**
* Compute the log of a value at base 2 where the result is rounded down.
*/
function Log2(v:u16) : (r:nat)
ensures r < 16 {
var low := (v % (TWO_8 as u16)) as u8;
var high := (v / (TWO_8 as u16)) as u8;
if high != 0 then U8.Log2(high)+8 else U8.Log2(low)
}
/**
* Compute the log of a value at base 256 where the result is rounded down.
*/
function Log256(v:u16) : (r:nat)
ensures r <= 1 {
var low := (v % (TWO_8 as u16)) as u8;
var high := (v / (TWO_8 as u16)) as u8;
if high != 0 then 1 else 0
}
/**
* Convert a u16 into a sequence of 2 bytes (in big endian representation).
*/
function ToBytes(v:u16) : (r:seq<u8>)
ensures |r| == 2 {
var low := (v % (TWO_8 as u16)) as u8;
var high := (v / (TWO_8 as u16)) as u8;
[high,low]
}
function Read(bytes: seq<u8>, address:nat) : u16
requires (address+1) < |bytes| {
var b1 := bytes[address] as u16;
var b2 := bytes[address+1] as u16;
(b1 * (TWO_8 as u16)) + b2
}
}
/**
* Various helper methods related to unsigned 32bit integers.
*/
module U32 {
import U16
import opened Int
// Read nth 16bit word out of this u32, where 0 identifies the most
// significant word.
function NthUint16(v:u32, k: nat) : u16
// Cannot read more than two words!
requires k < 2 {
if k == 0
then (v / (TWO_16 as u32)) as u16
else
(v % (TWO_16 as u32)) as u16
}
/**
     * Compute the log of a value at base 2 where the result is rounded down.
*/
function Log2(v:u32) : (r:nat)
ensures r < 32 {
var low := (v % (TWO_16 as u32)) as u16;
var high := (v / (TWO_16 as u32)) as u16;
if high != 0 then U16.Log2(high)+16 else U16.Log2(low)
}
/**
* Compute the log of a value at base 256 where the result is rounded down.
*/
function Log256(v:u32) : (r:nat)
ensures r <= 3 {
var low := (v % (TWO_16 as u32)) as u16;
var high := (v / (TWO_16 as u32)) as u16;
if high != 0 then U16.Log256(high)+2 else U16.Log256(low)
}
/**
* Convert a u32 into a sequence of 4 bytes (in big endian representation).
*/
function ToBytes(v:u32) : (r:seq<u8>)
ensures |r| == 4 {
var low := (v % (TWO_16 as u32)) as u16;
var high := (v / (TWO_16 as u32)) as u16;
U16.ToBytes(high) + U16.ToBytes(low)
}
function Read(bytes: seq<u8>, address:nat) : u32
requires (address+3) < |bytes| {
var b1 := U16.Read(bytes, address) as u32;
var b2 := U16.Read(bytes, address+2) as u32;
(b1 * (TWO_16 as u32)) + b2
}
}
/**
* Various helper methods related to unsigned 64bit integers.
*/
module U64 {
import U32
import opened Int
// Read nth 32bit word out of this u64, where 0 identifies the most
// significant word.
function NthUint32(v:u64, k: nat) : u32
// Cannot read more than two words!
requires k < 2 {
if k == 0
then (v / (TWO_32 as u64)) as u32
else
(v % (TWO_32 as u64)) as u32
}
/**
     * Compute the log of a value at base 2 where the result is rounded down.
*/
function Log2(v:u64) : (r:nat)
ensures r < 64 {
var low := (v % (TWO_32 as u64)) as u32;
var high := (v / (TWO_32 as u64)) as u32;
if high != 0 then U32.Log2(high)+32 else U32.Log2(low)
}
/**
* Compute the log of a value at base 256 where the result is rounded down.
*/
function Log256(v:u64) : (r:nat)
ensures r <= 7 {
var low := (v % (TWO_32 as u64)) as u32;
var high := (v / (TWO_32 as u64)) as u32;
if high != 0 then U32.Log256(high)+4 else U32.Log256(low)
}
/**
     * Convert a u64 into a sequence of 8 bytes (in big endian representation).
*/
function ToBytes(v:u64) : (r:seq<u8>)
ensures |r| == 8 {
var low := (v % (TWO_32 as u64)) as u32;
var high := (v / (TWO_32 as u64)) as u32;
U32.ToBytes(high) + U32.ToBytes(low)
}
function Read(bytes: seq<u8>, address:nat) : u64
requires (address+7) < |bytes| {
var b1 := U32.Read(bytes, address) as u64;
var b2 := U32.Read(bytes, address+4) as u64;
(b1 * (TWO_32 as u64)) + b2
}
}
/**
* Various helper methods related to unsigned 128bit integers.
*/
module U128 {
import U64
import opened Int
// Read nth 64bit word out of this u128, where 0 identifies the most
// significant word.
function NthUint64(v:u128, k: nat) : u64
// Cannot read more than two words!
requires k < 2 {
if k == 0
then (v / (TWO_64 as u128)) as u64
else
(v % (TWO_64 as u128)) as u64
}
/**
     * Compute the log of a value at base 2 where the result is rounded down.
*/
function Log2(v:u128) : (r:nat)
ensures r < 128 {
var low := (v % (TWO_64 as u128)) as u64;
var high := (v / (TWO_64 as u128)) as u64;
if high != 0 then U64.Log2(high)+64 else U64.Log2(low)
}
/**
* Compute the log of a value at base 256 where the result is rounded down.
*/
function Log256(v:u128) : (r:nat)
ensures r <= 15 {
var low := (v % (TWO_64 as u128)) as u64;
var high := (v / (TWO_64 as u128)) as u64;
if high != 0 then U64.Log256(high)+8 else U64.Log256(low)
}
/**
     * Convert a u128 into a sequence of 16 bytes (in big endian representation).
*/
function ToBytes(v:u128) : (r:seq<u8>)
ensures |r| == 16 {
var low := (v % (TWO_64 as u128)) as u64;
var high := (v / (TWO_64 as u128)) as u64;
U64.ToBytes(high) + U64.ToBytes(low)
}
function Read(bytes: seq<u8>, address:nat) : u128
requires (address+15) < |bytes| {
var b1 := U64.Read(bytes, address) as u128;
var b2 := U64.Read(bytes, address+8) as u128;
(b1 * (TWO_64 as u128)) + b2
}
}
/**
* Various helper methods related to unsigned 256bit integers.
*/
module U256 {
import opened Int
import U8
import U16
import U32
import U64
import U128
/** An axiom stating that a bv256 converted as a nat is bounded by 2^256. */
lemma {:axiom} as_bv256_as_u256(v: bv256)
ensures v as nat < TWO_256
function Shl(lhs: u256, rhs: u256) : u256
{
var lbv := lhs as bv256;
// NOTE: unclear whether shifting is optimal choice here.
var res := if rhs < 256 then (lbv << rhs) else 0;
//
res as u256
}
function Shr(lhs: u256, rhs: u256) : u256 {
var lbv := lhs as bv256;
// NOTE: unclear whether shifting is optimal choice here.
var res := if rhs < 256 then (lbv >> rhs) else 0;
//
res as u256
}
/**
     * Compute the log of a value at base 2, where the result is rounded down.
     * This effectively determines the position of the highest set bit.
*/
function Log2(v:u256) : (r:nat)
ensures r < 256 {
var low := (v % (TWO_128 as u256)) as u128;
var high := (v / (TWO_128 as u256)) as u128;
if high != 0 then U128.Log2(high)+128 else U128.Log2(low)
}
/**
* Compute the log of a value at base 256 where the result is rounded down.
*/
function Log256(v:u256) : (r:nat)
ensures r <= 31 {
var low := (v % (TWO_128 as u256)) as u128;
var high := (v / (TWO_128 as u256)) as u128;
if high != 0 then U128.Log256(high)+16 else U128.Log256(low)
}
// Read nth 128bit word out of this u256, where 0 identifies the most
// significant word.
function NthUint128(v:u256, k: nat) : u128
// Cannot read more than two words!
requires k < 2 {
if k == 0
then (v / (TWO_128 as u256)) as u128
else
(v % (TWO_128 as u256)) as u128
}
// Read nth byte out of this u256, where 0 identifies the most
// significant byte.
function NthUint8(v:u256, k: nat) : u8
// Cannot read more than 32bytes!
requires k < 32 {
// This is perhaps a tad ugly. Happy to take suggestions on
// a better approach :)
var w128 := NthUint128(v,k / 16);
var w64 := U128.NthUint64(w128,(k % 16) / 8);
var w32 := U64.NthUint32(w64,(k % 8) / 4);
var w16 := U32.NthUint16(w32,(k % 4) / 2);
U16.NthUint8(w16,k%2)
}
function Read(bytes: seq<u8>, address:nat) : u256
requires (address+31) < |bytes| {
var b1 := U128.Read(bytes, address) as u256;
var b2 := U128.Read(bytes, address+16) as u256;
(b1 * (TWO_128 as u256)) + b2
}
/**
     * Convert a u256 into a sequence of 32 bytes in big endian representation.
*/
function ToBytes(v:u256) : (r:seq<u8>)
ensures |r| == 32 {
var low := (v % (TWO_128 as u256)) as u128;
var high := (v / (TWO_128 as u256)) as u128;
U128.ToBytes(high) + U128.ToBytes(low)
}
/**
*
*/
function SignExtend(v: u256, k: nat) : u256 {
if k >= 31 then v
else
// Reinterpret k as big endian
var ith := 31 - k;
// Extract byte containing sign bit
var byte := NthUint8(v,ith);
// Extract sign bit
var signbit := ((byte as bv8) & 0x80) == 0x80;
// Replicate sign bit.
var signs := if signbit then seq(31-k, i => 0xff)
else seq(31-k, i => 0);
// Extract unchanged bytes
var bytes := ToBytes(v)[ith..];
// Sanity check
assert |signs + bytes| == 32;
// Done
Read(signs + bytes,0)
}
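    // e.g. SignExtend(0xFF, 0) reads the least significant byte as negative and
    // yields MAX_U256 (all bits set), while SignExtend(0x7F, 0) == 0x7F.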
}
module I256 {
import U256
import Word
import opened Int
// This provides a non-Euclidean division operator and is necessary
// because Dafny (unlike just about every other programming
// language) supports Euclidean division. This operator, therefore,
// always divides *towards* zero.
function Div(lhs: i256, rhs: i256) : i256
// Cannot divide by zero!
requires rhs != 0
// Range restriction to prevent overflow
requires (rhs != -1 || lhs != (-TWO_255 as i256)) {
Int.Div(lhs as int, rhs as int) as i256
}
// This provides a non-Euclidean Remainder operator and is necessary
// because Dafny (unlike just about every other programming
// language) supports Euclidean division. Observe that this is a
// true Remainder operator, and not a modulus operator. For
    // example, this means the result can be negative.
function Rem(lhs: i256, rhs: i256) : i256
// Cannot divide by zero!
requires rhs != 0 {
Int.Rem(lhs as int, rhs as int) as i256
}
/**
* Shifting 1 left less than 256 times produces a non-zero value.
*
* More generally, shifting-left 1 less than k times over k bits
     * yields a non-zero number.
*
* @example over 2 bits, left-shift 1 once: 01 -> 10
* @example over 4 bits, left-shift 1 3 times: 0001 -> 0010 -> 0100 -> 1000
*/
lemma ShiftYieldsNonZero(x: u256)
requires 0 < x < 256
ensures U256.Shl(1, x) > 0
{
// Thanks Dafny.
}
// Shift Arithmetic Right. This implementation follows the Yellow Paper quite
// accurately.
function Sar(lhs: i256, rhs: u256): i256 {
if rhs == 0 then lhs
else if rhs < 256
then
assert 0 < rhs < 256;
var r := U256.Shl(1,rhs);
ShiftYieldsNonZero(rhs);
((lhs as int) / (r as int)) as i256
else if lhs < 0 then -1
else 0
}
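    // e.g. Sar(-8, 2) == -8 / 4 == -2; for shift amounts of 256 or more the
    // result collapses to -1 for negative operands and 0 otherwise.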
}
module Word {
import opened Int
// Decode a 256bit word as a signed 256bit integer. Since words
// are represented as u256, the parameter has type u256. However,
// its important to note that this does not mean the value in
// question represents an unsigned 256 bit integer. Rather, it is a
// signed integer encoded into an unsigned integer.
function asI256(w: u256) : i256 {
if w > (MAX_I256 as u256)
then
var v := 1 + MAX_U256 - (w as int);
(-v) as i256
else
w as i256
}
// Encode a 256bit signed integer as a 256bit word. Since words are
// represented as u256, the return is represented as u256. However,
// its important to note that this does not mean the value in
// question represents an unsigned 256 bit integer. Rather, it is a
// signed integer encoded into an unsigned integer.
function fromI256(w: Int.i256) : u256 {
if w < 0
then
var v := 1 + MAX_U256 + (w as int);
v as u256
else
w as u256
}
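    // e.g. asI256(MAX_U256 as u256) == -1 and fromI256(-1) == MAX_U256 as u256,
    // i.e. the usual two's complement round trip.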
}
| /*
* Copyright 2022 ConsenSys Software Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software dis-
* tributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
module Int {
const TWO_7 : int := 0x0_80
const TWO_8 : int := 0x1_00
const TWO_15 : int := 0x0_8000
const TWO_16 : int := 0x1_0000
const TWO_24 : int := 0x1_0000_00
const TWO_31 : int := 0x0_8000_0000
const TWO_32 : int := 0x1_0000_0000
const TWO_40 : int := 0x1_0000_0000_00
const TWO_48 : int := 0x1_0000_0000_0000
const TWO_56 : int := 0x1_0000_0000_0000_00
const TWO_63 : int := 0x0_8000_0000_0000_0000
const TWO_64 : int := 0x1_0000_0000_0000_0000
const TWO_127 : int := 0x0_8000_0000_0000_0000_0000_0000_0000_0000
const TWO_128 : int := 0x1_0000_0000_0000_0000_0000_0000_0000_0000
const TWO_160 : int := 0x1_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000
const TWO_255 : int := 0x0_8000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000
const TWO_256 : int := 0x1_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000
// Signed Integers
const MIN_I8 : int := -TWO_7
const MAX_I8 : int := TWO_7 - 1
const MIN_I16 : int := -TWO_15
const MAX_I16 : int := TWO_15 - 1
const MIN_I32 : int := -TWO_31
const MAX_I32 : int := TWO_31 - 1
const MIN_I64 : int := -TWO_63
const MAX_I64 : int := TWO_63 - 1
const MIN_I128 : int := -TWO_127
const MAX_I128 : int := TWO_127 - 1
const MIN_I256 : int := -TWO_255
const MAX_I256 : int := TWO_255 - 1
newtype{:nativeType "sbyte"} i8 = i:int | MIN_I8 <= i <= MAX_I8
newtype{:nativeType "short"} i16 = i:int | MIN_I16 <= i <= MAX_I16
newtype{:nativeType "int"} i32 = i:int | MIN_I32 <= i <= MAX_I32
newtype{:nativeType "long"} i64 = i:int | MIN_I64 <= i <= MAX_I64
newtype i128 = i:int | MIN_I128 <= i <= MAX_I128
newtype i256 = i:int | MIN_I256 <= i <= MAX_I256
// Unsigned Integers
const MAX_U8 : int := TWO_8 - 1
const MAX_U16 : int := TWO_16 - 1
const MAX_U24 : int := TWO_24 - 1
const MAX_U32 : int := TWO_32 - 1
const MAX_U40 : int := TWO_40 - 1
const MAX_U48 : int := TWO_48 - 1
const MAX_U56 : int := TWO_56 - 1
const MAX_U64 : int := TWO_64 - 1
const MAX_U128 : int := TWO_128 - 1
const MAX_U160: int := TWO_160 - 1
const MAX_U256: int := TWO_256 - 1
newtype{:nativeType "byte"} u8 = i:int | 0 <= i <= MAX_U8
newtype{:nativeType "ushort"} u16 = i:int | 0 <= i <= MAX_U16
newtype{:nativeType "uint"} u24 = i:int | 0 <= i <= MAX_U24
newtype{:nativeType "uint"} u32 = i:int | 0 <= i <= MAX_U32
newtype{:nativeType "ulong"} u40 = i:int | 0 <= i <= MAX_U40
newtype{:nativeType "ulong"} u48 = i:int | 0 <= i <= MAX_U48
newtype{:nativeType "ulong"} u56 = i:int | 0 <= i <= MAX_U56
newtype{:nativeType "ulong"} u64 = i:int | 0 <= i <= MAX_U64
newtype u128 = i:int | 0 <= i <= MAX_U128
newtype u160 = i:int | 0 <= i <= MAX_U160
newtype u256 = i:int | 0 <= i <= MAX_U256
    // Determine the maximum of two integers.
function Max(i1: int, i2: int) : int {
if i1 >= i2 then i1 else i2
}
    // Determine the minimum of two integers.
function Min(i1: int, i2: int) : int {
if i1 < i2 then i1 else i2
}
    // Round a given number (i) up to the nearest multiple of (r).
function RoundUp(i: int, r: nat) : int
requires r > 0 {
if (i % r) == 0 then i
else
((i/r)*r) + r
}
// Return the maximum value representable using exactly n unsigned bytes.
// This is essentially computing (2^n - 1). However, the point of doing it
// in this fashion is to avoid using Pow() as this is challenging for the
// verifier.
function MaxUnsignedN(n:nat) : (r:nat)
requires 1 <= n <= 32 {
match n
case 1 => MAX_U8
case 2 => MAX_U16
case 3 => MAX_U24
case 4 => MAX_U32
case 5 => MAX_U40
case 6 => MAX_U48
case 7 => MAX_U56
case 8 => MAX_U64
case 16 => MAX_U128
case 20 => MAX_U160
case 32 => MAX_U256
// Fall back case (for now)
case _ =>
Pow(2,n) - 1
}
// =========================================================
// Exponent
// =========================================================
/**
* Compute n^k.
*/
function Pow(n:nat, k:nat) : (r:nat)
// Following needed for some proofs
ensures n > 0 ==> r > 0 {
if k == 0 then 1
else if k == 1 then n
else
var p := k / 2;
var np := Pow(n,p);
if p*2 == k then np * np
else np * np * n
}
// Simple lemma about POW.
lemma lemma_pow2(k:nat)
ensures Pow(2,k) > 0 {
if k == 0 {
} else if k == 1 {
} else {
lemma_pow2(k/2);
}
}
// =========================================================
// Non-Euclidean Division / Remainder
// =========================================================
// This provides a non-Euclidean division operator and is necessary
// because Dafny (unlike just about every other programming
// language) supports Euclidean division. This operator, therefore,
// always divides *towards* zero.
function Div(lhs: int, rhs: int) : int
requires rhs != 0 {
if lhs >= 0 then lhs / rhs
else
-((-lhs) / rhs)
}
// This provides a non-Euclidean Remainder operator and is necessary
// because Dafny (unlike just about every other programming
// language) supports Euclidean division. Observe that this is a
// true Remainder operator, and not a modulus operator. For
    // example, this means the result can be negative.
function Rem(lhs: int, rhs: int) : int
requires rhs != 0 {
if lhs >= 0 then (lhs % rhs)
else
var d := -((-lhs) / rhs);
lhs - (d * rhs)
}
}
/**
* Various helper methods related to unsigned 8bit integers.
*/
module U8 {
import opened Int
// Compute the log of a value at base 2 where the result is rounded down.
function Log2(v:u8) : (r:nat)
ensures r < 8 {
// Split 4 bits
if v <= 15 then
// Split 2 bits
if v <= 3 then
// Split 1 bit
if v <= 1 then 0 else 1
else
// Split 1 bit
if v <= 7 then 2 else 3
else
// Split 2 bits
if v <= 63 then
// Split 1 bit
if v <= 31 then 4 else 5
else
// Split 1 bit
if v <= 127 then 6 else 7
}
}
/**
* Various helper methods related to unsigned 16bit integers.
*/
module U16 {
import opened Int
import U8
// Read nth 8bit word (i.e. byte) out of this u16, where 0
// identifies the most significant byte.
function NthUint8(v:u16, k: nat) : u8
// Cannot read more than two words!
requires k < 2 {
if k == 0
then (v / (TWO_8 as u16)) as u8
else
(v % (TWO_8 as u16)) as u8
}
/**
* Compute the log of a value at base 2 where the result is rounded down.
*/
function Log2(v:u16) : (r:nat)
ensures r < 16 {
var low := (v % (TWO_8 as u16)) as u8;
var high := (v / (TWO_8 as u16)) as u8;
if high != 0 then U8.Log2(high)+8 else U8.Log2(low)
}
/**
* Compute the log of a value at base 256 where the result is rounded down.
*/
function Log256(v:u16) : (r:nat)
ensures r <= 1 {
var low := (v % (TWO_8 as u16)) as u8;
var high := (v / (TWO_8 as u16)) as u8;
if high != 0 then 1 else 0
}
/**
* Convert a u16 into a sequence of 2 bytes (in big endian representation).
*/
function ToBytes(v:u16) : (r:seq<u8>)
ensures |r| == 2 {
var low := (v % (TWO_8 as u16)) as u8;
var high := (v / (TWO_8 as u16)) as u8;
[high,low]
}
function Read(bytes: seq<u8>, address:nat) : u16
requires (address+1) < |bytes| {
var b1 := bytes[address] as u16;
var b2 := bytes[address+1] as u16;
(b1 * (TWO_8 as u16)) + b2
}
}
/**
* Various helper methods related to unsigned 32bit integers.
*/
module U32 {
import U16
import opened Int
// Read nth 16bit word out of this u32, where 0 identifies the most
// significant word.
function NthUint16(v:u32, k: nat) : u16
// Cannot read more than two words!
requires k < 2 {
if k == 0
then (v / (TWO_16 as u32)) as u16
else
(v % (TWO_16 as u32)) as u16
}
/**
     * Compute the log of a value at base 2 where the result is rounded down.
*/
function Log2(v:u32) : (r:nat)
ensures r < 32 {
var low := (v % (TWO_16 as u32)) as u16;
var high := (v / (TWO_16 as u32)) as u16;
if high != 0 then U16.Log2(high)+16 else U16.Log2(low)
}
/**
* Compute the log of a value at base 256 where the result is rounded down.
*/
function Log256(v:u32) : (r:nat)
ensures r <= 3 {
var low := (v % (TWO_16 as u32)) as u16;
var high := (v / (TWO_16 as u32)) as u16;
if high != 0 then U16.Log256(high)+2 else U16.Log256(low)
}
/**
* Convert a u32 into a sequence of 4 bytes (in big endian representation).
*/
function ToBytes(v:u32) : (r:seq<u8>)
ensures |r| == 4 {
var low := (v % (TWO_16 as u32)) as u16;
var high := (v / (TWO_16 as u32)) as u16;
U16.ToBytes(high) + U16.ToBytes(low)
}
function Read(bytes: seq<u8>, address:nat) : u32
requires (address+3) < |bytes| {
var b1 := U16.Read(bytes, address) as u32;
var b2 := U16.Read(bytes, address+2) as u32;
(b1 * (TWO_16 as u32)) + b2
}
}
/**
* Various helper methods related to unsigned 64bit integers.
*/
module U64 {
import U32
import opened Int
// Read nth 32bit word out of this u64, where 0 identifies the most
// significant word.
function NthUint32(v:u64, k: nat) : u32
// Cannot read more than two words!
requires k < 2 {
if k == 0
then (v / (TWO_32 as u64)) as u32
else
(v % (TWO_32 as u64)) as u32
}
/**
* Compute the log of a value at base 2 where the result is rounded down.
*/
function Log2(v:u64) : (r:nat)
ensures r < 64 {
var low := (v % (TWO_32 as u64)) as u32;
var high := (v / (TWO_32 as u64)) as u32;
if high != 0 then U32.Log2(high)+32 else U32.Log2(low)
}
/**
* Compute the log of a value at base 256 where the result is rounded down.
*/
function Log256(v:u64) : (r:nat)
ensures r <= 7 {
var low := (v % (TWO_32 as u64)) as u32;
var high := (v / (TWO_32 as u64)) as u32;
if high != 0 then U32.Log256(high)+4 else U32.Log256(low)
}
/**
* Convert a u64 into a sequence of 8 bytes (in big endian representation).
*/
function ToBytes(v:u64) : (r:seq<u8>)
ensures |r| == 8 {
var low := (v % (TWO_32 as u64)) as u32;
var high := (v / (TWO_32 as u64)) as u32;
U32.ToBytes(high) + U32.ToBytes(low)
}
function Read(bytes: seq<u8>, address:nat) : u64
requires (address+7) < |bytes| {
var b1 := U32.Read(bytes, address) as u64;
var b2 := U32.Read(bytes, address+4) as u64;
(b1 * (TWO_32 as u64)) + b2
}
}
/**
* Various helper methods related to unsigned 128bit integers.
*/
module U128 {
import U64
import opened Int
// Read nth 64bit word out of this u128, where 0 identifies the most
// significant word.
function NthUint64(v:u128, k: nat) : u64
// Cannot read more than two words!
requires k < 2 {
if k == 0
then (v / (TWO_64 as u128)) as u64
else
(v % (TWO_64 as u128)) as u64
}
/**
* Compute the log of a value at base 2 where the result is rounded down.
*/
function Log2(v:u128) : (r:nat)
ensures r < 128 {
var low := (v % (TWO_64 as u128)) as u64;
var high := (v / (TWO_64 as u128)) as u64;
if high != 0 then U64.Log2(high)+64 else U64.Log2(low)
}
/**
* Compute the log of a value at base 256 where the result is rounded down.
*/
function Log256(v:u128) : (r:nat)
ensures r <= 15 {
var low := (v % (TWO_64 as u128)) as u64;
var high := (v / (TWO_64 as u128)) as u64;
if high != 0 then U64.Log256(high)+8 else U64.Log256(low)
}
/**
* Convert a u128 into a sequence of 16 bytes (in big endian representation).
*/
function ToBytes(v:u128) : (r:seq<u8>)
ensures |r| == 16 {
var low := (v % (TWO_64 as u128)) as u64;
var high := (v / (TWO_64 as u128)) as u64;
U64.ToBytes(high) + U64.ToBytes(low)
}
function Read(bytes: seq<u8>, address:nat) : u128
requires (address+15) < |bytes| {
var b1 := U64.Read(bytes, address) as u128;
var b2 := U64.Read(bytes, address+8) as u128;
(b1 * (TWO_64 as u128)) + b2
}
}
/**
* Various helper methods related to unsigned 256bit integers.
*/
module U256 {
import opened Int
import U8
import U16
import U32
import U64
import U128
/** An axiom stating that a bv256 converted as a nat is bounded by 2^256. */
lemma {:axiom} as_bv256_as_u256(v: bv256)
ensures v as nat < TWO_256
function Shl(lhs: u256, rhs: u256) : u256
{
var lbv := lhs as bv256;
// NOTE: unclear whether shifting is optimal choice here.
var res := if rhs < 256 then (lbv << rhs) else 0;
//
res as u256
}
function Shr(lhs: u256, rhs: u256) : u256 {
var lbv := lhs as bv256;
// NOTE: unclear whether shifting is optimal choice here.
var res := if rhs < 256 then (lbv >> rhs) else 0;
//
res as u256
}
/**
* Compute the log of a value at base 2, where the result is rounded down.
* This effectively determines the position of the highest on bit.
*/
function Log2(v:u256) : (r:nat)
ensures r < 256 {
var low := (v % (TWO_128 as u256)) as u128;
var high := (v / (TWO_128 as u256)) as u128;
if high != 0 then U128.Log2(high)+128 else U128.Log2(low)
}
/**
* Compute the log of a value at base 256 where the result is rounded down.
*/
function Log256(v:u256) : (r:nat)
ensures r <= 31 {
var low := (v % (TWO_128 as u256)) as u128;
var high := (v / (TWO_128 as u256)) as u128;
if high != 0 then U128.Log256(high)+16 else U128.Log256(low)
}
// Read nth 128bit word out of this u256, where 0 identifies the most
// significant word.
function NthUint128(v:u256, k: nat) : u128
// Cannot read more than two words!
requires k < 2 {
if k == 0
then (v / (TWO_128 as u256)) as u128
else
(v % (TWO_128 as u256)) as u128
}
// Read nth byte out of this u256, where 0 identifies the most
// significant byte.
function NthUint8(v:u256, k: nat) : u8
// Cannot read more than 32bytes!
requires k < 32 {
// This is perhaps a tad ugly. Happy to take suggestions on
// a better approach :)
var w128 := NthUint128(v,k / 16);
var w64 := U128.NthUint64(w128,(k % 16) / 8);
var w32 := U64.NthUint32(w64,(k % 8) / 4);
var w16 := U32.NthUint16(w32,(k % 4) / 2);
U16.NthUint8(w16,k%2)
}
function Read(bytes: seq<u8>, address:nat) : u256
requires (address+31) < |bytes| {
var b1 := U128.Read(bytes, address) as u256;
var b2 := U128.Read(bytes, address+16) as u256;
(b1 * (TWO_128 as u256)) + b2
}
/**
* Convert a u256 into a sequence of 32 bytes in big endian representation.
*/
function ToBytes(v:u256) : (r:seq<u8>)
ensures |r| == 32 {
var low := (v % (TWO_128 as u256)) as u128;
var high := (v / (TWO_128 as u256)) as u128;
U128.ToBytes(high) + U128.ToBytes(low)
}
/**
* Sign extend a value from a given byte: replicate the sign bit of byte k
* (counting from the least significant byte) into all more significant bytes.
*/
function SignExtend(v: u256, k: nat) : u256 {
if k >= 31 then v
else
// Reinterpret k as big endian
var ith := 31 - k;
// Extract byte containing sign bit
var byte := NthUint8(v,ith);
// Extract sign bit
var signbit := ((byte as bv8) & 0x80) == 0x80;
// Replicate sign bit.
var signs := if signbit then seq(31-k, i => 0xff)
else seq(31-k, i => 0);
// Extract unchanged bytes
var bytes := ToBytes(v)[ith..];
// Sanity check
// Done
Read(signs + bytes,0)
}
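// Worked example (illustrative): SignExtend(0x7f, 0) == 0x7f since the sign bit of
// byte 0 is clear, whereas SignExtend(0x80, 0) replicates 0xff into the upper 31
// bytes, yielding the two's-complement encoding of -128 (i.e. MAX_U256 - 127).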
}
module I256 {
import U256
import Word
import opened Int
// This provides a non-Euclidean division operator and is necessary
// because Dafny (unlike just about every other programming
// language) supports Euclidean division. This operator, therefore,
// always divides *towards* zero.
function Div(lhs: i256, rhs: i256) : i256
// Cannot divide by zero!
requires rhs != 0
// Range restriction to prevent overflow
requires (rhs != -1 || lhs != (-TWO_255 as i256)) {
Int.Div(lhs as int, rhs as int) as i256
}
// This provides a non-Euclidean Remainder operator and is necessary
// because Dafny (unlike just about every other programming
// language) supports Euclidean division. Observe that this is a
// true Remainder operator, and not a modulus operator. For
// example, this means the result can be negative.
function Rem(lhs: i256, rhs: i256) : i256
// Cannot divide by zero!
requires rhs != 0 {
Int.Rem(lhs as int, rhs as int) as i256
}
/**
* Shifting 1 left less than 256 times produces a non-zero value.
*
* More generally, shifting-left 1 less than k times over k bits
* yields a non-zero number.
*
* @example over 2 bits, left-shift 1 once: 01 -> 10
* @example over 4 bits, left-shift 1 3 times: 0001 -> 0010 -> 0100 -> 1000
*/
lemma ShiftYieldsNonZero(x: u256)
requires 0 < x < 256
ensures U256.Shl(1, x) > 0
{
// Thanks Dafny.
}
// Shift Arithmetic Right. This implementation follows the Yellow Paper quite
// accurately.
function Sar(lhs: i256, rhs: u256): i256 {
if rhs == 0 then lhs
else if rhs < 256
then
var r := U256.Shl(1,rhs);
ShiftYieldsNonZero(rhs);
((lhs as int) / (r as int)) as i256
else if lhs < 0 then -1
else 0
}
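// Worked example (illustrative): Sar(-8, 1) divides by Shl(1,1) == 2 and gives -4,
// and Sar(-1, k) == -1 for any 0 < k < 256, since Euclidean division of -1 by a
// positive power of two yields -1 (matching an arithmetic shift right).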
}
module Word {
import opened Int
// Decode a 256bit word as a signed 256bit integer. Since words
// are represented as u256, the parameter has type u256. However,
// it's important to note that this does not mean the value in
// question represents an unsigned 256 bit integer. Rather, it is a
// signed integer encoded into an unsigned integer.
function asI256(w: u256) : i256 {
if w > (MAX_I256 as u256)
then
var v := 1 + MAX_U256 - (w as int);
(-v) as i256
else
w as i256
}
// Encode a 256bit signed integer as a 256bit word. Since words are
// represented as u256, the return is represented as u256. However,
// it's important to note that this does not mean the value in
// question represents an unsigned 256 bit integer. Rather, it is a
// signed integer encoded into an unsigned integer.
function fromI256(w: Int.i256) : u256 {
if w < 0
then
var v := 1 + MAX_U256 + (w as int);
v as u256
else
w as u256
}
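// Worked examples (illustrative): asI256(0) == 0; asI256(MAX_U256 as u256) == -1,
// since 1 + MAX_U256 - MAX_U256 == 1; conversely fromI256(-1) == MAX_U256 as u256.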
}
|
389 | assertive-programming-assignment-1_tmp_tmp3h_cj44u_FindRange.dfy | method Main()
{
var q := [1,2,2,5,10,10,10,23];
assert Sorted(q);
assert 10 in q;
var i,j := FindRange(q, 10);
print "The number of occurrences of 10 in the sorted sequence [1,2,2,5,10,10,10,23] is ";
print j-i;
print " (starting at index ";
print i;
print " and ending in ";
print j;
print ").\n";
assert i == 4 && j == 7 by {
assert q[0] <= q[1] <= q[2] <= q[3] < 10;
assert q[4] == q[5] == q[6] == 10;
assert 10 < q[7];
}
// arr = [0, 1, 2] key = 10 -> left = right = |q| = 3
q := [0,1,2];
assert Sorted(q);
i,j := FindRange(q, 10);
print "The number of occurrences of 10 in the sorted sequence [0,1,2] is ";
print j-i;
print " (starting at index ";
print i;
print " and ending in ";
print j;
print ").\n";
// arr = [10, 11, 12] key = 1 -> left = right = 0
q := [10,11,12];
assert Sorted(q);
i,j := FindRange(q, 1);
print "The number of occurrences of 1 in the sorted sequence [10,11,12] is ";
print j-i;
print " (starting at index ";
print i;
print " and ending in ";
print j;
print ").\n";
// arr = [1, 11, 22] key = 10 -> left = right = i+1 = 1 i is the nearest index to key
q := [1,11,22];
assert Sorted(q);
i,j := FindRange(q, 10);
print "The number of occurrences of 10 in the sorted sequence [1,11,22] is ";
print j-i;
print " (starting at index ";
print i;
print " and ending in ";
print j;
print ").\n";
// arr = [1 ,11, 22] key = 11 -> left = 1, right = 2
q := [1,11,22];
assert Sorted(q);
i,j := FindRange(q, 11);
print "The number of occurrences of 11 in the sorted sequence [1,11,22] is ";
print j-i;
print " (starting at index ";
print i;
print " and ending in ";
print j;
print ").\n";
// arr = [1 ,11, 11] key = 11 -> left = 1, right = 3
q := [1,11,11];
assert Sorted(q);
i,j := FindRange(q, 11);
print "The number of occurrences of 11 in the sorted sequence [1,11,11] is ";
print j-i;
print " (starting at index ";
print i;
print " and ending in ";
print j;
print ").\n";
// arr = [11 ,11, 14] key = 11 -> left = 0, right = 2
q := [11 ,11, 14];
assert Sorted(q);
i,j := FindRange(q, 11);
print "The number of occurrences of 11 in the sorted sequence [11 ,11, 14] is ";
print j-i;
print " (starting at index ";
print i;
print " and ending in ";
print j;
print ").\n";
// arr = [1 ,11, 11, 11, 13] key = 11 -> left = 1, right = 4
q := [1,11,11,11,13];
assert Sorted(q);
i,j := FindRange(q, 11);
print "The number of occurrences of 11 in the sorted sequence [1,11,11,11,13] is ";
print j-i;
print " (starting at index ";
print i;
print " and ending in ";
print j;
print ").\n";
// arr = [] key = 11 -> left = 0, right = 0
q := [];
assert Sorted(q);
i,j := FindRange(q, 11);
print "The number of occurrences of 11 in the sorted sequence [] is ";
print j-i;
print " (starting at index ";
print i;
print " and ending in ";
print j;
print ").\n";
// arr = [11] key = 10 -> left = 0, right = 0
q := [11];
assert Sorted(q);
i,j := FindRange(q, 10);
print "The number of occurrences of 10 in the sorted sequence [11] is ";
print j-i;
print " (starting at index ";
print i;
print " and ending in ";
print j;
print ").\n";
// arr = [11] key = 11 -> left = 0, right = 1
q := [11];
assert Sorted(q);
i,j := FindRange(q, 11);
print "The number of occurrences of 11 in the sorted sequence [11] is ";
print j-i;
print " (starting at index ";
print i;
print " and ending in ";
print j;
print ").\n";
}
predicate Sorted(q: seq<int>)
{
forall i,j :: 0 <= i <= j < |q| ==> q[i] <= q[j]
}
method {:verify true} FindRange(q: seq<int>, key: int) returns (left: nat, right: nat)
requires Sorted(q)
ensures left <= right <= |q|
ensures forall i :: 0 <= i < left ==> q[i] < key
ensures forall i :: left <= i < right ==> q[i] == key
ensures forall i :: right <= i < |q| ==> q[i] > key
{
left := BinarySearch(q, key, 0, |q|, (n, m) => (n >= m));
right := BinarySearch(q, key, left, |q|, (n, m) => (n > m));
}
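// Worked example (illustrative): with the comparers above, left is the first index
// whose element is >= key and right is the first index whose element is > key, so
// for q == [1,2,2,5,10,10,10,23] and key == 10 this yields left == 4 and right == 7,
// and right - left counts the occurrences of key.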
// all the values in the range satisfy `comparer` (comparer(q[i], key) == true)
predicate RangeSatisfiesComparer(q: seq<int>, key: int, lowerBound: nat, upperBound: nat, comparer: (int, int) -> bool)
requires 0 <= lowerBound <= upperBound <= |q|
{
forall i :: lowerBound <= i < upperBound ==> comparer(q[i], key)
}
// all the values in the range satisfy `!comparer` (comparer(q[i], key) == false)
predicate RangeSatisfiesComparerNegation(q: seq<int>, key: int, lowerBound: nat, upperBound: nat, comparer: (int, int) -> bool)
requires 0 <= lowerBound <= upperBound <= |q|
{
RangeSatisfiesComparer(q, key, lowerBound, upperBound, (n1, n2) => !comparer(n1, n2))
}
method BinarySearch(q: seq<int>, key: int, lowerBound: nat, upperBound: nat, comparer: (int, int) -> bool) returns (index: nat)
requires Sorted(q)
requires 0 <= lowerBound <= upperBound <= |q|
requires RangeSatisfiesComparerNegation(q, key, 0, lowerBound, comparer)
requires RangeSatisfiesComparer(q, key, upperBound, |q|, comparer)
// comparer is '>' or '>='
requires
(forall n1, n2 :: comparer(n1, n2) == (n1 > n2)) ||
(forall n1, n2 :: comparer(n1, n2) == (n1 >= n2))
ensures lowerBound <= index <= upperBound
ensures RangeSatisfiesComparerNegation(q, key, 0, index, comparer)
ensures RangeSatisfiesComparer(q, key, index, |q|, comparer)
{
var low : nat := lowerBound;
var high : nat := upperBound;
while (low < high)
invariant lowerBound <= low <= high <= upperBound
invariant RangeSatisfiesComparerNegation(q, key, 0, low, comparer)
invariant RangeSatisfiesComparer(q, key, high, |q|, comparer)
decreases high - low
{
var middle:= low + ((high - low) / 2);
if (comparer(q[middle], key))
{
high := middle;
}
else
{
low := middle + 1;
}
}
index := high;
}
| method Main()
{
var q := [1,2,2,5,10,10,10,23];
var i,j := FindRange(q, 10);
print "The number of occurrences of 10 in the sorted sequence [1,2,2,5,10,10,10,23] is ";
print j-i;
print " (starting at index ";
print i;
print " and ending in ";
print j;
print ").\n";
}
// arr = [0, 1, 2] key = 10 -> left = right = |q| = 3
q := [0,1,2];
i,j := FindRange(q, 10);
print "The number of occurrences of 10 in the sorted sequence [0,1,2] is ";
print j-i;
print " (starting at index ";
print i;
print " and ending in ";
print j;
print ").\n";
// arr = [10, 11, 12] key = 1 -> left = right = 0
q := [10,11,12];
i,j := FindRange(q, 1);
print "The number of occurrences of 1 in the sorted sequence [10,11,12] is ";
print j-i;
print " (starting at index ";
print i;
print " and ending in ";
print j;
print ").\n";
// arr = [1, 11, 22] key = 10 -> left = right = i+1 = 1 i is the nearest index to key
q := [1,11,22];
i,j := FindRange(q, 10);
print "The number of occurrences of 10 in the sorted sequence [1,11,22] is ";
print j-i;
print " (starting at index ";
print i;
print " and ending in ";
print j;
print ").\n";
// arr = [1 ,11, 22] key = 11 -> left = 1, right = 2
q := [1,11,22];
i,j := FindRange(q, 11);
print "The number of occurrences of 11 in the sorted sequence [1,11,22] is ";
print j-i;
print " (starting at index ";
print i;
print " and ending in ";
print j;
print ").\n";
// arr = [1 ,11, 11] key = 11 -> left = 1, right = 3
q := [1,11,11];
i,j := FindRange(q, 11);
print "The number of occurrences of 11 in the sorted sequence [1,11,11] is ";
print j-i;
print " (starting at index ";
print i;
print " and ending in ";
print j;
print ").\n";
// arr = [11 ,11, 14] key = 11 -> left = 0, right = 2
q := [11 ,11, 14];
i,j := FindRange(q, 11);
print "The number of occurrences of 11 in the sorted sequence [11 ,11, 14] is ";
print j-i;
print " (starting at index ";
print i;
print " and ending in ";
print j;
print ").\n";
// arr = [1 ,11, 11, 11, 13] key = 11 -> left = 1, right = 4
q := [1,11,11,11,13];
i,j := FindRange(q, 11);
print "The number of occurrences of 11 in the sorted sequence [1,11,11,11,13] is ";
print j-i;
print " (starting at index ";
print i;
print " and ending in ";
print j;
print ").\n";
// arr = [] key = 11 -> left = 0, right = 0
q := [];
i,j := FindRange(q, 11);
print "The number of occurrences of 11 in the sorted sequence [] is ";
print j-i;
print " (starting at index ";
print i;
print " and ending in ";
print j;
print ").\n";
// arr = [11] key = 10 -> left = 0, right = 0
q := [11];
i,j := FindRange(q, 10);
print "The number of occurrences of 10 in the sorted sequence [11] is ";
print j-i;
print " (starting at index ";
print i;
print " and ending in ";
print j;
print ").\n";
// arr = [11] key = 11 -> left = 0, right = 1
q := [11];
i,j := FindRange(q, 11);
print "The number of occurrences of 11 in the sorted sequence [11] is ";
print j-i;
print " (starting at index ";
print i;
print " and ending in ";
print j;
print ").\n";
}
predicate Sorted(q: seq<int>)
{
forall i,j :: 0 <= i <= j < |q| ==> q[i] <= q[j]
}
method {:verify true} FindRange(q: seq<int>, key: int) returns (left: nat, right: nat)
requires Sorted(q)
ensures left <= right <= |q|
ensures forall i :: 0 <= i < left ==> q[i] < key
ensures forall i :: left <= i < right ==> q[i] == key
ensures forall i :: right <= i < |q| ==> q[i] > key
{
left := BinarySearch(q, key, 0, |q|, (n, m) => (n >= m));
right := BinarySearch(q, key, left, |q|, (n, m) => (n > m));
}
// all the values in the range satisfy `comparer` (comparer(q[i], key) == true)
predicate RangeSatisfiesComparer(q: seq<int>, key: int, lowerBound: nat, upperBound: nat, comparer: (int, int) -> bool)
requires 0 <= lowerBound <= upperBound <= |q|
{
forall i :: lowerBound <= i < upperBound ==> comparer(q[i], key)
}
// all the values in the range satisfy `!comparer` (comparer(q[i], key) == false)
predicate RangeSatisfiesComparerNegation(q: seq<int>, key: int, lowerBound: nat, upperBound: nat, comparer: (int, int) -> bool)
requires 0 <= lowerBound <= upperBound <= |q|
{
RangeSatisfiesComparer(q, key, lowerBound, upperBound, (n1, n2) => !comparer(n1, n2))
}
method BinarySearch(q: seq<int>, key: int, lowerBound: nat, upperBound: nat, comparer: (int, int) -> bool) returns (index: nat)
requires Sorted(q)
requires 0 <= lowerBound <= upperBound <= |q|
requires RangeSatisfiesComparerNegation(q, key, 0, lowerBound, comparer)
requires RangeSatisfiesComparer(q, key, upperBound, |q|, comparer)
// comparer is '>' or '>='
requires
(forall n1, n2 :: comparer(n1, n2) == (n1 > n2)) ||
(forall n1, n2 :: comparer(n1, n2) == (n1 >= n2))
ensures lowerBound <= index <= upperBound
ensures RangeSatisfiesComparerNegation(q, key, 0, index, comparer)
ensures RangeSatisfiesComparer(q, key, index, |q|, comparer)
{
var low : nat := lowerBound;
var high : nat := upperBound;
while (low < high)
{
var middle:= low + ((high - low) / 2);
if (comparer(q[middle], key))
{
high := middle;
}
else
{
low := middle + 1;
}
}
index := high;
}
|
390 | assertive-programming-assignment-1_tmp_tmp3h_cj44u_ProdAndCount.dfy | method Main() {
var q := [7, -2, 3, -2];
var p, c := ProdAndCount(q, -2);
print "The product of all positive elements in [7,-2,3,-2] is ";
print p;
assert p == RecursivePositiveProduct(q) == 21;
print "\nThe number of occurrences of -2 in [7,-2,3,-2] is ";
print c;
assert c == RecursiveCount(-2, q) == 2 by {
calc {
RecursiveCount(-2, q);
== { assert q[3] == -2; AppendOne(q, 3); }
1 + RecursiveCount(-2, q[..3]);
== { assert q[2] != -2; AppendOne(q, 2); }
1 + RecursiveCount(-2, q[..2]);
== { assert q[1] == -2; AppendOne(q, 1); }
1 + 1 + RecursiveCount(-2, q[..1]);
== { assert q[0] != -2; AppendOne(q, 0); }
1 + 1 + RecursiveCount(-2, q[..0]);
}
}
}
lemma AppendOne<T>(q: seq<T>, n: nat)
requires n < |q|
ensures q[..n]+[q[n]] == q[..n+1]
{}
function RecursivePositiveProduct(q: seq<int>): int
decreases |q|
{
if q == [] then 1
else if q[0] <= 0 then RecursivePositiveProduct(q[1..])
else q[0] * RecursivePositiveProduct(q[1..])
}
function RecursiveCount(key: int, q: seq<int>): int
decreases |q|
{
if q == [] then 0
else if q[|q|-1] == key then 1+RecursiveCount(key, q[..|q|-1])
else RecursiveCount(key, q[..|q|-1])
}
method ProdAndCount(q: seq<int>, key: int) returns (prod: int, count: nat)
ensures prod == RecursivePositiveProduct(q)
ensures count == RecursiveCount(key, q)
{
prod := 1;
count := 0;
var size := |q|;
var i := 0;
var curr := 0;
while i < size
invariant 0 <= i <= size && // legal index
count == RecursiveCount(key, q[..i]) && // count invar
prod == RecursivePositiveProduct(q[..i])
decreases size-i
{
Lemma_Count_Inv(q, i, count, key);
Lemma_Prod_Inv(q, i, prod);
curr := q[i];
if curr > 0 {
prod := prod*curr;
}
if curr == key {
count := count+1;
}
i := i+1;
}
Lemma_Count_Finish(q, i, count, key);
Lemma_Prod_Finish(q, i, prod);
}
function county(elem: int, key: int): int{
if elem==key then 1 else 0
}
function prody(elem: int): int{
if elem <= 0 then 1 else elem
}
lemma Lemma_Count_Inv(q: seq<int>, i: nat, count: int, key: int)
requires 0 <= i < |q| && count == RecursiveCount(key, q[..i])
ensures 0 <= i+1 <= |q| && county(q[i],key)+count == RecursiveCount(key, q[..i+1])
{
assert q[..i+1] == q[..i] + [q[i]];
var q1 := q[..i+1];
calc {
RecursiveCount(key, q[..i+1]);
== // def.
if q1 == [] then 0
else if q1[i] == key then 1+RecursiveCount(key,q1[..i])
else RecursiveCount(key, q1[..i]);
== { assert q1 != []; } // simplification for a non-empty sequence
if q1[i] == key then 1+RecursiveCount(key, q1[..i])
else RecursiveCount(key,q1[..i]);
== {KibutzLaw1(q1,key,i);} // the kibutz law
(if q1[i] == key then 1 else 0) + RecursiveCount(key, q1[..i]);
==
county(q1[i],key) + RecursiveCount(key, q1[..i]);
==
county(q[i],key) + RecursiveCount(key, q[..i]);
}
}
lemma Lemma_Prod_Inv(q: seq<int>, i: nat, prod: int)
requires 0 <= i < |q| && prod == RecursivePositiveProduct(q[..i])
ensures 0 <= i+1 <= |q| && prody(q[i])*prod == RecursivePositiveProduct(q[..i+1])
{
assert q[..i+1] == q[..i] + [q[i]];
var q1 := q[..i+1];
calc {
RecursivePositiveProduct(q[..i+1]);
== // def.
if q1 == [] then 1
else if q1[0] <= 0 then RecursivePositiveProduct(q1[1..])
else q1[0] * RecursivePositiveProduct(q1[1..]);
== { assert q1 != []; } // simplification for a non-empty sequence
if q1[0] <= 0 then RecursivePositiveProduct(q1[1..])
else q1[0] * RecursivePositiveProduct(q1[1..]);
== // def. of q1
if q[0] <= 0 then RecursivePositiveProduct(q[1..i+1])
else q[0] * RecursivePositiveProduct(q[1..i+1]);
== { KibutzLaw2(q);}
(if q[0] <= 0 then 1 else q[0])*RecursivePositiveProduct(q[1..i+1]);
==
prody(q[0])*RecursivePositiveProduct(q[1..i+1]);
== {PrependProd(q);}
RecursivePositiveProduct(q[..i+1]);
== {AppendProd(q[..i+1]);}
prody(q[i])*RecursivePositiveProduct(q[..i]);
==
prody(q[i])*prod;
}
}
lemma Lemma_Count_Finish(q: seq<int>, i: nat, count: int, key: int)
requires inv: 0 <= i <= |q| && count == RecursiveCount(key, q[..i])
requires neg_of_guard: i >= |q|
ensures count == RecursiveCount(key, q)
{
assert i <= |q| && count == RecursiveCount(key, q[..i]) by { reveal inv; }
assert i == |q| by { reveal inv,neg_of_guard; }
assert q[..i] == q[..|q|] == q;
}
lemma Lemma_Prod_Finish(q: seq<int>, i: nat, prod: int)
requires inv: 0 <= i <= |q| && prod == RecursivePositiveProduct(q[..i])
requires neg_of_guard: i >= |q|
ensures prod == RecursivePositiveProduct(q)
{
assert i <= |q| && prod == RecursivePositiveProduct(q[..i]) by { reveal inv; }
assert i == |q| by { reveal inv,neg_of_guard; }
assert q[..i] == q[..|q|] == q;
}
lemma KibutzLaw1(q: seq<int>,key: int,i: nat)
requires q != [] && i < |q|
ensures
(if q[|q|-1] == key then 1 + RecursiveCount(key, q[1..i+1])
else 0 + RecursiveCount(key, q[1..i+1]))
==
(if q[|q|-1] == key then 1 else 0) + RecursiveCount(key, q[1..i+1])
{
if q[|q|-1] == key {
calc {
(if q[|q|-1] == key then 1 + RecursiveCount(key, q[1..i+1])
else 0 + RecursiveCount(key, q[1..i+1]));
==
1 + RecursiveCount(key, q[1..i+1]);
==
(if q[|q|-1] == key then 1 else 0) + RecursiveCount(key, q[1..i+1]);
}
} else {
calc {
(if q[|q|-1] == key then 1 + RecursiveCount(key, q[1..i+1])
else 0 + RecursiveCount(key, q[1..i+1]));
==
0 + RecursiveCount(key, q[1..i+1]);
==
(if q[|q|-1] == key then 1 else 0) + RecursiveCount(key, q[1..i+1]);
}
}
}
lemma {:verify true} KibutzLaw2(q: seq<int>)
requires q != []
ensures
(if q[0] <= 0 then RecursivePositiveProduct(q[1..])
else q[0] * RecursivePositiveProduct(q[1..]))
==
(if q[0] <= 0 then 1 else q[0])*RecursivePositiveProduct(q[1..])
{
if q[0] <= 0 {
calc {
(if q[0] <= 0 then RecursivePositiveProduct(q[1..])
else q[0] * RecursivePositiveProduct(q[1..]));
==
RecursivePositiveProduct(q[1..]);
==
(if q[0] <= 0 then 1 else q[0])*RecursivePositiveProduct(q[1..]);
}
} else {
calc {
(if q[0] <= 0 then RecursivePositiveProduct(q[1..])
else q[0] * RecursivePositiveProduct(q[1..]));
==
q[0] * RecursivePositiveProduct(q[1..]);
==
(if q[0] <= 0 then 1 else q[0])*RecursivePositiveProduct(q[1..]);
}
}
}
lemma AppendCount(key: int, q: seq<int>)
requires q != []
ensures RecursiveCount(key, q) == RecursiveCount(key,q[..|q|-1])+county(q[|q|-1], key)
{
if |q| == 1
{
assert
RecursiveCount(key,q[..|q|-1])+county(q[|q|-1], key) ==
RecursiveCount(key,q[..0])+county(q[0], key) ==
RecursiveCount(key, [])+county(q[0], key) ==
0+county(q[0], key) ==
county(q[0], key);
assert RecursiveCount(key, q) == county(q[0], key);
}
else
{ // inductive step
var q1 := q[1..];
calc {
RecursiveCount(key, q);
== // def. for a non-empty sequence
if q == [] then 0
else if q[|q|-1] == key then 1+RecursiveCount(key, q[..|q|-1])
else RecursiveCount(key, q[..|q|-1]);
==
RecursiveCount(key, q[..|q|-1]) + county(q[|q|-1], key);
}
}
}
lemma PrependProd(q: seq<int>)
requires q != []
ensures RecursivePositiveProduct(q) == prody(q[0])*RecursivePositiveProduct(q[1..])
{
calc {
RecursivePositiveProduct(q);
==
if q == [] then 1
else if q[0] <= 0 then RecursivePositiveProduct(q[1..])
else q[0] * RecursivePositiveProduct(q[1..]);
== { assert q != []; } // simplification for a non-empty sequence
if q[0] <= 0 then RecursivePositiveProduct(q[1..])
else q[0] * RecursivePositiveProduct(q[1..]);
== { KibutzLaw2(q);}
(if q[0] <= 0 then 1 else q[0])*RecursivePositiveProduct(q[1..]);
==
prody(q[0])*RecursivePositiveProduct(q[1..]);
}
}
lemma AppendProd(q: seq<int>)
requires q != []
ensures RecursivePositiveProduct(q) == RecursivePositiveProduct(q[..|q|-1])*prody(q[|q|-1])
{
if |q| == 1
{
assert
RecursivePositiveProduct(q[..|q|-1])*prody(q[|q|-1]) ==
RecursivePositiveProduct(q[..0])*prody(q[0]) ==
RecursivePositiveProduct([])*prody(q[0]) ==
1*prody(q[0]) ==
prody(q[0]);
assert RecursivePositiveProduct(q) == prody(q[0]);
}
else
{ // inductive step
var q1 := q[1..];
calc {
RecursivePositiveProduct(q);
== // def. for a non-empty sequence
prody(q[0]) * RecursivePositiveProduct(q[1..]);
== { assert q1 != []; assert |q1| < |q|; AppendProd(q1); } // induction hypothesis (one assertion for the precondition, another for termination)
prody(q[0]) * RecursivePositiveProduct(q1[..|q1|-1]) * prody(q1[|q1|-1]);
== { assert q1[..|q1|-1] == q[1..|q|-1]; assert q1[|q1|-1] == q[|q|-1]; }
prody(q[0]) * RecursivePositiveProduct(q[1..|q|-1]) * prody(q[|q|-1]);
== {PrependProd(q[..|q|-1]);}
RecursivePositiveProduct(q[..|q|-1]) * prody(q[|q|-1]);
}
}
}
| method Main() {
var q := [7, -2, 3, -2];
var p, c := ProdAndCount(q, -2);
print "The product of all positive elements in [7,-2,3,-2] is ";
print p;
print "\nThe number of occurrences of -2 in [7,-2,3,-2] is ";
print c;
calc {
RecursiveCount(-2, q);
== { assert q[3] == -2; AppendOne(q, 3); }
1 + RecursiveCount(-2, q[..3]);
== { assert q[2] != -2; AppendOne(q, 2); }
1 + RecursiveCount(-2, q[..2]);
== { assert q[1] == -2; AppendOne(q, 1); }
1 + 1 + RecursiveCount(-2, q[..1]);
== { assert q[0] != -2; AppendOne(q, 0); }
1 + 1 + RecursiveCount(-2, q[..0]);
}
}
}
lemma AppendOne<T>(q: seq<T>, n: nat)
requires n < |q|
ensures q[..n]+[q[n]] == q[..n+1]
{}
function RecursivePositiveProduct(q: seq<int>): int
{
if q == [] then 1
else if q[0] <= 0 then RecursivePositiveProduct(q[1..])
else q[0] * RecursivePositiveProduct(q[1..])
}
function RecursiveCount(key: int, q: seq<int>): int
{
if q == [] then 0
else if q[|q|-1] == key then 1+RecursiveCount(key, q[..|q|-1])
else RecursiveCount(key, q[..|q|-1])
}
method ProdAndCount(q: seq<int>, key: int) returns (prod: int, count: nat)
ensures prod == RecursivePositiveProduct(q)
ensures count == RecursiveCount(key, q)
{
prod := 1;
count := 0;
var size := |q|;
var i := 0;
var curr := 0;
while i < size
count == RecursiveCount(key, q[..i]) && // count invar
prod == RecursivePositiveProduct(q[..i])
{
Lemma_Count_Inv(q, i, count, key);
Lemma_Prod_Inv(q, i, prod);
curr := q[i];
if curr > 0 {
prod := prod*curr;
}
if curr == key {
count := count+1;
}
i := i+1;
}
Lemma_Count_Finish(q, i, count, key);
Lemma_Prod_Finish(q, i, prod);
}
function county(elem: int, key: int): int{
if elem==key then 1 else 0
}
function prody(elem: int): int{
if elem <= 0 then 1 else elem
}
lemma Lemma_Count_Inv(q: seq<int>, i: nat, count: int, key: int)
requires 0 <= i < |q| && count == RecursiveCount(key, q[..i])
ensures 0 <= i+1 <= |q| && county(q[i],key)+count == RecursiveCount(key, q[..i+1])
{
var q1 := q[..i+1];
calc {
RecursiveCount(key, q[..i+1]);
== // def.
if q1 == [] then 0
else if q1[i] == key then 1+RecursiveCount(key,q1[..i])
else RecursiveCount(key, q1[..i]);
== { assert q1 != []; } // simplification for a non-empty sequence
if q1[i] == key then 1+RecursiveCount(key, q1[..i])
else RecursiveCount(key,q1[..i]);
== {KibutzLaw1(q1,key,i);} // the kibutz law
(if q1[i] == key then 1 else 0) + RecursiveCount(key, q1[..i]);
==
county(q1[i],key) + RecursiveCount(key, q1[..i]);
==
county(q[i],key) + RecursiveCount(key, q[..i]);
}
}
lemma Lemma_Prod_Inv(q: seq<int>, i: nat, prod: int)
requires 0 <= i < |q| && prod == RecursivePositiveProduct(q[..i])
ensures 0 <= i+1 <= |q| && prody(q[i])*prod == RecursivePositiveProduct(q[..i+1])
{
var q1 := q[..i+1];
calc {
RecursivePositiveProduct(q[..i+1]);
== // def.
if q1 == [] then 1
else if q1[0] <= 0 then RecursivePositiveProduct(q1[1..])
else q1[0] * RecursivePositiveProduct(q1[1..]);
== { assert q1 != []; } // simplification for a non-empty sequence
if q1[0] <= 0 then RecursivePositiveProduct(q1[1..])
else q1[0] * RecursivePositiveProduct(q1[1..]);
== // def. of q1
if q[0] <= 0 then RecursivePositiveProduct(q[1..i+1])
else q[0] * RecursivePositiveProduct(q[1..i+1]);
== { KibutzLaw2(q);}
(if q[0] <= 0 then 1 else q[0])*RecursivePositiveProduct(q[1..i+1]);
==
prody(q[0])*RecursivePositiveProduct(q[1..i+1]);
== {PrependProd(q);}
RecursivePositiveProduct(q[..i+1]);
== {AppendProd(q[..i+1]);}
prody(q[i])*RecursivePositiveProduct(q[..i]);
==
prody(q[i])*prod;
}
}
lemma Lemma_Count_Finish(q: seq<int>, i: nat, count: int, key: int)
requires inv: 0 <= i <= |q| && count == RecursiveCount(key, q[..i])
requires neg_of_guard: i >= |q|
ensures count == RecursiveCount(key, q)
{
}
lemma Lemma_Prod_Finish(q: seq<int>, i: nat, prod: int)
requires inv: 0 <= i <= |q| && prod == RecursivePositiveProduct(q[..i])
requires neg_of_guard: i >= |q|
ensures prod == RecursivePositiveProduct(q)
{
}
lemma KibutzLaw1(q: seq<int>,key: int,i: nat)
requires q != [] && i < |q|
ensures
(if q[|q|-1] == key then 1 + RecursiveCount(key, q[1..i+1])
else 0 + RecursiveCount(key, q[1..i+1]))
==
(if q[|q|-1] == key then 1 else 0) + RecursiveCount(key, q[1..i+1])
{
if q[|q|-1] == key {
calc {
(if q[|q|-1] == key then 1 + RecursiveCount(key, q[1..i+1])
else 0 + RecursiveCount(key, q[1..i+1]));
==
1 + RecursiveCount(key, q[1..i+1]);
==
(if q[|q|-1] == key then 1 else 0) + RecursiveCount(key, q[1..i+1]);
}
} else {
calc {
(if q[|q|-1] == key then 1 + RecursiveCount(key, q[1..i+1])
else 0 + RecursiveCount(key, q[1..i+1]));
==
0 + RecursiveCount(key, q[1..i+1]);
==
(if q[|q|-1] == key then 1 else 0) + RecursiveCount(key, q[1..i+1]);
}
}
}
lemma {:verify true} KibutzLaw2(q: seq<int>)
requires q != []
ensures
(if q[0] <= 0 then RecursivePositiveProduct(q[1..])
else q[0] * RecursivePositiveProduct(q[1..]))
==
(if q[0] <= 0 then 1 else q[0])*RecursivePositiveProduct(q[1..])
{
if q[0] <= 0 {
calc {
(if q[0] <= 0 then RecursivePositiveProduct(q[1..])
else q[0] * RecursivePositiveProduct(q[1..]));
==
RecursivePositiveProduct(q[1..]);
==
(if q[0] <= 0 then 1 else q[0])*RecursivePositiveProduct(q[1..]);
}
} else {
calc {
(if q[0] <= 0 then RecursivePositiveProduct(q[1..])
else q[0] * RecursivePositiveProduct(q[1..]));
==
q[0] * RecursivePositiveProduct(q[1..]);
==
(if q[0] <= 0 then 1 else q[0])*RecursivePositiveProduct(q[1..]);
}
}
}
lemma AppendCount(key: int, q: seq<int>)
requires q != []
ensures RecursiveCount(key, q) == RecursiveCount(key,q[..|q|-1])+county(q[|q|-1], key)
{
if |q| == 1
{
RecursiveCount(key,q[..|q|-1])+county(q[|q|-1], key) ==
RecursiveCount(key,q[..0])+county(q[0], key) ==
RecursiveCount(key, [])+county(q[0], key) ==
0+county(q[0], key) ==
county(q[0], key);
}
else
{ // inductive step
var q1 := q[1..];
calc {
RecursiveCount(key, q);
== // def. for a non-empty sequence
if q == [] then 0
else if q[|q|-1] == key then 1+RecursiveCount(key, q[..|q|-1])
else RecursiveCount(key, q[..|q|-1]);
==
RecursiveCount(key, q[..|q|-1]) + county(q[|q|-1], key);
}
}
}
lemma PrependProd(q: seq<int>)
requires q != []
ensures RecursivePositiveProduct(q) == prody(q[0])*RecursivePositiveProduct(q[1..])
{
calc {
RecursivePositiveProduct(q);
==
if q == [] then 1
else if q[0] <= 0 then RecursivePositiveProduct(q[1..])
else q[0] * RecursivePositiveProduct(q[1..]);
== { assert q != []; } // simplification for a non-empty sequence
if q[0] <= 0 then RecursivePositiveProduct(q[1..])
else q[0] * RecursivePositiveProduct(q[1..]);
== { KibutzLaw2(q);}
(if q[0] <= 0 then 1 else q[0])*RecursivePositiveProduct(q[1..]);
==
prody(q[0])*RecursivePositiveProduct(q[1..]);
}
}
lemma AppendProd(q: seq<int>)
requires q != []
ensures RecursivePositiveProduct(q) == RecursivePositiveProduct(q[..|q|-1])*prody(q[|q|-1])
{
if |q| == 1
{
RecursivePositiveProduct(q[..|q|-1])*prody(q[|q|-1]) ==
RecursivePositiveProduct(q[..0])*prody(q[0]) ==
RecursivePositiveProduct([])*prody(q[0]) ==
1*prody(q[0]) ==
prody(q[0]);
}
else
{ // inductive step
var q1 := q[1..];
calc {
RecursivePositiveProduct(q);
== // def. for a non-empty sequence
prody(q[0]) * RecursivePositiveProduct(q[1..]);
== { assert q1 != []; assert |q1| < |q|; AppendProd(q1); } // induction hypothesis (one assertion for the precondition, another for termination)
prody(q[0]) * RecursivePositiveProduct(q1[..|q1|-1]) * prody(q1[|q1|-1]);
== { assert q1[..|q1|-1] == q[1..|q|-1]; assert q1[|q1|-1] == q[|q|-1]; }
prody(q[0]) * RecursivePositiveProduct(q[1..|q|-1]) * prody(q[|q|-1]);
== {PrependProd(q[..|q|-1]);}
RecursivePositiveProduct(q[..|q|-1]) * prody(q[|q|-1]);
}
}
}
|
391 | assertive-programming-assignment-1_tmp_tmp3h_cj44u_SearchAddends.dfy | method Main()
{
var q := [1,2,4,5,6,7,10,23];
assert Sorted(q);
assert HasAddends(q,10) by { assert q[2]+q[4] == 4+6 == 10; }
var i,j := FindAddends(q, 10);
print "Searching for addends of 10 in q == [1,2,4,5,6,7,10,23]:\n";
print "Found that q[";
print i;
print "] + q[";
print j;
print "] == ";
print q[i];
print " + ";
print q[j];
print " == 10";
assert i == 2 && j == 4;
}
predicate Sorted(q: seq<int>)
{
forall i,j :: 0 <= i <= j < |q| ==> q[i] <= q[j]
}
predicate HasAddends(q: seq<int>, x: int)
{
exists i,j :: 0 <= i < j < |q| && q[i] + q[j] == x
}
method FindAddends(q: seq<int>, x: int) returns (i: nat, j: nat)
requires Sorted(q) && HasAddends(q, x)
ensures i < j < |q| && q[i]+q[j] == x
{
i := 0;
j := |q| - 1;
var sum := q[i] + q[j];
while sum != x
invariant LoopInv(q, x, i, j, sum)
decreases j - i
{
if (sum > x)
{
// Sum is too big, lower it by decreasing the high index
LoopInvWhenSumIsBigger(q, x, i, j, sum);
j := j - 1;
}
// 'sum == x' cannot occur because the loop's guard is 'sum !=x'.
else // (sum < x)
{
// Sum is too small, make it bigger by increasing the low index.
i := i + 1;
}
sum := q[i] + q[j];
}
}
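// Illustrative trace (not part of the verification): for q == [1,2,4,5,6,7,10,23] and
// x == 10 the two-pointer loop visits the sums 24, 11, 8, 9, 11 and then stops at
// q[2] + q[4] == 4 + 6 == 10, matching the assertion in Main above.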
predicate IsValidIndex<T>(q: seq<T>, i: nat)
{
0 <= i < |q|
}
predicate AreOreredIndices<T>(q: seq<T>, i: nat, j: nat)
{
0 <= i < j < |q|
}
predicate AreAddendsIndices(q: seq<int>, x: int, i: nat, j: nat)
requires IsValidIndex(q, i) && IsValidIndex(q, j)
{
q[i] + q[j] == x
}
predicate HasAddendsInIndicesRange(q: seq<int>, x: int, i: nat, j: nat)
requires AreOreredIndices(q, i, j)
{
HasAddends(q[i..(j + 1)], x)
}
predicate LoopInv(q: seq<int>, x: int, i: nat, j: nat, sum: int)
{
AreOreredIndices(q, i, j) &&
HasAddendsInIndicesRange(q, x, i, j) &&
AreAddendsIndices(q, sum, i, j)
}
lemma LoopInvWhenSumIsBigger(q: seq<int>, x: int, i: nat, j: nat, sum: int)
requires HasAddends(q, x)
requires Sorted(q)
requires sum > x;
requires LoopInv(q, x, i, j, sum)
ensures HasAddendsInIndicesRange(q, x, i, j - 1)
{
assert q[i..j] < q[i..(j + 1)];
}
| method Main()
{
var q := [1,2,4,5,6,7,10,23];
var i,j := FindAddends(q, 10);
print "Searching for addends of 10 in q == [1,2,4,5,6,7,10,23]:\n";
print "Found that q[";
print i;
print "] + q[";
print j;
print "] == ";
print q[i];
print " + ";
print q[j];
print " == 10";
}
predicate Sorted(q: seq<int>)
{
forall i,j :: 0 <= i <= j < |q| ==> q[i] <= q[j]
}
predicate HasAddends(q: seq<int>, x: int)
{
exists i,j :: 0 <= i < j < |q| && q[i] + q[j] == x
}
method FindAddends(q: seq<int>, x: int) returns (i: nat, j: nat)
requires Sorted(q) && HasAddends(q, x)
ensures i < j < |q| && q[i]+q[j] == x
{
i := 0;
j := |q| - 1;
var sum := q[i] + q[j];
while sum != x
{
if (sum > x)
{
// Sum is too big, lower it by decreasing the high index
LoopInvWhenSumIsBigger(q, x, i, j, sum);
j := j - 1;
}
// 'sum == x' cannot occur because the loop's guard is 'sum !=x'.
else // (sum < x)
{
// Sum is too small, make it bigger by increasing the low index.
i := i + 1;
}
sum := q[i] + q[j];
}
}
predicate IsValidIndex<T>(q: seq<T>, i: nat)
{
0 <= i < |q|
}
predicate AreOreredIndices<T>(q: seq<T>, i: nat, j: nat)
{
0 <= i < j < |q|
}
predicate AreAddendsIndices(q: seq<int>, x: int, i: nat, j: nat)
requires IsValidIndex(q, i) && IsValidIndex(q, j)
{
q[i] + q[j] == x
}
predicate HasAddendsInIndicesRange(q: seq<int>, x: int, i: nat, j: nat)
requires AreOreredIndices(q, i, j)
{
HasAddends(q[i..(j + 1)], x)
}
predicate LoopInv(q: seq<int>, x: int, i: nat, j: nat, sum: int)
{
AreOreredIndices(q, i, j) &&
HasAddendsInIndicesRange(q, x, i, j) &&
AreAddendsIndices(q, sum, i, j)
}
lemma LoopInvWhenSumIsBigger(q: seq<int>, x: int, i: nat, j: nat, sum: int)
requires HasAddends(q, x)
requires Sorted(q)
requires sum > x;
requires LoopInv(q, x, i, j, sum)
ensures HasAddendsInIndicesRange(q, x, i, j - 1)
{
}
|
392 | bbfny_tmp_tmpw4m0jvl0_enjoying.dfy | // shenanigans going through the dafny tutorial
method MultipleReturns(x: int, y: int) returns (more: int, less: int)
requires 0 < y
ensures less < x < more
{
more := x + y;
less := x - y;
}
method Max(a: int, b: int) returns (c: int)
ensures a <= c && b <= c
ensures a == c || b == c
{
if a > b {
c := a;
} else { c := b; }
}
method Testing() {
var x := Max(3,15);
assert x >= 3 && x >= 15;
assert x == 15;
}
function max(a: int, b: int): int
{
if a > b then a else b
}
method Testing'() {
assert max(1,2) == 2;
assert forall a,b : int :: max (a,b) == a || max (a,b) == b;
}
function abs(x: int): int
{
if x < 0 then -x else x
}
method Abs(x: int) returns (y: int)
ensures y == abs(x)
{
return abs(x);
}
method m(n: nat)
{
var i := 0;
while i != n
invariant 0 <= i <= n
{
i := i + 1;
}
assert i == n;
}
function fib(n: nat): nat
{
if n == 0 then 0
else if n == 1 then 1
else fib(n - 1) + fib(n - 2)
}
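// Illustrative values: fib(0) == 0, fib(1) == 1, fib(5) == 5, fib(6) == 8.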
method Find(a: array<int>, key: int) returns (index: int)
ensures 0 <= index ==> index < a.Length && a[index] == key
ensures index < 0 ==> forall k :: 0 <= k < a.Length ==> a[k] != key
{
var i := 0;
while i < a.Length
invariant 0 <= i <= a.Length
invariant forall k :: 0 <= k < i ==> a[k] != key
{
if a[i] == key {return i;}
i := i+1;
}
assert i == a.Length;
return -1;
}
method FindMax(a: array<int>) returns (i: int)
requires a.Length >= 1
ensures 0 <= i < a.Length
ensures forall k :: 0 <= k < a.Length ==> a[k] <= a[i]
{
i := 0;
var max := a[i];
var j := 1;
while j < a.Length
invariant 0 < j <= a.Length
invariant i < j
invariant max == a[i]
invariant forall k :: 0 <= k < j ==> a[k] <= max
{
if max < a[j] { max := a[j]; i := j; }
j := j+1;
}
}
predicate sorted(a: array<int>)
reads a
{
forall j, k :: 0 <= j < k < a.Length ==> a[j] < a[k]
}
predicate sorted'(a: array?<int>) // Change the type
reads a
{
forall j, k :: a != null && 0 <= j < k < a.Length ==> a[j] <= a[k]
}
| // shenanigans going through the dafny tutorial
method MultipleReturns(x: int, y: int) returns (more: int, less: int)
requires 0 < y
ensures less < x < more
{
more := x + y;
less := x - y;
}
method Max(a: int, b: int) returns (c: int)
ensures a <= c && b <= c
ensures a == c || b == c
{
if a > b {
c := a;
} else { c := b; }
}
method Testing() {
var x := Max(3,15);
}
function max(a: int, b: int): int
{
if a > b then a else b
}
method Testing'() {
}
function abs(x: int): int
{
if x < 0 then -x else x
}
method Abs(x: int) returns (y: int)
ensures y == abs(x)
{
return abs(x);
}
method m(n: nat)
{
var i := 0;
while i != n
{
i := i + 1;
}
}
function fib(n: nat): nat
{
if n == 0 then 0
else if n == 1 then 1
else fib(n - 1) + fib(n - 2)
}
method Find(a: array<int>, key: int) returns (index: int)
ensures 0 <= index ==> index < a.Length && a[index] == key
ensures index < 0 ==> forall k :: 0 <= k < a.Length ==> a[k] != key
{
var i := 0;
while i < a.Length
{
if a[i] == key {return i;}
i := i+1;
}
return -1;
}
method FindMax(a: array<int>) returns (i: int)
requires a.Length >= 1
ensures 0 <= i < a.Length
ensures forall k :: 0 <= k < a.Length ==> a[k] <= a[i]
{
i := 0;
var max := a[i];
var j := 1;
while j < a.Length
{
if max < a[j] { max := a[j]; i := j; }
j := j+1;
}
}
predicate sorted(a: array<int>)
reads a
{
forall j, k :: 0 <= j < k < a.Length ==> a[j] < a[k]
}
predicate sorted'(a: array?<int>) // Change the type
reads a
{
forall j, k :: a != null && 0 <= j < k < a.Length ==> a[j] <= a[k]
}
|
393 | circular-queue-implemetation_tmp_tmpnulfdc9l_Queue.dfy | class {:autocontracts} Queue {
// Attributes
var circularQueue: array<int>;
var rear: nat; // tail
var front: nat; // head
var counter: nat;
// Abstraction
ghost var Content: seq<int>;
// Predicate
ghost predicate Valid()
{
0 <= counter <= circularQueue.Length &&
0 <= front &&
0 <= rear &&
Content == circularQueue[..]
}
// Constructor
constructor()
ensures circularQueue.Length == 0
ensures front == 0 && rear == 0
ensures Content == [] // REVIEW
ensures counter == 0
{
circularQueue := new int[0];
rear := 0;
front := 0;
Content := [];
counter := 0;
} //[size] ; [1, 2, 3, 4]
method insert(item: int)
// requires rear <= circularQueue.Length
// ensures (front == 0 && rear == 0 && circularQueue.Length == 1) ==>
// (
// Content == [item] &&
// |Content| == 1
// )
// ensures circularQueue.Length != 0 ==>
// (
// (front == 0 && rear == 0 && circularQueue.Length == 1) ==>
// (
// Content == old(Content) &&
// |Content| == old(|Content|)
// )
// ||
// (front == 0 && rear == circularQueue.Length-1 ) ==>
// (
// Content == old(Content) + [item] &&
// |Content| == old(|Content|) + 1
// )
// ||
// (rear + 1 != front && rear != circularQueue.Length-1 && rear + 1 < circularQueue.Length - 1) ==>
// (
// Content == old(Content[0..rear]) + [item] + old(Content[rear..circularQueue.Length])
// )
// ||
// (rear + 1 == front) ==>
// (
// Content[0..rear + 1] == old(Content[0..rear]) + [item] &&
// forall i :: rear + 2 <= i <= circularQueue.Length ==> Content[i] == old(Content[i-1])
// )
// )
{
//counter := counter + 1;
// if front == 0 && rear == 0 && circularQueue.Length == 0
// {
// var queueInsert: array<int>;
// queueInsert := new int [circularQueue.Length + 1];
// queueInsert[0] := item;
// circularQueue := queueInsert;
// Content := [item];
// rear := rear + 1;
// }
// else if front == 0 && rear == circularQueue.Length-1 && circularQueue.Length > 0
// {
// var queueInsert: array<int>;
// queueInsert := new int [circularQueue.Length + 1];
// var i: nat := 0;
// while i < circularQueue.Length
// invariant circularQueue.Length + 1 == queueInsert.Length
// {
// queueInsert[i] := circularQueue[i];
// i := i + 1;
// }
// queueInsert[queueInsert.Length - 1] := item;
// Content := Content + [item];
// rear := rear + 1;
// circularQueue := queueInsert;
// }
}
method auxInsertEmptyQueue(item:int)
requires front == 0 && rear == 0 && circularQueue.Length == 0
ensures circularQueue.Length == 1
ensures Content == [item]
ensures |Content| == 1
ensures rear == 1
ensures counter == old(counter) + 1
ensures front == 0
{
counter := counter + 1;
var queueInsert: array<int>;
queueInsert := new int [circularQueue.Length + 1];
queueInsert[0] := item;
circularQueue := queueInsert;
Content := [item];
rear := rear + 1;
}
method auxInsertEndQueue(item:int)
requires front == 0 && rear == circularQueue.Length && circularQueue.Length >= 1
ensures Content == old(Content) + [item]
ensures |Content| == old(|Content|) + 1
ensures front == 0
ensures rear == old(rear) + 1
ensures counter == old(counter) + 1
// {
// counter := counter + 1;
// var queueInsert: array<int>;
// queueInsert := new int [circularQueue.Length + 1];
// var i: nat := 0;
// while i < circularQueue.Length
// invariant circularQueue.Length + 1 == queueInsert.Length
// invariant 0 <= i <= circularQueue.Length
// invariant forall j :: 0 <= j < i ==> queueInsert[j] == circularQueue[j]
// {
// queueInsert[i] := circularQueue[i];
// i := i + 1;
// }
// queueInsert[queueInsert.Length - 1] := item;
// Content := Content + [item];
// rear := rear + 1;
// circularQueue := queueInsert;
// }
method auxInsertSpaceQueue(item:int)
requires rear < front && front < circularQueue.Length
ensures rear == old(rear) + 1
ensures counter == old(counter) + 1
ensures Content == old(Content[0..rear]) + [item] + old(Content[rear+1..circularQueue.Length])
ensures |Content| == old(|Content|) + 1
method auxInsertInitQueue(item:int)
method auxInsertBetweenQueue(item:int)
// remove just by moving the pointer,
// without resetting the value at that position, since it will
// probably be overwritten by the next insertion
method remove() returns (item: int)
requires front < circularQueue.Length
requires circularQueue.Length > 0
ensures rear <= |old(Content)|
ensures circularQueue.Length > 0
ensures item == old(Content)[old(front)]
ensures front == (old(front) + 1) % circularQueue.Length
ensures old(front) < rear ==> Content == old(Content)[old(front)..rear]
ensures old(front) > rear ==> Content == old(Content)[0 .. rear] + old(Content)[old(front)..|old(Content)|]
/*{
if counter == 0 {
item := -1;
} else {
item := circularQueue[front];
front := (front + 1) % circularQueue.Length;
counter := counter - 1;
}
}*/
method size() returns (size:nat)
ensures size == counter
{
size := counter;
}
method isEmpty() returns (isEmpty: bool)
ensures isEmpty == true ==> counter == 0;
ensures isEmpty == false ==> counter != 0;
{
isEmpty := if counter == 0 then true else false;
}
method contains(item: int) returns (contains: bool)
ensures contains == true ==> item in circularQueue[..]
ensures contains == false ==> item !in circularQueue[..]
{
var i: nat := 0;
contains := false;
while (i < circularQueue.Length)
decreases circularQueue.Length - i
invariant 0 <= i <= circularQueue.Length
invariant !contains ==> forall j :: 0 <= j < i ==> circularQueue[j] != item
{
if (circularQueue[i] == item) {
contains := true;
break;
}
i := i + 1;
}
}
// TODO
method mergeQueues(otherQueue: Queue) returns (mergedQueue: Queue)
{
// queue1.merge(queue2)
var newQueueSize : int := otherQueue.circularQueue.Length + circularQueue.Length;
var newFront: int := front;
var newRear: int := otherQueue.rear;
var tmp: array<int> := new int[newQueueSize];
forall i | 0 <= i < circularQueue.Length
{
tmp[i] := circularQueue[i];
}
// should we copy empty values?
// how do we identify the empty slots? between rear and front
// how do we iterate between rear and front? front -> rear
// [1, 3, 5, 7, 9] + [0, 2, 4, 6, 8] => [0, 2, 4, 6, 8, 1, 3, 5, 7, 9]
// front => 8
// rear => 0
mergedQueue := new Queue();
}
}
method Main ()
{
var circularQueue := new Queue();
assert circularQueue.circularQueue.Length == 0;
assert circularQueue.Content == [];
assert circularQueue.Content != [1];
var isQueueEmpty := circularQueue.isEmpty();
assert isQueueEmpty == true;
var queueSize := circularQueue.size();
assert queueSize == 0;
circularQueue.auxInsertEmptyQueue(2);
assert circularQueue.Content == [2];
assert circularQueue.counter == 1;
assert circularQueue.circularQueue.Length == 1;
assert circularQueue.front == 0;
assert circularQueue.rear == 1;
assert circularQueue.rear != 2;
assert circularQueue.front != 2;
circularQueue.auxInsertEndQueue(4);
assert circularQueue.Content == [2,4];
assert circularQueue.counter == 2;
assert circularQueue.front == 0;
assert circularQueue.rear == 2;
circularQueue.auxInsertEndQueue(4);
assert circularQueue.Content == [2,4,4];
assert circularQueue.counter == 3;
circularQueue.auxInsertEndQueue(56);
assert circularQueue.Content == [2,4,4,56];
assert circularQueue.counter == 4;
var contains56 := circularQueue.contains(56);
assert contains56 == true;
var contains4 := circularQueue.contains(4);
assert contains4 == true;
var item := circularQueue.remove();
assert item == 2;
//assert circularQueue.Content == [2, 4, 4, 56];
assert (0 + 1) % 6 == 1;
assert (1 + 1) % 6 == 2;
assert (2 + 1) % 6 == 3;
assert (3 + 1) % 6 == 4;
assert (4 + 1) % 6 == 5;
assert (5 + 1) % 6 == 0;
assert (0 + 1) % 6 == 1;
}
| class {:autocontracts} Queue {
// Attributes
var circularQueue: array<int>;
var rear: nat; // tail
var front: nat; // head
var counter: nat;
// Abstraction
ghost var Content: seq<int>;
// Predicate
ghost predicate Valid()
{
0 <= counter <= circularQueue.Length &&
0 <= front &&
0 <= rear &&
Content == circularQueue[..]
}
// Constructor
constructor()
ensures circularQueue.Length == 0
ensures front == 0 && rear == 0
ensures Content == [] // REVIEW
ensures counter == 0
{
circularQueue := new int[0];
rear := 0;
front := 0;
Content := [];
counter := 0;
} //[size] ; [1, 2, 3, 4]
method insert(item: int)
// requires rear <= circularQueue.Length
// ensures (front == 0 && rear == 0 && circularQueue.Length == 1) ==>
// (
// Content == [item] &&
// |Content| == 1
// )
// ensures circularQueue.Length != 0 ==>
// (
// (front == 0 && rear == 0 && circularQueue.Length == 1) ==>
// (
// Content == old(Content) &&
// |Content| == old(|Content|)
// )
// ||
// (front == 0 && rear == circularQueue.Length-1 ) ==>
// (
// Content == old(Content) + [item] &&
// |Content| == old(|Content|) + 1
// )
// ||
// (rear + 1 != front && rear != circularQueue.Length-1 && rear + 1 < circularQueue.Length - 1) ==>
// (
// Content == old(Content[0..rear]) + [item] + old(Content[rear..circularQueue.Length])
// )
// ||
// (rear + 1 == front) ==>
// (
// Content[0..rear + 1] == old(Content[0..rear]) + [item] &&
// forall i :: rear + 2 <= i <= circularQueue.Length ==> Content[i] == old(Content[i-1])
// )
// )
{
//counter := counter + 1;
// if front == 0 && rear == 0 && circularQueue.Length == 0
// {
// var queueInsert: array<int>;
// queueInsert := new int [circularQueue.Length + 1];
// queueInsert[0] := item;
// circularQueue := queueInsert;
// Content := [item];
// rear := rear + 1;
// }
// else if front == 0 && rear == circularQueue.Length-1 && circularQueue.Length > 0
// {
// var queueInsert: array<int>;
// queueInsert := new int [circularQueue.Length + 1];
// var i: nat := 0;
// while i < circularQueue.Length
// invariant circularQueue.Length + 1 == queueInsert.Length
// {
// queueInsert[i] := circularQueue[i];
// i := i + 1;
// }
// queueInsert[queueInsert.Length - 1] := item;
// Content := Content + [item];
// rear := rear + 1;
// circularQueue := queueInsert;
// }
}
method auxInsertEmptyQueue(item:int)
requires front == 0 && rear == 0 && circularQueue.Length == 0
ensures circularQueue.Length == 1
ensures Content == [item]
ensures |Content| == 1
ensures rear == 1
ensures counter == old(counter) + 1
ensures front == 0
{
counter := counter + 1;
var queueInsert: array<int>;
queueInsert := new int [circularQueue.Length + 1];
queueInsert[0] := item;
circularQueue := queueInsert;
Content := [item];
rear := rear + 1;
}
method auxInsertEndQueue(item:int)
requires front == 0 && rear == circularQueue.Length && circularQueue.Length >= 1
ensures Content == old(Content) + [item]
ensures |Content| == old(|Content|) + 1
ensures front == 0
ensures rear == old(rear) + 1
ensures counter == old(counter) + 1
// {
// counter := counter + 1;
// var queueInsert: array<int>;
// queueInsert := new int [circularQueue.Length + 1];
// var i: nat := 0;
// while i < circularQueue.Length
// invariant circularQueue.Length + 1 == queueInsert.Length
// invariant 0 <= i <= circularQueue.Length
// invariant forall j :: 0 <= j < i ==> queueInsert[j] == circularQueue[j]
// {
// queueInsert[i] := circularQueue[i];
// i := i + 1;
// }
// queueInsert[queueInsert.Length - 1] := item;
// Content := Content + [item];
// rear := rear + 1;
// circularQueue := queueInsert;
// }
method auxInsertSpaceQueue(item:int)
requires rear < front && front < circularQueue.Length
ensures rear == old(rear) + 1
ensures counter == old(counter) + 1
ensures Content == old(Content[0..rear]) + [item] + old(Content[rear+1..circularQueue.Length])
ensures |Content| == old(|Content|) + 1
method auxInsertInitQueue(item:int)
method auxInsertBetweenQueue(item:int)
// remove just by moving the pointer,
// without resetting the value at that position, since it will
// probably be overwritten by the next insertion
method remove() returns (item: int)
requires front < circularQueue.Length
requires circularQueue.Length > 0
ensures rear <= |old(Content)|
ensures circularQueue.Length > 0
ensures item == old(Content)[old(front)]
ensures front == (old(front) + 1) % circularQueue.Length
ensures old(front) < rear ==> Content == old(Content)[old(front)..rear]
ensures old(front) > rear ==> Content == old(Content)[0 .. rear] + old(Content)[old(front)..|old(Content)|]
/*{
if counter == 0 {
item := -1;
} else {
item := circularQueue[front];
front := (front + 1) % circularQueue.Length;
counter := counter - 1;
}
}*/
method size() returns (size:nat)
ensures size == counter
{
size := counter;
}
method isEmpty() returns (isEmpty: bool)
ensures isEmpty == true ==> counter == 0;
ensures isEmpty == false ==> counter != 0;
{
isEmpty := if counter == 0 then true else false;
}
method contains(item: int) returns (contains: bool)
ensures contains == true ==> item in circularQueue[..]
ensures contains == false ==> item !in circularQueue[..]
{
var i: nat := 0;
contains := false;
while (i < circularQueue.Length)
{
if (circularQueue[i] == item) {
contains := true;
break;
}
i := i + 1;
}
}
// TODO
method mergeQueues(otherQueue: Queue) returns (mergedQueue: Queue)
{
// queue1.merge(queue2)
var newQueueSize : int := otherQueue.circularQueue.Length + circularQueue.Length;
var newFront: int := front;
var newRear: int := otherQueue.rear;
var tmp: array<int> := new int[newQueueSize];
forall i | 0 <= i < circularQueue.Length
{
tmp[i] := circularQueue[i];
}
// should we copy empty values?
// how do we identify the empty slots? between rear and front
// how do we iterate between rear and front? front -> rear
// [1, 3, 5, 7, 9] + [0, 2, 4, 6, 8] => [0, 2, 4, 6, 8, 1, 3, 5, 7, 9]
// front => 8
// rear => 0
mergedQueue := new Queue();
}
}
method Main ()
{
var circularQueue := new Queue();
var isQueueEmpty := circularQueue.isEmpty();
var queueSize := circularQueue.size();
circularQueue.auxInsertEmptyQueue(2);
circularQueue.auxInsertEndQueue(4);
circularQueue.auxInsertEndQueue(4);
circularQueue.auxInsertEndQueue(56);
var contains56 := circularQueue.contains(56);
var contains4 := circularQueue.contains(4);
var item := circularQueue.remove();
//assert circularQueue.Content == [2, 4, 4, 56];
}
|
394 | cmsc433_tmp_tmpe3ob3a0o_dafny_project1_p1-assignment-2.dfy | // ASSIGNMENT P1
// CMSC 433 FALL 2023
// PERFECT SCORE: 100 POINTS
//
// This assignment contains nine questions, each of which involves writing Dafny
// code. You should include your solutions in a single Dafny file and submit it using
// Gradescope.
//
// Revision history
//
// 2023-09-22 2:50 pm Fixed typo in Problem 3.
// Question 1 (5 points)
//
// Fill in a requires clause that enables Dafny to verify
// method PlusOne
method PlusOne (x : int) returns (y : int)
requires x >= 0
ensures y > 0
{
y := x+1;
}
// Question 2 (5 points)
//
// Fill in requires clause(s) that enable(s) Dafny to verify the array bounds
// in method Swap (which swaps elements i and j in array a).
method Swap (a : array?<int>, i : int, j : int)
requires a != null && 0 <= i < a.Length && 0 <= j < a.Length // TODO
modifies a // Dafny requires listing of objects modified in a method
{
var tmp : int := a[i];
a[i] := a[j];
a[j] := tmp;
}
// Question 3 (5 points)
//
// Give ensures clause(s) asserting that d is the result, and r the
// remainder, of dividing m by n. Your clauses cannot use "/" or "%" (which are
// the Dafny division and mod operators, respectively). By definition, the
// remainder must be non-negative.
method IntDiv (m : int, n : int) returns (d : int, r : int)
requires n > 0
ensures m == n * d + r && 0 <= r < n // TODO
{
return m / n, m % n;
}
// Question 4 (5 points)
//
// Give ensures clause(s) asserting that the return value has the same
// length as array a and contains as its elements the sum of the
// corresponding elements in arrays a and b.
method ArraySum (a : array<int>, b : array<int>) returns (c : array<int>)
requires a.Length == b.Length
ensures c.Length == a.Length &&
forall i : int :: 0 <= i < c.Length ==> c[i] == a[i] + b[i] // TODO
{
c := new int [a.Length]; // Creates new array of size a.Length
var i : int := 0;
while (i < a.Length)
invariant i <= a.Length
invariant forall j : int :: 0 <= j < i ==> c[j] == a[j] + b[j]
{
c[i] := a[i] + b[i];
i := i + 1;
}
}
// Question 5 (10 points)
// Euclid's algorithm is used to compute the greatest common divisor of two
// positive integers. If m and n are two such integers, then gcd(m,n) is the
// largest positive integer that evenly divides both m and n, where j evenly divides i
// if and only if i % j == 0 (% is the Dafny mod operator). Write requires and
// ensures clauses for the method header Euclid below. Your requires clauses
// should also specify that the first argument is at least as large as the second.
// You do *not* need to implement the method!
method Euclid (m : int, n : int) returns (gcd : int)
requires m > 1 && n > 1 && m >= n // TODO
ensures gcd > 0 && gcd <= n && gcd <= m && m % gcd == 0 && n % gcd == 0 // TODO
// YOU DO NOT NEED TO IMPLEMENT Euclid!!
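// For illustration, one possible way Euclid's algorithm could be implemented
// (a sketch: the name EuclidSketch is not part of the assignment, and the gcd
// postconditions above are omitted here because proving them would require
// additional lemmas about divisibility):
method EuclidSketch (m : int, n : int) returns (gcd : int)
  requires m > 1 && n > 1 && m >= n
{
  var a : int := m;
  var b : int := n;
  while (b != 0)
    invariant a > 0 && b >= 0
    decreases b
  {
    // replace (a, b) with (b, a mod b) until the remainder reaches 0
    a, b := b, a % b;
  }
  gcd := a;
}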
// Question 6 (10 points)
//
// Give invariant(s) that enable(s) Dafny to verify the following program, which
// returns true if and only if array a is sorted.
method IsSorted (a : array<int>) returns (isSorted : bool)
ensures isSorted <==> forall j : int :: 1 <= j < a.Length ==> a[j-1] <= a[j]
{
isSorted := true;
var i : int := 1;
if (a.Length < 2)
{
return;
}
else
{
while (i < a.Length)
invariant 1 <= i <= a.Length
invariant isSorted <==> forall j: int :: 1 <= j < i ==> a[j-1] <= a[j] // TODO
{
if a[i-1] > a[i]
{
return false;
}
i := i+1;
}
}
}
// Question 7 (20 points)
//
// Implement, and have Dafny verify, the method IsPrime below, which returns true
// if and only if the given positive integer is prime.
method IsPrime (m : int) returns (isPrime : bool)
requires m > 0 // m must be greater than 0
ensures isPrime <==> (m > 1 && forall j : int :: 2 <= j < m ==> m % j != 0)
// ensures states that "isPrime is true iff m > 1 && m is not divisible by any j in [2, m)"
{
isPrime := true; // assume m is prime initially
if m <= 1 {
isPrime := false;
} else {
var i : int := 2;
while (i < m)
invariant isPrime <==> forall j : int :: 2 <= j < i ==> m % j != 0
// invariant specifies that isPrime is true iff at each j from 2 to i-1, not j | m
{
if (m % i == 0)
{
isPrime := false;
break;
}
i := i + 1;
}
}
}
// Question 8 (20 points)
//
// Implement, and have Dafny verify, the method Reverse below, which returns a new array
// aRev consisting of the elements of a, but in reverse order. To create a new
// array of ints use the Dafny command "new int[...]", where "..." is the number
// of elements in the array.
method Reverse (a : array<int>) returns (aRev : array<int>)
ensures aRev.Length == a.Length
ensures forall i : int :: 0 <= i < a.Length ==> a[i] == aRev[aRev.Length-i-1]
ensures fresh(aRev) // Indicates returned object is newly created in method body
{
aRev := new int[a.Length];
var i : int := 0;
while (i < a.Length)
invariant 0 <= i <= a.Length
invariant forall j : int :: 0 <= j < i ==> aRev[j] == a[a.Length-j-1]
{
aRev[i] := a[a.Length-i-1];
i := i + 1;
}
}
// Question 9 (20 points)
//
// Implement and verify method NoDups, which returns true if and only if there
// are no duplicate elements in array a. Note that the requires clause allows
// you to assume that a is sorted, and that this precondition is necessary for
// the ensures clause to imply a lack of duplicates.
method NoDups (a : array<int>) returns (noDups : bool)
requires forall j : int :: 0 < j < a.Length ==> a[j-1] <= a[j] // a sorted
ensures noDups <==> forall j : int :: 1 <= j < a.Length ==> a[j-1] != a[j]
{
noDups := true;
var i : int := 1;
if (a.Length < 2)
{
return;
}
while (i < a.Length)
invariant 1 <= i <= a.Length
invariant noDups <==> forall j : int :: 1 <= j < i ==> a[j-1] != a[j]
{
if (a[i-1] == a[i])
{
noDups := false;
break;
}
i := i + 1;
}
}
| // ASSIGNMENT P1
// CMSC 433 FALL 2023
// PERFECT SCORE: 100 POINTS
//
// This assignment contains nine questions, each of which involves writing Dafny
// code. You should include your solutions in a single Dafny file and submit it using
// Gradescope.
//
// Revision history
//
// 2023-09-22 2:50 pm Fixed typo in Problem 3.
// Question 1 (5 points)
//
// Fill in a requires clause that enables Dafny to verify
// method PlusOne
method PlusOne (x : int) returns (y : int)
requires x >= 0
ensures y > 0
{
y := x+1;
}
// Question 2 (5 points)
//
// Fill in requires clause(s) that enable(s) Dafny to verify the array bounds
// in method Swap (which swaps elements i and j in array a).
method Swap (a : array?<int>, i : int, j : int)
requires a != null && 0 <= i < a.Length && 0 <= j < a.Length// TODO
modifies a // Dafny requires listing of objects modified in a method
{
var tmp : int := a[i];
a[i] := a[j];
  a[j] := tmp;
}
// Question 3 (5 points)
//
// Give ensures clause(s) asserting that d is the result, and r the
// remainder, of dividing m by n. Your clauses cannot use "/" or "%" (which are
// the Dafny division and mod operators, respectively). By definition, the
// remainder must be non-negative.
method IntDiv (m : int, n : int) returns (d : int, r : int)
requires n > 0
ensures m == n * d + r && 0 <= r < n // TODO
{
return m / n, m % n;
}
// Question 4 (5 points)
//
// Give ensures clause(s) asserting that the return value has the same
// length as array a and contains as its elements the sum of the
// corresponding elements in arrays a and b.
method ArraySum (a : array<int>, b : array<int>) returns (c : array<int>)
requires a.Length == b.Length
ensures c.Length == a.Length &&
forall i : int :: 0 <= i < c.Length ==> c[i] == a[i] + b[i] // TODO
{
c := new int [a.Length]; // Creates new array of size a.Length
var i : int := 0;
while (i < a.Length)
{
c[i] := a[i] + b[i];
i := i + 1;
}
}
// Question 5 (10 points)
// Euclid's algorithm is used to compute the greatest common divisor of two
// positive integers. If m and n are two such integers, then gcd(m,n) is the
// largest positive integer that evenly divides both m and n, where j evenly divides i
// if and only if i % j == 0 (% is the Dafny mod operator). Write requires and
// ensures clauses for the method header Euclid below. Your requires clauses
// should also specify that the first argument is at least as large as the second.
// You do *not* need to implement the method!
method Euclid (m : int, n : int) returns (gcd : int)
requires m > 1 && n > 1 && m >= n // TODO
ensures gcd > 0 && gcd <= n && gcd <= m && m % gcd == 0 && n % gcd == 0 // TODO
// YOU DO NOT NEED TO IMPLEMENT Euclid!!
// Question 6 (10 points)
//
// Give invariant(s) that enable(s) Dafny to verify the following program, which
// returns true if and only if array a is sorted.
method IsSorted (a : array<int>) returns (isSorted : bool)
ensures isSorted <==> forall j : int :: 1 <= j < a.Length ==> a[j-1] <= a[j]
{
isSorted := true;
var i : int := 1;
if (a.Length < 2)
{
return;
}
else
{
while (i < a.Length)
{
if a[i-1] > a[i]
{
return false;
}
i := i+1;
}
}
}
// Question 7 (20 points)
//
// Implement, and have Dafny verify, the method IsPrime below, which returns true
// if and only if the given positive integer is prime.
method IsPrime (m : int) returns (isPrime : bool)
requires m > 0 // m must be greater than 0
ensures isPrime <==> (m > 1 && forall j : int :: 2 <= j < m ==> m % j != 0)
// ensures states that "isPrime is true iff m > 1 && m is not divisible by any j in [2, m)"
{
isPrime := true; // assume m is prime initially
if m <= 1 {
isPrime := false;
} else {
var i : int := 2;
while (i < m)
// invariant specifies that isPrime is true iff at each j from 2 to i-1, not j | m
{
if (m % i == 0)
{
isPrime := false;
break;
}
i := i + 1;
}
}
}
// Question 8 (20 points)
//
// Implement, and have Dafny verify, the method Reverse below, which returns a new array
// aRev consisting of the elements of a, but in reverse order. To create a new
// array of ints use the Dafny command "new int[...]", where "..." is the number
// of elements in the array.
method Reverse (a : array<int>) returns (aRev : array<int>)
ensures aRev.Length == a.Length
ensures forall i : int :: 0 <= i < a.Length ==> a[i] == aRev[aRev.Length-i-1]
ensures fresh(aRev) // Indicates returned object is newly created in method body
{
aRev := new int[a.Length];
var i : int := 0;
while (i < a.Length)
{
aRev[i] := a[a.Length-i-1];
i := i + 1;
}
}
// Question 9 (20 points)
//
// Implement and verify method NoDups, which returns true if and only if there
// are no duplicate elements in array a. Note that the requires clause allows
// you to assume that a is sorted, and that this precondition is necessary for
// the ensures clause to imply a lack of duplicates.
method NoDups (a : array<int>) returns (noDups : bool)
requires forall j : int :: 0 < j < a.Length ==> a[j-1] <= a[j] // a sorted
ensures noDups <==> forall j : int :: 1 <= j < a.Length ==> a[j-1] != a[j]
{
noDups := true;
var i : int := 1;
if (a.Length < 2)
{
return;
}
while (i < a.Length)
{
if (a[i-1] == a[i])
{
noDups := false;
break;
}
i := i + 1;
}
}
|
395 | cs245-verification_tmp_tmp0h_nxhqp_A8_Q1.dfy | // A8Q1 — Steph Renee McIntyre
// Following the solutions from Carmen Bruni
// There is no definition for power, so this function will be used for validating that our imperative program is correct. This is just for Dafny.
function power(a: int, n: int): int //function for a to the power of n
requires 0 <= n;
decreases n;{if (n == 0) then 1 else a * power(a, n - 1)}
method A8Q1(y0: int, x: int) returns (z: int)
requires y0 >= 0;
/*Post-Condition*/ ensures z==power(x,y0);
{var y := y0; //This local copy is here because Dafny's method input parameters are immutable.
/* (| y=y0 ^ y>=0 |) - Pre-Condition */
/* (| 1=power(x,y0-y) ^ y>=0 |) - implied (a) */
z := 1;
/* (| z=power(x,y0-y) ^ y>=0 |) - assignment */
while (y>0)
invariant z==power(x,y0-y) && y>=0;
decreases y; /* variant */
{
/* (| z=power(x,y0-y) ^ y>=0 ^ y>0 |) - partial-while */
/* (| z*x=power(x,y0-(y-1)) ^ (y-1)>=0 |) - implied (b) */
z := z*x;
/* (| z=power(x,y0-(y-1)) ^ (y-1)>=0 |) - assignment */
y := y-1;
/* (| z=power(x,y0-y) ^ y>=0 |) - assignment */
}
/* (| z=power(x,y0-y) ^ y>=0 ^ -(y>0) |) - partial-while */
/* (| z=power(x,y0) |) - implied (c) */
}
/* Proof of implieds can be seen on LEARN.
Note: If you are unconvinced, putting asserts for each condition will demonstrate the correctness of the statements.
*/
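/* For instance, implied (a) can be demonstrated in place by adding, just before "z := 1;",
   an assert along these lines (a sketch):
     assert 1 == power(x, y0-y) && y >= 0;
*/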
| // A8Q1 — Steph Renee McIntyre
// Following the solutions from Carmen Bruni
// There is no definition for power, so this function will be used for validating that our imperative program is correct. This is just for Dafny.
function power(a: int, n: int): int //function for a to the power of n
requires 0 <= n;
{if (n == 0) then 1 else a * power(a, n - 1)}
method A8Q1(y0: int, x: int) returns (z: int)
requires y0 >= 0;
/*Post-Condition*/ ensures z==power(x,y0);
{var y := y0; //This local copy is here because Dafny's method input parameters are immutable.
/* (| y=y0 ^ y>=0 |) - Pre-Condition */
/* (| 1=power(x,y0-y) ^ y>=0 |) - implied (a) */
z := 1;
/* (| z=power(x,y0-y) ^ y>=0 |) - assignment */
while (y>0)
{
/* (| z=power(x,y0-y) ^ y>=0 ^ y>0 |) - partial-while */
/* (| z*x=power(x,y0-(y-1)) ^ (y-1)>=0 |) - implied (b) */
z := z*x;
/* (| z=power(x,y0-(y-1)) ^ (y-1)>=0 |) - assignment */
y := y-1;
/* (| z=power(x,y0-y) ^ y>=0 |) - assignment */
}
/* (| z=power(x,y0-y) ^ y>=0 ^ -(y>0) |) - partial-while */
/* (| z=power(x,y0) |) - implied (c) */
}
/* Proof of implieds can be seen on LEARN.
Note: If you are unconvinced, putting asserts for each condition will demonstrate the correctness of the statements.
*/
|
396 | cs245-verification_tmp_tmp0h_nxhqp_A8_Q2.dfy | // A8Q2 — Steph Renee McIntyre
// Following the solutions from Carmen Bruni
method A8Q1(x: int, y: int, z: int) returns (m: int)
/*Pre-Condition*/ requires true;
/*Post-Condition*/ ensures m<=x && m<=y && m<=z;
{
/* (| true |) - Pre-Condition */
if(z<y){
/* (| z<y |) - if-then-else */
if(z<x){
    /* (| z<y ^ z<x |) - if-then-else */
/* (| z<=x ^ z<=y ^ z<=z |) - implied (a) */
m := z;
/* (| m<=x ^ m<=y ^ m<=z |) - assignment */
}else{
      /* (| z<y ^ -(z<x) |) - if-then-else */
/* (| x<=x ^ x<=y ^ x<=z |) - implied (b) */
m := x;
/* (| m<=x ^ m<=y ^ m<=z |) - assignment */
}
}else{
/* (| -(z<y) |) - if-then-else */
/* (| y<=y ^ y<=z |) - implied (c) */
m := y;
/* (| m<=y ^ y<=z |) - assignment */
if (x<y){
/* (| m<=y ^ y<=z ^ x<y |) - if-then */
/* (| x<=x ^ x<=y ^ x<=z |) - implied (d) */
m := x;
/* (| m<=x ^ m<=y ^ m<=z |) - assignment */
}
/* (| m<=x ^ m<=y ^ m<=z |) - if-then: implied (e) */
}
/* (| m<=x ^ m<=y ^ m<=z |) - if-then-else */
}
/* Proof of implieds can be seen on LEARN.
Note: If you are unconvinced, putting asserts for each condition will demonstrate the correctness of the statements.
*/
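/* For instance, implied (c) can be demonstrated in place by adding, at the start of the
   outer else branch, an assert along these lines (a sketch):
     assert y <= y && y <= z;
*/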
| // A8Q2 — Steph Renee McIntyre
// Following the solutions from Carmen Bruni
method A8Q1(x: int, y: int, z: int) returns (m: int)
/*Pre-Condition*/ requires true;
/*Post-Condition*/ ensures m<=x && m<=y && m<=z;
{
/* (| true |) - Pre-Condition */
if(z<y){
/* (| z<y |) - if-then-else */
if(z<x){
    /* (| z<y ^ z<x |) - if-then-else */
/* (| z<=x ^ z<=y ^ z<=z |) - implied (a) */
m := z;
/* (| m<=x ^ m<=y ^ m<=z |) - assignment */
}else{
      /* (| z<y ^ -(z<x) |) - if-then-else */
/* (| x<=x ^ x<=y ^ x<=z |) - implied (b) */
m := x;
/* (| m<=x ^ m<=y ^ m<=z |) - assignment */
}
}else{
/* (| -(z<y) |) - if-then-else */
/* (| y<=y ^ y<=z |) - implied (c) */
m := y;
/* (| m<=y ^ y<=z |) - assignment */
if (x<y){
/* (| m<=y ^ y<=z ^ x<y |) - if-then */
/* (| x<=x ^ x<=y ^ x<=z |) - implied (d) */
m := x;
/* (| m<=x ^ m<=y ^ m<=z |) - assignment */
}
/* (| m<=x ^ m<=y ^ m<=z |) - if-then: implied (e) */
}
/* (| m<=x ^ m<=y ^ m<=z |) - if-then-else */
}
/* Proof of implieds can be seen on LEARN.
Note: If you are unconvinced, putting asserts for each condition will demonstrate the correctness of the statements.
*/
|
397 | cs245-verification_tmp_tmp0h_nxhqp_Assignments_simple.dfy | //Simple Assignment Example -- Steph Renee McIntyre
//Based on the code used in the course overheads for Fall 2018
method simple(y: int) returns (x: int)
requires y==6;
ensures x==7;
{
/* (| y = 6 |) //This is from the requires statement.*/
/* (| y + 1 = 7 |) - implied (a) */ assert y+1 == 7;
x := y + 1;
/* (| x = 7 |) - assignment //This is from the ensures statement. */
}
/* Proof of implied (a):
Let y = 6.
Then add 1 to both sides of the equality: y + 1 = 6 + 1
Then y + 1 = 7.
*/
| //Simple Assignment Example -- Steph Renee McIntyre
//Based on the code used in the course overheads for Fall 2018
method simple(y: int) returns (x: int)
requires y==6;
ensures x==7;
{
/* (| y = 6 |) //This is from the requires statement.*/
/* (| y + 1 = 7 |) - implied (a) */ assert y+1 == 7;
x := y + 1;
/* (| x = 7 |) - assignment //This is from the ensures statement. */
}
/* Proof of implied (a):
Let y = 6.
Then add 1 to both sides of the equality: y + 1 = 6 + 1
Then y + 1 = 7.
*/
|
398 | cs245-verification_tmp_tmp0h_nxhqp_SortingIssues_BubbleSortCode.dfy | // Sorting:
// Pre/Post Condition Issues - An investigation
// -- Stephanie McIntyre
// Based on examples in class
// The following is just plain old bubble sort.
//
// Can you find the invariants for the while loops?
// Can you annotate this?
// What about the pre/post-conditions?
method BubbleSort(A: array<int>, n: int)
modifies A;
requires A.Length>=0 && n==A.Length;
{
var i := 0;
var j := 0;
while(i < n-1){
while(j < n-i-1){
if(A[j]<A[i]){
var t := A[j];
A[j] := A[i];
A[i] := t;
}
j := j+1;
}
i := i+1;
}
}
/*Doesn't my title look all bubbly and cute? I'm trying... */
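/* One possible starting point (a sketch): since the loops only ever swap elements of A,
   a candidate post-condition is "ensures multiset(A[..]) == old(multiset(A[..]))",
   provable with matching multiset-preservation invariants on both loops.
   Note that as written, j is never reset at the start of an outer iteration and the
   comparison uses A[i] rather than adjacent elements, so a full sortedness
   post-condition would not hold without also revising the code. */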
| // Sorting:
// Pre/Post Condition Issues - An investigation
// -- Stephanie McIntyre
// Based on examples in class
// The following is just plain old bubble sort.
//
// Can you find the invariants for the while loops?
// Can you annotate this?
// What about the pre/post-conditions?
method BubbleSort(A: array<int>, n: int)
modifies A;
requires A.Length>=0 && n==A.Length;
{
var i := 0;
var j := 0;
while(i < n-1){
while(j < n-i-1){
if(A[j]<A[i]){
var t := A[j];
A[j] := A[i];
A[i] := t;
}
j := j+1;
}
i := i+1;
}
}
/*Doesn't my title look all bubbly and cute? I'm trying... */
|
399 | cs245-verification_tmp_tmp0h_nxhqp_SortingIssues_FirstAttempt.dfy | // Sorting:
// Pre/Post Condition Issues - An investigation
// -- Stephanie McIntyre
// Based on examples in class
// First Attempt at specifying requirements for sorting array A in increasing order
// We want our Hoare triple of (|Pre-Condition|) Code (|Post-Condition|) to hold iff A is properly sorted.
method sort(A: array<int>, n: int)
modifies A; requires n==A.Length;
/* Pre-Condition */ requires n>=0;
/* Post-Condition */ ensures forall i,j:: 0<=i<=j<n ==> A[i]<=A[j]; //This states that A is sorted.
//Can we write code that does not sort A that still satisfies the requirements?
//Consider the following program:
{
var k := 0;
while(k<n)
invariant k<=n;
invariant forall i:: 0<=i<k ==> A[i]==i;
{
A[k] := k;
k := k+1;
}
}
| // Sorting:
// Pre/Post Condition Issues - An investigation
// -- Stephanie McIntyre
// Based on examples in class
// First Attempt at specifying requirements for sorting array A in increasing order
// We want our Hoare triple of (|Pre-Condition|) Code (|Post-Condition|) to hold iff A is properly sorted.
method sort(A: array<int>, n: int)
modifies A; requires n==A.Length;
/* Pre-Condition */ requires n>=0;
/* Post-Condition */ ensures forall i,j:: 0<=i<=j<n ==> A[i]<=A[j]; //This states that A is sorted.
//Can we write code that does not sort A that still satisfies the requirements?
//Consider the following program:
{
var k := 0;
while(k<n)
{
A[k] := k;
k := k+1;
}
}
|