(**************************************************************************)
(*       ___                                                              *)
(*      ||M||                                                             *)
(*      ||A||       A project by Andrea Asperti                           *)
(*      ||T||                                                             *)
(*      ||I||       Developers:                                           *)
(*      ||T||         The HELM team.                                      *)
(*      ||A||         http://helm.cs.unibo.it                             *)
(*      \   /                                                             *)
(*       \ /        This file is distributed under the terms of the       *)
(*        v         GNU General Public License Version 2                  *)
(*                                                                        *)
(**************************************************************************)

(*
Polymorphism and Higher Order


*)

include "basics/bool.ma".

include "basics/types.ma".

(* Most of the types we have seen so far are enumerated types, composed by a 
finite set of alternatives, and records, composed of tuples of heterogeneous 
elements. A more interesting case of type definition is when some of the rules 
defining its elements are recursive, i.e. they allow the formation of more 
elements of the type in terms of the already defined ones. The most typical case 
is provided by the natural numbers, that can be defined as the smallest set 
generated by a constant 0 and a successor function from natural numbers to natural
numbers *)

(* Natural numbers: the type freely generated by zero (O) and successor (S). *)
inductive nat : Type[0] ≝ 
| O :nat
| S: nat →nat.

(* The two terms O and S are called constructors: they define the signature of the
type, whose objects are the elements freely generated by means of them. So, 
examples of natural numbers are O, S O, S (S O), S (S (S O)) and so on. 

The language of Matita allows the definition of well founded recursive functions 
over inductive types; in order to guarantee termination of recursion you are only 
allowed to make recursive calls on structurally smaller arguments than the ones 
you received in input. Most mathematical functions can be naturally defined in this
way. For instance, the sum of two natural numbers can be defined as follows *)

(* Addition, by structural recursion on the first argument:
   add O m = m and add (S a) m = S (add a m). *)
let rec add n m ≝ 
match n with
[ O ⇒ m
| S a ⇒ S (add a m)
].

(*
Elimination
It is worth to observe that the previous algorithm works by recursion over the
first argument. This means that, for instance, (add O x) will reduce to x, as 
expected, but (add x O) is stuck. 
How can we prove that, for a generic x, (add x O) = x? The mathematical tool to do 
it is called induction. The induction principle states that, given a property P(n) 
over natural numbers, if we prove P(0) and prove that, for any m, P(m) implies P(S m), 
then we can conclude P(n) for any n. 

The elim tactic allows you to apply induction in a very simple way. If your goal is 
P(n), the invocation of
  elim n
will break down your task to prove the two subgoals P(0) and ∀m.P(m) → P(S m).

Let us apply it to our case *)

(* O is a right unit for add; computation alone is stuck here, so we
   proceed by induction on a. *)
lemma add_0: ∀a. add a O = a.
#a elim a

(* If you stop the computation here, you will see on the right the two subgoals 
    - add O O = O
    - ∀x. add x O = x → add (S x) O = S x
After normalization, both goals are trivial.
*)

normalize // qed.

(* In a similar way, it is convenient to state a lemma about the behaviour of 
add when the second argument is not zero. *)

(* add commutes with the successor on its second argument. *)
lemma add_S : ∀a,b. add a (S b) = S (add a b).

(* In the same way as before, we proceed by induction over a. *)

#a #b elim a normalize //
qed. 

(* We are now in the position to prove the commutativity of the sum *)

(* Commutativity of addition, by induction on the first argument. *)
theorem add_comm : ∀a,b. add a b = add b a.
#a elim a normalize

(* We have two sub goals:
  G1: ∀b. b = add b O
  G2: ∀x.(∀b. add x b = add b x) → ∀b. S (add x b) = add b (S x). 
G1 is just our lemma add_0. For G2, we start introducing x and the induction 
hypothesis IH; then, the goal is proved by rewriting using add_S and IH.
For Matita, the task is trivial and we can simply close the goal with // *)

// qed.

(* COERCIONS *)

(* A boolean type with constructors tt and ff.
   NOTE(review): this declaration shadows the bool imported from
   basics/bool.ma (whose constructors are true/false, still used later by
   filter and fold) — verify the intended interplay between the two. *)
inductive bool : Type[0] ≝
| tt : bool
| ff : bool.

(* Numeric reading of a boolean: tt ↦ S O (one), ff ↦ O (zero); used below
   as the remainder of the division by two. *)
definition nat_of_bool ≝ λb. match b with 
[ tt ⇒ S O 
| ff ⇒ O 
].

(* coercion nat_of_bool. ?? *)
 
(* Let us now define the following function: *)

(* The double of a natural number: twice n = add n n. *)
definition twice ≝ λn.add n n. 

(* 
Existential
We are interested to prove that for any natural number n there exists a natural 
number m that is the integer half of n. This will give us the opportunity to 
introduce new connectives and quantifiers and, later on, to make some interesting 
consideration on proofs and computations. *)

(* Every n has an integer half m: n is either twice m or S (twice m).
   Proof by induction on n. *)
theorem ex_half: ∀n.∃m. n = twice m ∨ n = S (twice m).
#n elim n normalize 

(* We proceed by induction on n, that breaks down to the following goals:
  G1: ∃m.O = add m m ∨ O = S (add m m)
  G2: ∀x.(∃m. x = add m m ∨ x = S (add m m))→ ∃m. S x = add m m ∨ S x = S (add m m)
The only way we have to prove an existential goal is by exhibiting the witness, 
that in the case of first goal is O. We do it by applying the term called ex_intro 
instantiated by the witness. Then, it is clear that we must follow the left branch 
of the disjunction. One way to do it is by applying the term or_introl, that is 
the first constructor of the disjunction. However, remembering the names of 
constructors can be annoying: we can invoke the application of the n-th 
constructor of an inductive type (inferred by the current goal) by typing %n. At 
this point we are left with the subgoal O = add O O, that is closed by 
computation. It is worth to observe that invoking automation at depth /3/ would 
also automatically close G1.
*)
  [@(ex_intro … O) %1 //

(* 
Destructuration
The case of G2 is more complex. We should start introducing x and the 
inductive hypothesis
     IH: ∃m. x = add m m ∨ x = S (add m m) 
At this point we should assume the existence of m enjoying the inductive 
hypothesis. To eliminate the existential from the context we can just use the 
case tactic. This situation where we introduce something into the context and 
immediately eliminate it by case analysis is so frequent that Matita provides a 
convenient shorthand: you can just type a single "*". 
The star symbol should be reminiscent of an explosion: the idea is that you have
a structured hypothesis, and you ask to explode it into its constituents. In the 
case of the existential, it allows to pass from a goal of the shape 
    (∃x.P x) → Q
to a goal of the shape
    ∀x.P x → Q
*)
  |#x *
(* At this point we are left with a new goal with the following shape
  G3: ∀m. x = add m m ∨ x = S (add m m) → ....  
We should introduce m, the hypothesis H: x = add m m ∨ x = S (add m m), and 
then reason by cases on this hypothesis. It is the same situation as before: 
we explode the disjunctive hypothesis into its possible constituents. In the case 
of a disjunction, the * tactic allows to pass from a goal of the form
    A∨B → Q
to two subgoals of the form
    A → Q  and  B → Q
*)
  #m * #eqx
(* In the first subgoal, we are under the assumption that x = add m m. The half 
of (S x) is hence m, and we have to prove the right branch of the disjunction. 
In the second subgoal, we are under the assumption that x = S (add m m). The half 
of (S x) is hence (S m), and we have to follow the left branch of the disjunction.
*)
  [@(ex_intro … m) /2/ | @(ex_intro … (S m)) normalize /2/
  ]
qed. 

(* 
Computing vs. Proving
Instead of proving the existence of a number corresponding to the half of n, 
we could be interested in computing it. The best way to do it is to define this 
division operation together with the remainder, that in our case is just a 
boolean value: ff if the input term is even, and tt if the input term is odd. 
Since we must return a pair, we could use a suitably defined record type, or 
simply a product type nat × bool, defined in the basic library. The product type 
is just a sort of general purpose record, with standard fields fst and snd, called 
projections. 
A pair of values n and m is written (pair … n m) or \langle n,m \rangle - visually 
rendered as 〈n,m〉 

We first write down the function, and then discuss it.*)

(* Quotient and remainder of the division by 2, computed simultaneously.
   Per the code below, the boolean remainder is ff when n is even and tt
   when n is odd (consistently with nat_of_bool: tt ↦ 1, ff ↦ 0):
   if div2 a = 〈q,tt〉 (a odd)  then div2 (S a) = 〈S q,ff〉;
   if div2 a = 〈q,ff〉 (a even) then div2 (S a) = 〈q,tt〉. *)
let rec div2 n ≝ 
match n with
[ O ⇒ 〈O,ff〉
| S a ⇒ 
   let p ≝ (div2 a) in
   match (snd … p) with
   [ tt ⇒ 〈S (fst … p),ff〉
   | ff ⇒ 〈fst … p, tt〉
   ]
]. 

(* The function is computed by recursion over the input n. If n is 0, then the 
quotient is 0 and the remainder is ff. If n = S a, we start computing the half 
of a, say 〈q,b〉. Then we have two cases according to the possible values of b: 
if b is tt, then we must return 〈S q,ff〉, while if b = ff then we must return 
〈q,tt〉.

It is important to point out the deep, substantial analogy between the algorithm 
for computing div2 and the proof of ex_half. In particular ex_half returns a 
proof of the kind ∃n.A(n)∨B(n): the really informative content in it is the 
witness n and a boolean indicating which one between the two conditions A(n) and 
B(n) is met. This is precisely the quotient-remainder pair returned by div2.
In both cases we proceed by recurrence (respectively, induction or recursion) over 
the input argument n. In case n = 0, we conclude the proof in ex_half by providing 
the witness O and a proof of A(O); this corresponds to returning the pair 〈O,ff〉 in 
div2. Similarly, in the inductive case n = S a, we must exploit the inductive 
hypothesis for a (i.e. the result of the recursive call), distinguishing two subcases 
according to the two possibilities A(a) or B(a) (i.e. the two possible values of 
the remainder for a). The reader is strongly invited to check all remaining details.

Let us now prove that our div2 function has the expected behaviour.
*)

(* Every pair equals the pair of its own projections; by cases on p.
   NOTE(review): the statement mixes the two notations fst and \snd. *)
lemma surjective_pairing: ∀A,B.∀p:A×B. p = 〈fst … p,\snd … p〉.
#A #B * // qed.

(* Step lemma: if n is even with half q (remainder ff), then S n is odd
   with the same half q (remainder tt). *)
lemma div2SO: ∀n,q. div2 n = 〈q,ff〉 → div2 (S n) = 〈q,tt〉.
#n #q #H normalize >H normalize // qed.

(* Step lemma: if n is odd with half q (remainder tt), then S n is even
   with half S q (remainder ff). *)
lemma div2S1: ∀n,q. div2 n = 〈q,tt〉 → div2 (S n) = 〈S q,ff〉.
#n #q #H normalize >H normalize // qed.

(* Correctness of div2: if div2 n = 〈q,r〉 then n = 2*q + r, where the
   remainder r is read as a number via nat_of_bool. Induction on n, with a
   case analysis on the remainder of the recursive call. *)
lemma div2_ok: ∀n,q,r. div2 n = 〈q,r〉 → n = add (twice q) (nat_of_bool r).
#n elim n
  [#q #r normalize #H destruct //
  |#a #Hind #q #r 
   cut (div2 a = 〈fst … (div2 a), snd … (div2 a)〉) [//] 
   cases (snd … (div2 a))
    [#H >(div2S1 … H) #H1 destruct @eq_f >add_S whd in ⊢ (???%); <add_S @(Hind … H) 
    |#H >(div2SO … H) #H1 destruct >add_S @eq_f @(Hind … H) 
    ]
qed.

(* 
Mixing proofs and computations
There is still another possibility, however, namely to mix the program and its 
specification into a single entity. The idea is to refine the output type of the 
div2 function: it should not be just a generic pair 〈q,r〉 of natural numbers but a 
specific pair satisfying the specification of the function. In other words, we need 
the possibility to define, for a type A and a property P over A, the subset type 
{a:A|P(a)} of all elements a of type A that satisfy the property P. Subset types 
are just a particular case of the so called dependent types, that is types that 
can depend over arguments (such as arrays of a specified length, taken as a 
parameter).These kind of types are quite unusual in traditional programming 
languages, and their study is one of the new frontiers of the current research on 
type systems. 

There is nothing special in a subset type {a:A|P(a)}: it is just a record composed 
by an element of a of type A and a proof of P(a). The crucial point is to have a 
language rich enough to comprise proofs among its expressions. 
*)

(* Subset type {a:A | P a}: a witness of type A packaged together with a
   proof that it satisfies the predicate P. *)
record Sub (A:Type[0]) (P:A → Prop) : Type[0] ≝
  {witness: A; 
   proof: P witness}.

(* Specification of a quotient/remainder pair p for input n: whenever
   p = 〈q,r〉, n equals twice q plus the numeric reading of r. *)
definition qr_spec ≝ λn.λp.∀q,r. p = 〈q,r〉 → n = add (twice q) (nat_of_bool r).
  
(* We can now construct a function from n to {p|qr_spec n p} by composing the objects
we already have *)

(* The certified division: the pair computed by div2 packaged with its
   correctness proof div2_ok. *)
definition div2P: ∀n. Sub (nat×bool) (qr_spec n) ≝ λn.
 mk_Sub ?? (div2 n) (div2_ok n).

(* But we can also try to directly build such an object *)

(* Direct interactive construction of the certified division: for each n we
   build the pair and its correctness proof simultaneously, mirroring the
   structure of both div2 and the proof of div2_ok. *)
definition div2Pagain : ∀n.Sub (nat×bool) (qr_spec n).
#n elim n
  [@(mk_Sub … 〈O,ff〉) normalize #q #r #H destruct //
  |#a * #p #qrspec 
   cut (p = 〈fst … p, snd … p〉) [//] 
   cases (snd … p)
    [#H @(mk_Sub … 〈S (fst … p),ff〉) whd #q #r #H1 destruct @eq_f >add_S
     whd in ⊢ (???%); <add_S @(qrspec … H)
    |#H @(mk_Sub … 〈fst … p,tt〉) whd #q #r #H1 destruct >add_S @eq_f @(qrspec … H) 
  ]
qed.

(* 7 = 2*3 + 1: quotient 3, remainder tt (odd). *)
example quotient7: witness … (div2Pagain (S(S(S(S(S(S(S O)))))))) = 〈S(S(S O)),tt〉.
// qed.

(* 16 = 2*8: quotient 8, remainder ff (even). *)
example quotient8: witness … (div2Pagain (twice (twice (twice (twice (S O)))))) 
       = 〈twice (twice (twice (S O))), ff〉.
// qed. 
 

(* Matita supports polymorphic data types. The most typical case are polymorphic
lists, parametric in the type of their elements: *)

(* Polymorphic lists over an element type A, generated by the empty list
   nil and the cons operation. *)
inductive list (A:Type[0]) : Type[0] ≝
  | nil: list A
  | cons: A -> list A -> list A.

(* The type notation list A is the type of all lists with elements of type A: 
it is defined by two constructors: a polymorphic empty list (nil A) and a cons 
operation, adding a new head element of type A to a previous list. For instance, 
(list nat) and (list bool) are lists of natural numbers and booleans, 
respectively. But we can also form more complex data types, like 
(list (list (nat → nat))), that is a list whose elements are lists of functions 
from natural numbers to natural numbers.

Typical elements in (list bool) are for instance,
  nil bool                                     - the empty list of type bool
  cons bool true (nil bool)                    - the list containing true
  cons bool false (cons bool true (nil bool))  - the list containing false and true
and so on. 

Note that both constructors nil and cons are expecting in input the type parameter:
in this case, bool.
*)

(*
Defining your own notation
We now add a bit of notation, in order to make the syntax more readable. In 
particular, we would like to write [] instead of (nil A) and a::l instead of 
(cons A a l), leaving the system the burden to infer A, whenever possible.
*)

(* Infix cons notation: hd :: tl stands for ('cons hd tl). *)
notation "hvbox(hd break :: tl)"
  right associative with precedence 47
  for @{'cons $hd $tl}.

(* Bracket-list notation: [a;b;c] unfolds to a::b::c::'nil. *)
notation "[ list0 x sep ; ]"
  non associative with precedence 90
  for ${fold right @'nil rec acc @{'cons $x $acc}}.

(* Infix append notation: l1 @ l2 stands for ('append l1 l2). *)
notation "hvbox(l1 break @ l2)"
  right associative with precedence 47
  for @{'append $l1 $l2 }.

(* Bind the notations to the list constructors, leaving the type
   parameter to be inferred. *)
interpretation "nil" 'nil = (nil ?).
interpretation "cons" 'cons hd tl = (cons ? hd tl).

(* 
Basic operations on lists
Let us define a few basic functions over lists. Our first example is the 
append function, concatenating two lists l1 and l2. The natural way is to proceed 
by recursion on l1: if it is empty the result is simply l2, while if l1 = hd::tl 
then we recursively append tl and l2 , and then add hd as first element. Note that 
the append function itself is polymorphic, and the first argument it takes in input 
is the type A of the elements of two lists l1 and l2. 
Moreover, since the append function takes in input several parameters, we must also 
specify in its definition on which one of them we are recurring: in this case l1.
If not otherwise specified, recursion is supposed to act on the first argument of the
function.*)

(* Concatenation of two lists, by recursion on the first one (selected
   explicitly with "on l1"). *)
let rec append A (l1: list A) l2 on l1 ≝ 
  match l1 with
  [ nil ⇒  l2
  | cons hd tl ⇒  hd :: append A tl l2 ].

(* Attach the @ notation to append. *)
interpretation "append" 'append l1 l2 = (append ? l1 l2).

(* As usual, the function is executable. For instance, (append A nil l) reduces to
l, as shown by the following example: *)

(* [] @ l reduces to l by mere computation. *)
example nil_append: ∀A.∀l:list A. [] @ l = l.
#A #l normalize // qed.

(* Proving that l @ [] = l is just a bit more complex. The situation is exactly 
the same as for the addition operation of the previous chapter: since append is 
defined by recursion over the first argument, the computation of l @ [] is stuck, 
and we must proceed by induction on l *) 

(* [] is also a right unit for @; here computation is stuck, so we proceed
   by induction on l. *)
lemma append_nil: ∀A.∀l:list A.l @ [] = l.
#A #l (elim l) normalize // qed.

(* similarly, we can define the two functions head and tail. Since we can only define
total functions, we should decide what to do in case the input list is empty. 
For tl, it is natural to return the empty list; for hd, we take in input a default 
element d of type A to return in this case. *)

(* First element of l, or the default d when l is empty. *)
definition head ≝ λA.λl: list A.λd:A.
  match l with [ nil ⇒ d | cons a _ ⇒ a].

(* All elements of l but the first; the tail of [] is []. *)
definition tail ≝  λA.λl: list A.
  match l with [ nil ⇒  [] | cons hd tl ⇒  tl].

(* head of a non-empty list ignores the default element. *)
example ex_head: ∀A.∀a,d,l. head A (a::l) d = a.
#A #a #d #l normalize // qed.

(* The tail of a singleton list is empty. *)
example ex_tail: tail ? (cons ? true []) = [].
normalize // qed.

(* Associativity of concatenation, by induction on the leftmost list. *)
theorem associative_append: 
∀A.∀l1,l2,l3: list A. (l1 @ l2) @ l3 = l1 @ (l2 @ l3).
#A #l1 #l2 #l3 (elim l1) normalize // qed.

(* Issues with the notation. *)
(* Prepending a singleton list is the same as consing its element. *)
lemma a_append: ∀A.∀a.∀l:list A. (a::[]) @ l = a::l.
// qed.

(* A cons in the middle can be moved to the end of the left list. *)
theorem append_cons:
∀A.∀a:A.∀l,l1: list A.l@(a::l1)= (l @ (cons ? a [])) @ l1.
// qed. 

(* Other typical functions over lists are those computing the length 
of a list, and the function returning the nth element *)

(* Number of elements of l, by structural recursion on l. *)
let rec length (A:Type[0]) (l:list A) on l ≝ 
match l with 
  [ nil ⇒ O
    | cons a tl ⇒ S (length A tl)].

(* n-th element of l (0-based), returning the default d when the list is
   too short; recursion is on the index n. *)
let rec nth n (A:Type[0]) (l:list A) (d:A)  ≝  
  match n with
    [O ⇒ head A l d
    |S m ⇒ nth m A (tail A l) d].

(* A singleton list has length one. *)
example ex_length: length ? (cons ? O []) = S O.
normalize // qed.

(* The element at index 1 of [1;0] is 0. *)
example ex_nth: nth (S O) ? (cons ? (S O) (cons ? O [])) O = O.
normalize // qed.

(* Proving that the length of l1@l2 is the sum of the lengths of l1
and l2 just requires a trivial induction on the first list. *)

 (* Length is additive with respect to concatenation; induction on l1. *)
 lemma  length_add: ∀A.∀l1,l2:list A. 
  length ? (l1@l2) = add (length ? l1) (length ? l2).
#A #l1 elim l1 normalize // qed. 

(* 
Comparing Constructors
Let us come to a more interesting question. How can we prove that the empty 
list is different from any list with at least one element, that is from any list 
of the kind (a::l)? We start defining a simple predicate stating if a list is 
empty or not. The predicate is computed by inspection over the list *)

(* Emptiness predicate computed by inspection of the list: the nil branch
   claims l = [], the cons branch claims l ≠ []. *)
definition is_nil: ∀A:Type[0].list A → Prop ≝
λA.λl.match l with [ nil ⇒ l = [] | cons hd tl ⇒ (l ≠ [])].

(* Next we need a simple result about negation: if you wish to prove ¬P you are
authorized to add P to your hypothesis: *)

(* Auxiliary lemma on negation: to prove ¬P it suffices to derive ¬P under
   the additional assumption P. *)
lemma neg_aux : ∀P:Prop. (P → ¬P) → ¬P.
#P #PtonegP % #H1 /3/ qed. (* HERE I do not understand why // does not succeed while /3/ does — what is the difference? *)

(* A non-empty list a::l is different from the empty list. *)
theorem diff_cons_nil:
∀A:Type[0].∀l:list A.∀a:A. a::l ≠ [].
#A #l #a @neg_aux #Heq 
(* we start assuming the new hypothesis Heq of type a::l = [] using neg_aux. 
Next we use the change tactic to pass from the current goal a::l≠ [] to the 
expression is_nil a::l, convertible with it. *)
(change with (is_nil ? (a::l))) 
(* Now, we rewrite with Heq, obtaining (is_nil A []), that reduces to the trivial 
goal [] = [] *)
>Heq // qed.

(* As an application of the previous result let us prove that l1@l2 is empty if 
and only if both l1 and l2 are empty. 
The idea is to proceed by cases on l1: if l1=[] the statement is trivial; on the 
other side, if l1 = a::tl, then the hypothesis (a::tl)@l2 = [] is absurd, hence we 
can prove anything from it. 
When we know we can prove both A and ¬A, a sensible way to proceed is to apply 
False_ind: ∀P.False → P to the current goal, that breaks down to prove False, and 
then absurd: ∀A:Prop. A → ¬A → False to reduce to the contradictory cases. 
Usually, you may invoke automation to take care to solve the absurd case. *)

(* A concatenation is empty only if both lists are. By cases on l1: the
   nil case is trivial, the cons case is refuted via False_ind with
   automation discharging the absurd hypothesis. *)
lemma nil_to_nil:  ∀A.∀l1,l2:list A.
  l1@l2 = [] → l1 = [] ∧ l2 = [].
#A #l1 cases l1 normalize /2/ #a #tl #l2 #H @False_ind /2/ qed. 

(* 
Higher Order Functionals
Let us come to some important, higher order, polymorphic functionals 
acting over lists. A typical example is the map function, taking a function
f:A → B, a list l = [a1; a2; ... ; an] and returning the list 
[f a1; f a2; ... ; f an]. *)

(* map f [a1; …; an] = [f a1; …; f an]. *)
let rec map (A,B:Type[0]) (f: A → B) (l:list A) on l: list B ≝
 match l with [ nil ⇒ [] | cons x tl ⇒ f x :: (map A B f tl)].

(* Another major example is the fold function, that taken a list 
l = [a1; a2; ... ;an], a base value b:B, and a function f: A → B → B returns 
(f a1 (f a2 (... (f an b)...))). *)

(* foldr f b [a1; …; an] = f a1 (f a2 (… (f an b)…)). *)
let rec foldr (A,B:Type[0]) (f:A → B → B) (b:B) (l:list A) on l :B ≝  
  match l with [ nil ⇒ b | cons a l ⇒ f a (foldr A B f b l)].

(* As an example of application of foldr, let us use it to define a filter 
function that given a list l: list A and a boolean test p:A → bool returns the 
sublist of elements satisfying the test. In this case, the result type B of 
foldr should be (list A), the base value is [], and f: A → list A →list A is 
the function that taken x and l returns x::l, if x satisfies the test, and l 
otherwise. We use an if_then_else function included from bool.ma to this purpose. *)

(* Sublist of the elements satisfying the boolean test p, defined as an
   instance of foldr with base [] and a conditional cons. *)
definition filter ≝ 
  λT.λp:T → bool.
  foldr T (list T) (λx,l0. if p x then x::l0 else l0) [].

(* Here are a couple of simple lemmas on the behaviour of the filter function. 
It is often convenient to state such lemmas, in order to be able to use rewriting
as an alternative to reduction in proofs: reduction is a bit difficult to control.
*)

(* Unfolding lemma: a head passing the test is kept by filter. *)
lemma filter_true : ∀A,l,a,p. p a = true → 
  filter A p (a::l) = a :: filter A p l.
#A #l #a #p #pa (elim l) normalize >pa // qed.

(* Unfolding lemma: a head failing the test is dropped by filter. *)
lemma filter_false : ∀A,l,a,p. p a = false → 
  filter A p (a::l) = filter A p l.
#A #l #a #p #pa (elim l) normalize >pa normalize // qed.

(* As another example, let us redefine the map function using foldr. The
result type B is (list B), the base value b is [], and the fold function 
of type A → list B → list B is the function mapping a and l to (f a)::l.
*)

(* map redefined via foldr: fold "cons of the image" over l from []. *)
definition map_again ≝ λA,B,f,l. foldr A (list B) (λa,l.f a::l) [] l.

(* 
Extensional equality
Can we prove that map_again is "the same" as map? We should first of all
clarify in which sense we expect the two functions to be equal. Equality in
Matita has an intentional meaning: it is the smallest predicate induced by 
convertibility, i.e. syntactical equality up to normalization. From an 
intentional point of view, map and map_again are not functions, but programs,
and they are clearly different. What we would like to say is that the two
programs behave in the same way: this is a different, extensional equality 
that can be defined in the following way. *)

(* Extensional equality of functions: f and g agree on every argument. *)
definition ExtEq ≝ λA,B:Type[0].λf,g:A→B.∀a:A.f a = g a.

(* That map and map_again are extensionally equal in the previous sense can
be proved by a trivial structural induction on the list *)

(* map and map_again agree extensionally; induction on the input list. *)
lemma eq_maps: ∀A,B,f. ExtEq ?? (map A B f) (map_again A B f).
#A #B #f #n (elim n) normalize // qed. 

(* Let us make another remark about extensional equality. It is clear that,
if f is extensionally equal to g, then (map A B f) is extensionally equal to
(map A B g). Let us prove it. *)

(* map preserves extensional equality of functions. *)
theorem eq_map : ∀A,B,f,g. ExtEq A B f g → ExtEq ?? (map A B f) (map A B g).
#A #B #f #g #eqfg
 
(* the relevant point is that we cannot proceed by rewriting f with g via
eqfg, here. Rewriting only works with Matita intensional equality, while here
we are dealing with a different predicate, defined by the user. The right way 
to proceed is to unfold the definition of ExtEq, and work by induction on l, 
as usual when we want to prove extensional equality between functions over 
inductive types; again the rest of the proof is trivial. *)

#l (elim l) normalize // qed.

(*
Big Operators
Building a library of basic functions, it is important to achieve a 
good degree of abstraction and generality, in order to be able to reuse
suitable instances of the same function in different context. This has not
only the obvious benefit of factorizing code, but especially to avoid 
repeating proofs of generic properties over and over again.
A really convenient tool is the following combination of fold and filter,
that essentially allow you to iterate on every subset of a given enumerated
(finite) type, represented as a list. *) 

 (* Filtered fold: combine with op the images under f of the elements of l
    that satisfy p, starting from the base value b. *)
 let rec fold (A,B:Type[0]) (op:B→B→B) (b:B) (p:A→bool) (f:A→B) (l:list A) on l:B ≝  
 match l with 
  [ nil ⇒ b 
  | cons a l ⇒ if p a then op (f a) (fold A B op b p f l) else
      (fold A B op b p f l)].

(* It is also important to spend a few time to introduce some fancy notation
for these iterators. *)

 (* Big-operator notation: \fold[op,nil]_{i ∈ l | p} f iterates f over the
    elements of l satisfying p, combining the results with op from base nil. *)
 notation "\fold  [ op , nil ]_{ ident i ∈ l | p} f"
  with precedence 80
for @{'fold $op $nil (λ${ident i}. $p) (λ${ident i}. $f) $l}.

(* Variant without a filter: the predicate defaults to true. *)
notation "\fold [ op , nil ]_{ident i ∈ l } f"
  with precedence 80
for @{'fold $op $nil (λ${ident i}.true) (λ${ident i}. $f) $l}.

interpretation "\fold" 'fold op nil p f l = (fold ? ? op nil p f l).

(* Unfolding lemma: when the head satisfies p, its image is combined with
   the fold of the tail. *)
theorem fold_true: 
∀A,B.∀a:A.∀l.∀p.∀op:B→B→B.∀nil.∀f:A→B. p a = true → 
  \fold[op,nil]_{i ∈ a::l| p i} (f i) = 
    op (f a) \fold[op,nil]_{i ∈ l| p i} (f i). 
#A #B #a #l #p #op #nil #f #pa normalize >pa // qed.

(* Unfolding lemma: a head failing p does not contribute to the fold. *)
theorem fold_false: 
∀A,B.∀a:A.∀l.∀p.∀op:B→B→B.∀nil.∀f.
p a = false → \fold[op,nil]_{i ∈ a::l| p i} (f i) = 
  \fold[op,nil]_{i ∈ l| p i} (f i).
#A #B #a #l #p #op #nil #f #pa normalize >pa // qed.

(* Folding with a filter p equals folding without filter over (filter p l);
   induction on l with a case analysis on p a.
   NOTE(review): the binder ∀a:A in the statement is unused (the proof
   immediately shadows it with #a); it could be dropped without harm. *)
theorem fold_filter: 
∀A,B.∀a:A.∀l.∀p.∀op:B→B→B.∀nil.∀f:A →B.
  \fold[op,nil]_{i ∈ l| p i} (f i) = 
    \fold[op,nil]_{i ∈ (filter A p l)} (f i).
#A #B #a #l #p #op #nil #f elim l //  
#a #tl #Hind cases(true_or_false (p a)) #pa 
  [ >filter_true // > fold_true // >fold_true //
  | >filter_false // >fold_false // ]
qed.

(* An associative operation on A with a two-sided neutral element nil.
   The coercion :2> lets an Aop be applied directly as a function A→A→A. *)
record Aop (A:Type[0]) (nil:A) : Type[0] ≝
{op :2> A → A → A; 
  nill:∀a. op nil a = a; 
  nilr:∀a. op a nil = a;
  assoc: ∀a,b,c.op a (op b c) = op (op a b) c
}.

(* Folding over a concatenation I@J splits as the op of the two folds, for
   any associative operation with a neutral element; induction on I. *)
theorem fold_sum: ∀A,B. ∀I,J:list A.∀nil.∀op:Aop B nil.∀f:A → B.
 op (\fold[op,nil]_{i ∈ I} (f i)) (\fold[op,nil]_{i ∈ J} (f i)) = 
   \fold[op,nil]_{i ∈ (I@J)} (f i).
#A #B #I #J #nil #op #f (elim I) normalize 
  [>nill//|#a #tl #Hind <assoc //]
qed.