| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 7–139) | content_id (string, len 40) | detected_licenses (list, len 0–16) | license_type (string, 2 classes) | repo_name (string, len 7–55) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, 6 classes) | visit_date (int64, 1,471B–1,694B) | revision_date (int64, 1,378B–1,694B) | committer_date (int64, 1,378B–1,694B) | github_id (float64, 1.33M–604M, nullable) | star_events_count (int64, 0–43.5k) | fork_events_count (int64, 0–1.5k) | gha_license_id (string, 6 classes) | gha_event_created_at (int64, 1,402B–1,695B, nullable) | gha_created_at (int64, 1,359B–1,637B, nullable) | gha_language (string, 19 classes) | src_encoding (string, 2 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3–6.4M) | extension (string, 4 classes) | content (string, len 3–6.12M) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aa766930163751a0ecbf3661279bc73afbf59c01
|
4727251e0cd73359b15b664c3170e5d754078599
|
/src/category_theory/is_connected.lean
|
72d75ff9dfad0fa110b28f6735db7c130f717900
|
[
"Apache-2.0"
] |
permissive
|
Vierkantor/mathlib
|
0ea59ac32a3a43c93c44d70f441c4ee810ccceca
|
83bc3b9ce9b13910b57bda6b56222495ebd31c2f
|
refs/heads/master
| 1,658,323,012,449
| 1,652,256,003,000
| 1,652,256,003,000
| 209,296,341
| 0
| 1
|
Apache-2.0
| 1,568,807,655,000
| 1,568,807,655,000
| null |
UTF-8
|
Lean
| false
| false
| 14,286
|
lean
|
/-
Copyright (c) 2020 Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Bhavik Mehta
-/
import data.list.chain
import category_theory.punit
import category_theory.groupoid
/-!
# Connected category
Define a connected category as a _nonempty_ category for which every functor
to a discrete category is isomorphic to the constant functor.
NB. Some authors include the empty category as connected; we do not.
We instead are interested in categories with exactly one 'connected
component'.
We give some equivalent definitions:
- A nonempty category for which every functor to a discrete category is
constant on objects.
See `any_functor_const_on_obj` and `is_connected.of_any_functor_const_on_obj`.
- A nonempty category for which every function `F` for which the presence of a
morphism `f : j₁ ⟶ j₂` implies `F j₁ = F j₂` must be constant everywhere.
See `constant_of_preserves_morphisms` and `is_connected.of_constant_of_preserves_morphisms`.
- A nonempty category for which any subset of its objects containing a given
object and closed under morphisms is everything.
See `induct_on_objects` and `is_connected.of_induct`.
- A nonempty category for which every object is related under the reflexive
transitive closure of the relation "there is a morphism in some direction
from `j₁` to `j₂`".
See `is_connected_zigzag` and `zigzag_is_connected`.
- A nonempty category for which for any two objects there is a sequence of
morphisms (some reversed) from one to the other.
See `exists_zigzag'` and `is_connected_of_zigzag`.
We also prove the result that the functor given by `(X × -)` preserves any
connected limit. That is, any limit of shape `J` where `J` is a connected
category is preserved by the functor `(X × -)`. This appears in `category_theory.limits.connected`.
-/
universes v₁ v₂ u₁ u₂
noncomputable theory
open category_theory.category
open opposite
namespace category_theory
/--
A possibly empty category for which every functor to a discrete category is constant.
-/
class is_preconnected (J : Type u₁) [category.{v₁} J] : Prop :=
(iso_constant : Π {α : Type u₁} (F : J ⥤ discrete α) (j : J),
nonempty (F ≅ (functor.const J).obj (F.obj j)))
/--
We define a connected category as a _nonempty_ category for which every
functor to a discrete category is constant.
NB. Some authors include the empty category as connected; we do not.
We instead are interested in categories with exactly one 'connected
component'.
This allows us to show that the functor X ⨯ - preserves connected limits.
See <https://stacks.math.columbia.edu/tag/002S>
-/
class is_connected (J : Type u₁) [category.{v₁} J] extends is_preconnected J : Prop :=
[is_nonempty : nonempty J]
attribute [instance, priority 100] is_connected.is_nonempty
variables {J : Type u₁} [category.{v₁} J]
variables {K : Type u₂} [category.{v₂} K]
/--
If `J` is connected, any functor `F : J ⥤ discrete α` is isomorphic to
the constant functor with value `F.obj j` (for any choice of `j`).
-/
def iso_constant [is_preconnected J] {α : Type u₁} (F : J ⥤ discrete α) (j : J) :
F ≅ (functor.const J).obj (F.obj j) :=
(is_preconnected.iso_constant F j).some
/--
If J is connected, any functor to a discrete category is constant on objects.
The converse is given in `is_connected.of_any_functor_const_on_obj`.
-/
lemma any_functor_const_on_obj [is_preconnected J]
{α : Type u₁} (F : J ⥤ discrete α) (j j' : J) :
F.obj j = F.obj j' :=
((iso_constant F j').hom.app j).down.1
/--
If any functor to a discrete category is constant on objects, J is connected.
The converse of `any_functor_const_on_obj`.
-/
lemma is_connected.of_any_functor_const_on_obj [nonempty J]
(h : ∀ {α : Type u₁} (F : J ⥤ discrete α), ∀ (j j' : J), F.obj j = F.obj j') :
is_connected J :=
{ iso_constant := λ α F j',
⟨nat_iso.of_components (λ j, eq_to_iso (h F j j')) (λ _ _ _, subsingleton.elim _ _)⟩ }
/--
If `J` is connected, then given any function `F` such that the presence of a
morphism `j₁ ⟶ j₂` implies `F j₁ = F j₂`, we have that `F` is constant.
This can be thought of as a local-to-global property.
The converse is shown in `is_connected.of_constant_of_preserves_morphisms`
-/
lemma constant_of_preserves_morphisms [is_preconnected J] {α : Type u₁} (F : J → α)
(h : ∀ (j₁ j₂ : J) (f : j₁ ⟶ j₂), F j₁ = F j₂) (j j' : J) :
F j = F j' :=
any_functor_const_on_obj { obj := F, map := λ _ _ f, eq_to_hom (h _ _ f) } j j'
/--
`J` is connected if every function `F : J → α` that takes equal values on the
source and target of each morphism `j₁ ⟶ j₂` is constant.
This can be thought of as a local-to-global property.
The converse of `constant_of_preserves_morphisms`.
-/
lemma is_connected.of_constant_of_preserves_morphisms [nonempty J]
(h : ∀ {α : Type u₁} (F : J → α), (∀ {j₁ j₂ : J} (f : j₁ ⟶ j₂), F j₁ = F j₂) →
(∀ j j' : J, F j = F j')) :
is_connected J :=
is_connected.of_any_functor_const_on_obj (λ _ F, h F.obj (λ _ _ f, (F.map f).down.1))
/--
An inductive-like property for the objects of a connected category.
If the set `p` is nonempty, and `p` is closed under morphisms of `J`,
then `p` contains all of `J`.
The converse is given in `is_connected.of_induct`.
-/
lemma induct_on_objects [is_preconnected J] (p : set J) {j₀ : J} (h0 : j₀ ∈ p)
(h1 : ∀ {j₁ j₂ : J} (f : j₁ ⟶ j₂), j₁ ∈ p ↔ j₂ ∈ p) (j : J) :
j ∈ p :=
begin
injection (constant_of_preserves_morphisms (λ k, ulift.up (k ∈ p)) (λ j₁ j₂ f, _) j j₀) with i,
rwa i,
dsimp,
exact congr_arg ulift.up (propext (h1 f)),
end
/--
If every set of objects of `J` containing a given object `j₀` and closed under morphisms
(in both directions) is all of `J`, then `J` is connected.
The converse of `induct_on_objects`.
-/
lemma is_connected.of_induct [nonempty J] {j₀ : J}
(h : ∀ (p : set J), j₀ ∈ p → (∀ {j₁ j₂ : J} (f : j₁ ⟶ j₂), j₁ ∈ p ↔ j₂ ∈ p) → ∀ (j : J), j ∈ p) :
is_connected J :=
is_connected.of_constant_of_preserves_morphisms (λ α F a,
begin
have w := h {j | F j = F j₀} rfl (λ _ _ f, by simp [a f]),
dsimp at w,
intros j j',
rw [w j, w j'],
end)
/--
Another induction principle for `is_preconnected J`:
given a type family `Z : J → Sort*` and
a rule for transporting in *both* directions along a morphism in `J`,
we can transport an `x : Z j₀` to a point in `Z j` for any `j`.
-/
lemma is_preconnected_induction [is_preconnected J] (Z : J → Sort*)
(h₁ : Π {j₁ j₂ : J} (f : j₁ ⟶ j₂), Z j₁ → Z j₂)
(h₂ : Π {j₁ j₂ : J} (f : j₁ ⟶ j₂), Z j₂ → Z j₁)
{j₀ : J} (x : Z j₀) (j : J) : nonempty (Z j) :=
(induct_on_objects {j | nonempty (Z j)} ⟨x⟩
(λ j₁ j₂ f, ⟨by { rintro ⟨y⟩, exact ⟨h₁ f y⟩, }, by { rintro ⟨y⟩, exact ⟨h₂ f y⟩, }⟩) j : _)
/-- If `J` and `K` are equivalent and `J` is preconnected, then `K` is preconnected as well. -/
lemma is_preconnected_of_equivalent {K : Type u₁} [category.{v₂} K] [is_preconnected J]
(e : J ≌ K) :
is_preconnected K :=
{ iso_constant := λ α F k, ⟨
calc F ≅ e.inverse ⋙ e.functor ⋙ F : (e.inv_fun_id_assoc F).symm
... ≅ e.inverse ⋙ (functor.const J).obj ((e.functor ⋙ F).obj (e.inverse.obj k)) :
iso_whisker_left e.inverse (iso_constant (e.functor ⋙ F) (e.inverse.obj k))
... ≅ e.inverse ⋙ (functor.const J).obj (F.obj k) :
iso_whisker_left _ ((F ⋙ functor.const J).map_iso (e.counit_iso.app k))
... ≅ (functor.const K).obj (F.obj k) : nat_iso.of_components (λ X, iso.refl _) (by simp),
⟩ }
/-- If `J` and `K` are equivalent and `J` is connected, then `K` is connected as well. -/
lemma is_connected_of_equivalent {K : Type u₁} [category.{v₂} K]
(e : J ≌ K) [is_connected J] :
is_connected K :=
{ is_nonempty := nonempty.map e.functor.obj (by apply_instance),
to_is_preconnected := is_preconnected_of_equivalent e }
/-- If `J` is preconnected, then `Jᵒᵖ` is preconnected as well. -/
instance is_preconnected_op [is_preconnected J] : is_preconnected Jᵒᵖ :=
{ iso_constant := λ α F X, ⟨
nat_iso.of_components
(λ Y, (nonempty.some $ is_preconnected.iso_constant
(F.right_op ⋙ (discrete.opposite α).functor) (unop X)).app (unop Y))
(λ Y Z f, subsingleton.elim _ _)
⟩ }
/-- If `J` is connected, then `Jᵒᵖ` is connected as well. -/
instance is_connected_op [is_connected J] : is_connected Jᵒᵖ :=
{ is_nonempty := nonempty.intro (op (classical.arbitrary J)) }
lemma is_preconnected_of_is_preconnected_op [is_preconnected Jᵒᵖ] : is_preconnected J :=
is_preconnected_of_equivalent (op_op_equivalence J)
lemma is_connected_of_is_connected_op [is_connected Jᵒᵖ] : is_connected J :=
is_connected_of_equivalent (op_op_equivalence J)
/-- j₁ and j₂ are related by `zag` if there is a morphism between them. -/
@[reducible]
def zag (j₁ j₂ : J) : Prop := nonempty (j₁ ⟶ j₂) ∨ nonempty (j₂ ⟶ j₁)
lemma zag_symmetric : symmetric (@zag J _) :=
λ j₂ j₁ h, h.swap
/--
`j₁` and `j₂` are related by `zigzag` if there is a chain of
morphisms from `j₁` to `j₂`, with backward morphisms allowed.
-/
@[reducible]
def zigzag : J → J → Prop := relation.refl_trans_gen zag
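-- Illustrative examples (added for exposition; not in the original mathlib file):
-- a single morphism `f : j₁ ⟶ j₂` already witnesses `zag j₁ j₂`, and hence gives a
-- one-step `zigzag`.
example {j₁ j₂ : J} (f : j₁ ⟶ j₂) : zag j₁ j₂ := or.inl ⟨f⟩
example {j₁ j₂ : J} (f : j₁ ⟶ j₂) : zigzag j₁ j₂ :=
relation.refl_trans_gen.single (or.inl ⟨f⟩)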
lemma zigzag_symmetric : symmetric (@zigzag J _) :=
relation.refl_trans_gen.symmetric zag_symmetric
lemma zigzag_equivalence : _root_.equivalence (@zigzag J _) :=
mk_equivalence _
relation.reflexive_refl_trans_gen
zigzag_symmetric
relation.transitive_refl_trans_gen
/--
The setoid given by the equivalence relation `zigzag`. A quotient for this
setoid is a connected component of the category.
-/
def zigzag.setoid (J : Type u₂) [category.{v₁} J] : setoid J :=
{ r := zigzag,
iseqv := zigzag_equivalence }
/--
If there is a zigzag from `j₁` to `j₂`, then there is a zigzag from `F j₁` to
`F j₂` as long as `F` is a functor.
-/
lemma zigzag_obj_of_zigzag (F : J ⥤ K) {j₁ j₂ : J} (h : zigzag j₁ j₂) :
zigzag (F.obj j₁) (F.obj j₂) :=
h.lift _ $ λ j k, or.imp (nonempty.map (λ f, F.map f)) (nonempty.map (λ f, F.map f))
-- TODO: figure out the right way to generalise this to `zigzag`.
lemma zag_of_zag_obj (F : J ⥤ K) [full F] {j₁ j₂ : J} (h : zag (F.obj j₁) (F.obj j₂)) :
zag j₁ j₂ :=
or.imp (nonempty.map F.preimage) (nonempty.map F.preimage) h
/-- Any equivalence relation containing (⟶) holds for all pairs of a connected category. -/
lemma equiv_relation [is_connected J] (r : J → J → Prop) (hr : _root_.equivalence r)
(h : ∀ {j₁ j₂ : J} (f : j₁ ⟶ j₂), r j₁ j₂) :
∀ (j₁ j₂ : J), r j₁ j₂ :=
begin
have z : ∀ (j : J), r (classical.arbitrary J) j :=
induct_on_objects (λ k, r (classical.arbitrary J) k)
(hr.1 (classical.arbitrary J)) (λ _ _ f, ⟨λ t, hr.2.2 t (h f), λ t, hr.2.2 t (hr.2.1 (h f))⟩),
intros, apply hr.2.2 (hr.2.1 (z _)) (z _)
end
/-- In a connected category, any two objects are related by `zigzag`. -/
lemma is_connected_zigzag [is_connected J] (j₁ j₂ : J) : zigzag j₁ j₂ :=
equiv_relation _ zigzag_equivalence
(λ _ _ f, relation.refl_trans_gen.single (or.inl (nonempty.intro f))) _ _
/--
If any two objects in a nonempty category are related by `zigzag`, the category is connected.
-/
lemma zigzag_is_connected [nonempty J] (h : ∀ (j₁ j₂ : J), zigzag j₁ j₂) : is_connected J :=
begin
apply is_connected.of_induct,
intros p hp hjp j,
have: ∀ (j₁ j₂ : J), zigzag j₁ j₂ → (j₁ ∈ p ↔ j₂ ∈ p),
{ introv k,
induction k with _ _ rt_zag zag,
{ refl },
{ rw k_ih,
rcases zag with ⟨⟨_⟩⟩ | ⟨⟨_⟩⟩,
apply hjp zag,
apply (hjp zag).symm } },
rwa this j (classical.arbitrary J) (h _ _)
end
lemma exists_zigzag' [is_connected J] (j₁ j₂ : J) :
∃ l, list.chain zag j₁ l ∧ list.last (j₁ :: l) (list.cons_ne_nil _ _) = j₂ :=
list.exists_chain_of_relation_refl_trans_gen (is_connected_zigzag _ _)
/--
If any two objects in a nonempty category are linked by a sequence of (potentially reversed)
morphisms, then J is connected.
The converse of `exists_zigzag'`.
-/
lemma is_connected_of_zigzag [nonempty J]
(h : ∀ (j₁ j₂ : J), ∃ l, list.chain zag j₁ l ∧ list.last (j₁ :: l) (list.cons_ne_nil _ _) = j₂) :
is_connected J :=
begin
apply zigzag_is_connected,
intros j₁ j₂,
rcases h j₁ j₂ with ⟨l, hl₁, hl₂⟩,
apply list.relation_refl_trans_gen_of_exists_chain l hl₁ hl₂,
end
/-- If `discrete α` is connected, then `α` is (type-)equivalent to `punit`. -/
def discrete_is_connected_equiv_punit {α : Type u₁} [is_connected (discrete α)] : α ≃ punit :=
discrete.equiv_of_equivalence.{u₁ u₁}
{ functor := functor.star α,
inverse := discrete.functor (λ _, classical.arbitrary _),
unit_iso := by { exact (iso_constant _ (classical.arbitrary _)), },
counit_iso := functor.punit_ext _ _ }
variables {C : Type u₂} [category.{u₁} C]
/--
For objects `X Y : C`, any natural transformation `α : const X ⟶ const Y` from a connected
category must be constant.
This is the key property of connected categories which we use to establish properties about limits.
-/
lemma nat_trans_from_is_connected [is_preconnected J] {X Y : C}
(α : (functor.const J).obj X ⟶ (functor.const J).obj Y) :
∀ (j j' : J), α.app j = (α.app j' : X ⟶ Y) :=
@constant_of_preserves_morphisms _ _ _
(X ⟶ Y)
(λ j, α.app j)
(λ _ _ f, (by { have := α.naturality f, erw [id_comp, comp_id] at this, exact this.symm }))
instance [is_connected J] : full (functor.const J : C ⥤ J ⥤ C) :=
{ preimage := λ X Y f, f.app (classical.arbitrary J),
witness' := λ X Y f,
begin
ext j,
apply nat_trans_from_is_connected f (classical.arbitrary J) j,
end }
instance nonempty_hom_of_connected_groupoid {G} [groupoid G] [is_connected G] :
∀ (x y : G), nonempty (x ⟶ y) :=
begin
refine equiv_relation _ _ (λ j₁ j₂, nonempty.intro),
exact ⟨λ j, ⟨𝟙 _⟩, λ j₁ j₂, nonempty.map (λ f, inv f), λ _ _ _, nonempty.map2 (≫)⟩,
end
end category_theory
|
6bdb42e7bf7b861c187d99bca11d19f2ca9ed80a
|
38a6d5def645a1887e1306ceb4da06ff71452096
|
/_5_The_Physical_Layer/qubit_carriers/entropy_of_states.lean
|
0acd72a916d044083b01a5811a717df73a442c7f
|
[] |
no_license
|
QTM3x/Quantum-Internet
|
bcc2d61e2ae7233bb2b369fedaed22a1feb6fba1
|
f90e09fb6c03d35043654d8b1bec1c63d6012268
|
refs/heads/master
| 1,609,224,401,937
| 1,599,911,583,000
| 1,599,911,583,000
| 238,495,221
| 45
| 33
| null | 1,603,625,079,000
| 1,580,919,815,000
|
Jupyter Notebook
|
UTF-8
|
Lean
| false
| false
| 7,840
|
lean
|
import data.real.basic
import analysis.special_functions.exp_log
import _5_The_Physical_Layer.qubit_carriers.quantum_state
import _5_The_Physical_Layer.optical_fiber.quantum_channel
import common.shannon_theory
notation `|` x `|` := abs x
variables {n m : ℕ}
noncomputable theory
---- QUANTUM ENTROPY
/-
Definition (Quantum entropy):
"Suppose that Alice generates a quantum state ∣ψy⟩ in her
lab according to some probability density p_Y(y), corresponding
to a random variable Y. Suppose further that Bob has not yet
received the state from Alice and does not know which one she sent.
The expected density operator from Bob’s point of view is then
σ = 𝐄_Y{∣ψY⟩⟨ψY∣} = ∑ y, p_Y(y) • ∣ψy⟩⟨ψy∣.
The interpretation of the entropy H(σ) is that it quantifies Bob’s
uncertainty about the state Alice sent — his expected information
gain is H(σ) qubits upon receiving and measuring the state that
Alice sends."
https://arxiv.org/pdf/1106.1445.pdf
-/
def quantum_entropy (ρ : density_operator n) : ℝ :=
- Tr(ρ.M * matrix_log(ρ.M)).re
notation `H(` ρ `)` := quantum_entropy ρ
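/-
Worked example (added for exposition; not part of the original file). If `ρ` has
spectral decomposition `ρ = ∑ i, (λ_ i) • ∣i⟩⟨i∣`, then
`H(ρ) = - ∑ i, (λ_ i) * real.log (λ_ i)`.
In particular a pure state has `H(ρ) = 0`, and the maximally mixed state on a
`d`-dimensional system has `H(ρ) = real.log d`, matching the maximum-value bound
stated below.
-/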
/-
Definition (quantum entropy of spectral decomposition)
-/
-- def quantum_entropy_of_spectral_decomposition :
/-
Theorem (quantum entropy of state and shannon entropy of prob dist)
-/
theorem quantum_entropy_eq_shannon_entropy_of_prob_dist
{ρ : density_operator n}
{prob_dist : multiset ℝ} :
-- {hρ : ρ = ∑ i, prob_dist i • ∣i▸dim⟩⟨i▸dim∣} :
H(ρ) = shannon_entropy(prob_dist) :=
begin
unfold quantum_entropy,
unfold shannon_entropy,
sorry
end
/-
Theorem (non-negativity): Quantum entropy is non-negative.
-/
theorem quantum_entropy_nonneg {n : ℕ} :
∀ (ρ : density_operator n), H(ρ) ≥ 0 :=
begin
intro ρ,
-- This follows from non-negativity of Shannon entropy
-- rw quantum_entropy_eq_shannon_entropy_of_prob_dist,
-- exact shannon_entropy_nonneg,
sorry
end
/-
Theorem (minimum value): The minimum value of quantum entropy is
zero and occurs when the state is pure.
-/
theorem minimum_value_quantum_entropy :
∀ (ρ : density_operator n), is_pure ρ → H(ρ) = 0 := sorry
/-
Theorem (maximum value): The maximum value of quantum entropy is
log d and occurs when the state is the maximally mixed state.
-/
theorem maximum_value_quantum_entropy {ρ : density_operator n} :
H(ρ) ≤ real.log ρ.dim :=
begin
sorry
end
/-
Theorem (additivity for tensor states)
-/
theorem additive_quantum_entropy_tensor_states :
∀ (ρ₁ : density_operator n) (ρ₂ : density_operator m), H(ρ₁ ⊗ ρ₂) = H(ρ₁) + H(ρ₂) :=
begin
sorry
end
/-
Theorem (Araki-Lieb triangle inequality)
-/
theorem Araki_Lieb {ρ : density_operator n} :
|H(A)_ρ - H(B)_ρ| ≤ H(AB)_ρ :=
begin
sorry
end
/-
Theorem (strong subadditivity)
Page 345 here https://arxiv.org/pdf/1106.1445.pdf.
-/
theorem strong_subadditivity {ρ : density_operator n} :
H(AC)_ρ + H(BC)_ρ ≥ H(ABC)_ρ + H(C)_ρ :=
begin
have : I(A;B|C)_ρ = H(AC)_ρ + H(BC)_ρ - H(ABC)_ρ - H(C)_ρ, {sorry},
have : I(A;B|C)_ρ = H(B|C)_ρ - H(B|AC)_ρ, {sorry},
have : -H(B|AC)_ρ = D(ρ∥I⊗Tr_B(ρ)), {sorry},
have : H(B|C)_ρ = -D(Tr_A(ρ)∥I⊗Tr_AB(ρ)), {sorry},
have : D(ρ∥I_B⊗Tr_B(ρ)) ≥ D(Tr_A(ρ)∥I_B⊗Tr_AB(ρ)), {sorry},
sorry
end
/-
Theorem (unital channels increase entropy)
-/
theorem unital_channels_increase_entropy
{ρ : density_operator n} {𝒩 : quantum_channel} {h𝒩 : is_unital(𝒩)} :
H(𝒩(ρ)) ≥ H(ρ) :=
begin
sorry
end
---- CONDITIONAL QUANTUM ENTROPY
/-
Definition (conditional quantum entropy)
-/
def cond_quantum_entropy (ρ : density_operator n) : ℝ := H(ρ) - H(Tr_A(ρ))
notation `H(` A `|` B `)_` ρ := cond_quantum_entropy(ρ)
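/-
Note (added for exposition; not part of the original file). Unfolding the definition,
`H(A|B)_ρ = H(AB)_ρ - H(B)_ρ`, since `Tr_A(ρ)` is the reduced state of the `B` system.
Unlike its classical counterpart, this quantity can be negative: for a maximally
entangled pair of `d`-dimensional systems, `H(AB)_ρ = 0` while `H(B)_ρ = real.log d`,
so `H(A|B)_ρ = - real.log d`.
-/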
/-
Theorem (conditioning does not increase entropy)
-/
theorem cond_entropy_leq_entropy (ρ : density_operator n) :
H(A)_ρ ≥ H(A|B)_ρ :=
begin
sorry
end
/-
Theorem (maximum of abs of conditional quantum entropy)
Page 333 here https://arxiv.org/pdf/1106.1445.pdf.
-/
theorem cond_quantum_entropy_max :
∀ (ρ : density_operator n), |H(A|B)_ρ| ≤ real.log Tr_B(ρ).dim :=
begin
-- start by rewriting using abs_le
-- first we prove that H(A|B)_ρ ≤ real.log Tr_B(ρ).dim
-- then we prove that H(A|B)_ρ ≥ - real.log Tr_B(ρ).dim
sorry
end
/-
Theorem (The π_A ⊗ π_B state saturates conditional quantum entropy)
-/
theorem cond_quantum_entropy_saturated {ρ : density_operator n} (hρ : ρ = π ⊗ π) :
H(A|B)_ρ = real.log Tr_B(ρ).dim :=
begin
sorry
end
---- COHERENT INFORMATION
/-
Definition (coherent information)
-/
def coherent_info (ρ : density_operator n) {hρ : ρ.dim ≥ 4} : ℝ :=
H(pTr(ρ.M , ρ.dim/2)) - H(ρ)
notation `I(` A `⟩` B `)_` ρ := coherent_info ρ
/-
Definition (reverse coherent information):
-/
def reverse_coherent_info (ρ : density_operator n) := sorry
/-
Theorem (coherent information of a maximally entangled state)
-/
theorem coherent_info_max_ent_state
{ρ : density_operator n} {hρ : is_maximally_entangled} :
I(A⟩B)_ρ = ... :=
begin
sorry
end
/-
Theorem (coherent information of purification)
"Thus, there is a sense in which the coherent information
measures the difference in the uncertainty of Bob and the
uncertainty of the environment."
https://arxiv.org/pdf/1106.1445.pdf
-/
theorem coh_info_purification :
∀ (ρ : density_operator n),
∃ {∣ψ⟩ : pure_state} {hψ : is_purification ρ ∣ψ⟩},
I(A⟩B)_ρ = H(B)_∣ψ⟩ - H(E)_∣ψ⟩ :=
begin
sorry
end
---- QUANTUM MUTUAL INFORMATION
/-
Definition (quantum mutual information)
-/
def mut_info (ρ : density_operator n) : ℝ :=
H(A)_ρ + H(B)_ρ - H(AB)_ρ
notation `I(` A `;` B `)_` ρ := mut_info(ρ)
/-
Lemma (rewrite in terms of cond entropy)
-/
lemma mut_info_cond_ent {ρ : density_operator n} :
I(A;B)_ρ = H(A)_ρ - H(A|B)_ρ :=
begin
sorry
end
/-
Lemma (another rewrite in terms of cond entropy)
-/
lemma mut_info_cond_ent' {ρ : density_operator n} :
I(A;B)_ρ = H(B)_ρ - H(B|A)_ρ :=
begin
sorry
end
/-
Lemma (non-negativity)
-/
lemma mut_info_nonneg {ρ : density_operator n} :
I(A;B)_ρ ≥ 0 :=
begin
sorry
end
---- HOLEVO INFORMATION
/-
Definition (Holevo information)
-/
def Holevo_info (ρ : density_operator n) := sorry
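/-
Sketch of the intended definition (added for exposition; not part of the original
file). For an ensemble `{p_x, ρ_x}` with average state `ρ = ∑ x, p_x • ρ_x`, the
Holevo information is `χ = H(ρ) - ∑ x, p_x * H(ρ_x)`; it upper-bounds the accessible
information defined below.
-/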
---- ACCESSIBLE INFORMATION
/-
Definition (accessible information)
-/
def accessible_info := sorry
---- CONDITIONAL QUANTUM MUTUAL INFORMATION
/-
Definition (conditional quantum mutual information)
-/
def cond_mut_info (ρ : density_operator n) : ℝ :=
H(A|C)_ρ + H(B|C)_ρ - H(AB|C)_ρ
notation `I(` A `;` B `|` C `)_` ρ := cond_mut_info ρ
/-
Lemma (quantum mutual information chain rule)
-/
lemma mut_info_chain_rule {ρ : density_operator n} :
I(A;BC)_ρ = I(A;B)_ρ + I(A;C|B)_ρ :=
begin
sorry
end
/-
Lemma (non-negativity / a.k.a. strong subadditivity)
-/
lemma cond_mut_info_nonnegative {ρ : density_operator n} :
I(A;B|C)_ρ ≥ 0 :=
begin
sorry
end
/-
Lemma ("duality" of condition mutual information)
-/
lemma duality {ρ : density_operator n} {hρ : is_pure ρ} :
I(A;B|C)_ρ = I(A;B|D)_ρ :=
begin
sorry
end
---- QUANTUM RELATIVE ENTROPY
/-
Definition (quantum relative entropy)
-/
def quantum_relative_entropy (ρ : density_operator n) (σ : positive_semidefinite_operator) := sorry
notation `D(` ρ `∥` σ `)` := quantum_relative_entropy(ρ,σ)
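/-
Sketch of the intended definition (added for exposition; not part of the original
file). Following the reference used throughout (arXiv:1106.1445), the quantum
relative entropy is `D(ρ∥σ) = Tr(ρ (log ρ - log σ))` whenever the support of `ρ`
is contained in the support of `σ`, and `+∞` otherwise.
-/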
/-
Theorem (quantum Pinsker inequality)
-/
theorem quantum_pinsker {ρ : density_operator n} {σ : linear_operator} :
D(ρ∥σ) ≥ 1/(2 * ln 2) * ∥ρ - σ∥₁^2 :=
begin
sorry
end
---- SQUASHED ENTANGLEMENT
/-
Definition (squashed entanglement of quantum state for a given partition)
-/
def squashed_entanglement (ρ : density_operator n)
{hρ : ρ.dim > 4} (partition_point : ℕ) : ℝ := sorry
|
21dc290d380f820549ffa099ccc6ff5f61847d65
|
cb43248ccd028db2c147695d2c68772d583ed390
|
/code/test.lean
|
30c992980a6a932ce728886ac1fea2ceba75868e
|
[] |
no_license
|
codyroux/ny-haskell
|
812d872a74be7c68d3dac5c69507214bad2c660e
|
463e730c5ff1e50c43415714aa27ea4ebf4f64b6
|
refs/heads/master
| 1,610,284,527,025
| 1,448,464,569,000
| 1,448,464,569,000
| 46,724,063
| 4
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 517
|
lean
|
import data.nat.div
print prefix dvd
print notation ∣
print notation +
check (3 ∣ 6)
-- print prefix ∣
print prefix num
print definition nat.add
definition pred : ℕ → ℕ
| pred 0 := 0
| pred (nat.succ n') := n'
definition add_nat : ℕ → ℕ → ℕ
| add_nat 0 m := m
| add_nat (nat.succ n') m := nat.succ (add_nat n' m)
definition mul_nat : ℕ → ℕ → ℕ
| mul_nat 0 m := 0
| mul_nat (nat.succ n') m := add_nat m (mul_nat n' m)
definition prime (p : ℕ) : Prop := 2 ≤ p ∧ ∀ d, d ∣ p → d = 1 ∨ d = p
|
93a30c920274c93ceec277f0736342e10dd48f20
|
d9d511f37a523cd7659d6f573f990e2a0af93c6f
|
/src/algebra/lie/tensor_product.lean
|
b173fc9b4dbb7da1db01099db7b0bf3ef3da2606
|
[
"Apache-2.0"
] |
permissive
|
hikari0108/mathlib
|
b7ea2b7350497ab1a0b87a09d093ecc025a50dfa
|
a9e7d333b0cfd45f13a20f7b96b7d52e19fa2901
|
refs/heads/master
| 1,690,483,608,260
| 1,631,541,580,000
| 1,631,541,580,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 8,873
|
lean
|
/-
Copyright (c) 2021 Oliver Nash. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Oliver Nash
-/
import algebra.lie.abelian
/-!
# Tensor products of Lie modules
Tensor products of Lie modules carry natural Lie module structures.
## Tags
lie module, tensor product, universal property
-/
universes u v w w₁ w₂ w₃
variables {R : Type u} [comm_ring R]
namespace tensor_product
open_locale tensor_product
namespace lie_module
open lie_module
variables {L : Type v} {M : Type w} {N : Type w₁} {P : Type w₂} {Q : Type w₃}
variables [lie_ring L] [lie_algebra R L]
variables [add_comm_group M] [module R M] [lie_ring_module L M] [lie_module R L M]
variables [add_comm_group N] [module R N] [lie_ring_module L N] [lie_module R L N]
variables [add_comm_group P] [module R P] [lie_ring_module L P] [lie_module R L P]
variables [add_comm_group Q] [module R Q] [lie_ring_module L Q] [lie_module R L Q]
local attribute [ext] tensor_product.ext
/-- It is useful to define the bracket via this auxiliary function so that we have a type-theoretic
expression of the fact that `L` acts by linear endomorphisms. It simplifies the proofs in
`lie_ring_module` below. -/
def has_bracket_aux (x : L) : module.End R (M ⊗[R] N) :=
(to_endomorphism R L M x).rtensor N + (to_endomorphism R L N x).ltensor M
/-- The tensor product of two Lie modules is a Lie ring module. -/
instance lie_ring_module : lie_ring_module L (M ⊗[R] N) :=
{ bracket := λ x, has_bracket_aux x,
add_lie := λ x y t, by
{ simp only [has_bracket_aux, linear_map.ltensor_add, linear_map.rtensor_add, lie_hom.map_add,
linear_map.add_apply], abel, },
lie_add := λ x, linear_map.map_add _,
leibniz_lie := λ x y t, by
{ suffices : (has_bracket_aux x).comp (has_bracket_aux y) =
has_bracket_aux ⁅x,y⁆ + (has_bracket_aux y).comp (has_bracket_aux x),
{ simp only [← linear_map.comp_apply, ← linear_map.add_apply], rw this, },
ext m n,
simp only [has_bracket_aux, lie_ring.of_associative_ring_bracket, linear_map.mul_apply,
mk_apply, linear_map.ltensor_sub, linear_map.compr₂_apply, function.comp_app,
linear_map.coe_comp, linear_map.rtensor_tmul, lie_hom.map_lie,
to_endomorphism_apply_apply, linear_map.add_apply, linear_map.map_add,
linear_map.rtensor_sub, linear_map.sub_apply, linear_map.ltensor_tmul],
abel, }, }
/-- The tensor product of two Lie modules is a Lie module. -/
instance lie_module : lie_module R L (M ⊗[R] N) :=
{ smul_lie := λ c x t, by
{ change has_bracket_aux (c • x) _ = c • has_bracket_aux _ _,
simp only [has_bracket_aux, smul_add, linear_map.rtensor_smul, linear_map.smul_apply,
linear_map.ltensor_smul, lie_hom.map_smul, linear_map.add_apply], },
lie_smul := λ c x, linear_map.map_smul _ c, }
@[simp] lemma lie_tmul_right (x : L) (m : M) (n : N) :
⁅x, m ⊗ₜ[R] n⁆ = ⁅x, m⁆ ⊗ₜ n + m ⊗ₜ ⁅x, n⁆ :=
show has_bracket_aux x (m ⊗ₜ[R] n) = _,
by simp only [has_bracket_aux, linear_map.rtensor_tmul, to_endomorphism_apply_apply,
linear_map.add_apply, linear_map.ltensor_tmul]
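-- Illustrative usage (added for exposition; not in the original mathlib file): on simple
-- tensors the bracket obeys a Leibniz-style rule, which `lie_tmul_right` provides directly.
example (x : L) (m : M) (n : N) : ⁅x, m ⊗ₜ[R] n⁆ = ⁅x, m⁆ ⊗ₜ n + m ⊗ₜ ⁅x, n⁆ :=
lie_tmul_right x m n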
variables (R L M N P Q)
/-- The universal property for tensor product of modules of a Lie algebra: the `R`-linear
tensor-hom adjunction is equivariant with respect to the `L` action. -/
def lift : (M →ₗ[R] N →ₗ[R] P) ≃ₗ⁅R,L⁆ (M ⊗[R] N →ₗ[R] P) :=
{ map_lie' := λ x f, by
{ ext m n, simp only [mk_apply, linear_map.compr₂_apply, lie_tmul_right, linear_map.sub_apply,
lift.equiv_apply, linear_equiv.to_fun_eq_coe, lie_hom.lie_apply, linear_map.map_add],
abel, },
..tensor_product.lift.equiv R M N P }
@[simp] lemma lift_apply (f : M →ₗ[R] N →ₗ[R] P) (m : M) (n : N) :
lift R L M N P f (m ⊗ₜ n) = f m n :=
lift.equiv_apply R M N P f m n
/-- A weaker form of the universal property for tensor product of modules of a Lie algebra.
Note that maps `f` of type `M →ₗ⁅R,L⁆ N →ₗ[R] P` are exactly those `R`-bilinear maps satisfying
`⁅x, f m n⁆ = f ⁅x, m⁆ n + f m ⁅x, n⁆` for all `x, m, n` (see e.g. `lie_module_hom.map_lie₂`). -/
def lift_lie : (M →ₗ⁅R,L⁆ N →ₗ[R] P) ≃ₗ[R] (M ⊗[R] N →ₗ⁅R,L⁆ P) :=
(max_triv_linear_map_equiv_lie_module_hom.symm ≪≫ₗ
↑(max_triv_equiv (lift R L M N P))) ≪≫ₗ
max_triv_linear_map_equiv_lie_module_hom
@[simp] lemma coe_lift_lie_eq_lift_coe (f : M →ₗ⁅R,L⁆ N →ₗ[R] P) :
⇑(lift_lie R L M N P f) = lift R L M N P f :=
begin
suffices : (lift_lie R L M N P f : M ⊗[R] N →ₗ[R] P) = lift R L M N P f,
{ rw [← this, lie_module_hom.coe_to_linear_map], },
ext m n,
simp only [lift_lie, linear_equiv.trans_apply, lie_module_equiv.coe_to_linear_equiv,
coe_linear_map_max_triv_linear_map_equiv_lie_module_hom, coe_max_triv_equiv_apply,
coe_linear_map_max_triv_linear_map_equiv_lie_module_hom_symm],
end
lemma lift_lie_apply (f : M →ₗ⁅R,L⁆ N →ₗ[R] P) (m : M) (n : N) :
lift_lie R L M N P f (m ⊗ₜ n) = f m n :=
by simp only [coe_lift_lie_eq_lift_coe, lie_module_hom.coe_to_linear_map, lift_apply]
variables {R L M N P Q}
/-- A pair of Lie module morphisms `f : M → P` and `g : N → Q` induces a Lie module morphism:
`M ⊗ N → P ⊗ Q`. -/
def map (f : M →ₗ⁅R,L⁆ P) (g : N →ₗ⁅R,L⁆ Q) : M ⊗[R] N →ₗ⁅R,L⁆ P ⊗[R] Q :=
{ map_lie' := λ x t, by
{ simp only [linear_map.to_fun_eq_coe],
apply t.induction_on,
{ simp only [linear_map.map_zero, lie_zero], },
{ intros m n, simp only [lie_module_hom.coe_to_linear_map, lie_tmul_right,
lie_module_hom.map_lie, map_tmul, linear_map.map_add], },
{ intros t₁ t₂ ht₁ ht₂, simp only [ht₁, ht₂, lie_add, linear_map.map_add], }, },
.. map (f : M →ₗ[R] P) (g : N →ₗ[R] Q), }
@[simp] lemma coe_linear_map_map (f : M →ₗ⁅R,L⁆ P) (g : N →ₗ⁅R,L⁆ Q) :
(map f g : M ⊗[R] N →ₗ[R] P ⊗[R] Q) = tensor_product.map (f : M →ₗ[R] P) (g : N →ₗ[R] Q) :=
rfl
@[simp] lemma map_tmul (f : M →ₗ⁅R,L⁆ P) (g : N →ₗ⁅R,L⁆ Q) (m : M) (n : N) :
map f g (m ⊗ₜ n) = (f m) ⊗ₜ (g n) :=
map_tmul f g m n
/-- Given Lie submodules `M' ⊆ M` and `N' ⊆ N`, this is the natural map: `M' ⊗ N' → M ⊗ N`. -/
def map_incl (M' : lie_submodule R L M) (N' : lie_submodule R L N) :
M' ⊗[R] N' →ₗ⁅R,L⁆ M ⊗[R] N :=
map M'.incl N'.incl
@[simp] lemma map_incl_def (M' : lie_submodule R L M) (N' : lie_submodule R L N) :
map_incl M' N' = map M'.incl N'.incl :=
rfl
end lie_module
end tensor_product
namespace lie_module
open_locale tensor_product
variables (R) (L : Type v) (M : Type w)
variables [lie_ring L] [lie_algebra R L]
variables [add_comm_group M] [module R M] [lie_ring_module L M] [lie_module R L M]
/-- The action of the Lie algebra on one of its modules, regarded as a morphism of Lie modules. -/
def to_module_hom : L ⊗[R] M →ₗ⁅R,L⁆ M :=
tensor_product.lie_module.lift_lie R L L M M
{ map_lie' := λ x m, by { ext n, simp [lie_ring.of_associative_ring_bracket], },
..(to_endomorphism R L M : L →ₗ[R] M →ₗ[R] M), }
@[simp] lemma to_module_hom_apply (x : L) (m : M) :
to_module_hom R L M (x ⊗ₜ m) = ⁅x, m⁆ :=
by simp only [to_module_hom, tensor_product.lie_module.lift_lie_apply, to_endomorphism_apply_apply,
lie_hom.coe_to_linear_map, lie_module_hom.coe_mk, linear_map.to_fun_eq_coe]
end lie_module
namespace lie_submodule
open_locale tensor_product
open tensor_product.lie_module
open lie_module
variables {L : Type v} {M : Type w}
variables [lie_ring L] [lie_algebra R L]
variables [add_comm_group M] [module R M] [lie_ring_module L M] [lie_module R L M]
variables (I : lie_ideal R L) (N : lie_submodule R L M)
/-- A useful alternative characterisation of Lie ideal operations on Lie submodules.
Given a Lie ideal `I ⊆ L` and a Lie submodule `N ⊆ M`, by tensoring the inclusion maps and then
applying the action of `L` on `M`, we obtain a morphism of Lie modules `f : I ⊗ N → L ⊗ M → M`.
This lemma states that `⁅I, N⁆ = range f`. -/
lemma lie_ideal_oper_eq_tensor_map_range :
⁅I, N⁆ = ((to_module_hom R L M).comp (map_incl I N : ↥I ⊗ ↥N →ₗ⁅R,L⁆ L ⊗ M)).range :=
begin
rw [← coe_to_submodule_eq_iff, lie_ideal_oper_eq_linear_span, lie_module_hom.coe_submodule_range,
lie_module_hom.coe_linear_map_comp, linear_map.range_comp, map_incl_def, coe_linear_map_map,
tensor_product.map_range_eq_span_tmul, submodule.map_span],
congr, ext m, split,
{ rintros ⟨⟨x, hx⟩, ⟨n, hn⟩, rfl⟩, use x ⊗ₜ n, split,
{ use [⟨x, hx⟩, ⟨n, hn⟩], simp, },
{ simp, }, },
{ rintros ⟨t, ⟨⟨x, hx⟩, ⟨n, hn⟩, rfl⟩, h⟩, rw ← h, use [⟨x, hx⟩, ⟨n, hn⟩], simp, },
end
end lie_submodule
|
6c19bb8a76beb8c2233381465f20e3b2d21f5f2f
|
8cae430f0a71442d02dbb1cbb14073b31048e4b0
|
/src/tactic/doc_commands.lean
|
3f493a4a83e64d102f8cce5e301ac11bfa18d390
|
[
"Apache-2.0"
] |
permissive
|
leanprover-community/mathlib
|
56a2cadd17ac88caf4ece0a775932fa26327ba0e
|
442a83d738cb208d3600056c489be16900ba701d
|
refs/heads/master
| 1,693,584,102,358
| 1,693,471,902,000
| 1,693,471,902,000
| 97,922,418
| 1,595
| 352
|
Apache-2.0
| 1,694,693,445,000
| 1,500,624,130,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 16,402
|
lean
|
/-
Copyright (c) 2020 Robert Y. Lewis. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Robert Y. Lewis
-/
/-!
# Documentation commands
We generate html documentation from mathlib. It is convenient to collect lists of tactics, commands,
notes, etc. To facilitate this, we declare these documentation entries in the library
using special commands.
* `library_note` adds a note describing a certain feature or design decision. These can be
referenced in doc strings with the text `note [name of note]`.
* `add_tactic_doc` adds an entry documenting an interactive tactic, command, hole command, or
attribute.
Since these commands are used in files imported by `tactic.core`, this file has no imports.
## Implementation details
`library_note note_id note_msg` creates a declaration `` `library_note.i `` for some `i`.
This declaration is a pair of strings `note_id` and `note_msg`, and it gets tagged with the
`library_note` attribute.
Similarly, `add_tactic_doc` creates a declaration `` `tactic_doc.i `` that stores the provided
information.
-/
/-- A rudimentary hash function on strings. -/
def string.hash (s : string) : ℕ :=
s.fold 1 (λ h c, (33*h + c.val) % unsigned_sz)
/-- Get the last component of a name, and convert it to a string. -/
meta def name.last : name → string
| (name.mk_string s _) := s
| (name.mk_numeral n _) := repr n
| anonymous := "[anonymous]"
open tactic
/--
`copy_doc_string fr to` copies the docstring from the declaration named `fr`
to each declaration named in the list `to`. -/
meta def tactic.copy_doc_string (fr : name) (to : list name) : tactic unit :=
do fr_ds ← doc_string fr,
to.mmap' $ λ tgt, add_doc_string tgt fr_ds
open lean lean.parser interactive
/--
`copy_doc_string source → target_1 target_2 ... target_n` copies the doc string of the
declaration named `source` to each of `target_1`, `target_2`, ..., `target_n`.
-/
@[user_command] meta def copy_doc_string_cmd
(_ : parse (tk "copy_doc_string")) : parser unit :=
do fr ← parser.ident,
tk "->",
to ← parser.many parser.ident,
expr.const fr _ ← resolve_name fr,
to ← parser.of_tactic (to.mmap $ λ n, expr.const_name <$> resolve_name n),
tactic.copy_doc_string fr to
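/-
Illustrative usage (added for exposition; not part of the original file; the names
`foo`, `bar` and `baz` are hypothetical):
  copy_doc_string foo → bar baz
copies the doc string of `foo` onto both `bar` and `baz`.
-/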
/-! ### The `library_note` command -/
/-- A user attribute `library_note` for tagging decls of type `string × string` for use in note
output. -/
@[user_attribute] meta def library_note_attr : user_attribute :=
{ name := `library_note,
descr := "Notes about library features to be included in documentation",
parser := failed }
/--
`mk_reflected_definition name val` constructs a definition declaration by reflection.
Example: ``mk_reflected_definition `foo 17`` constructs the definition
declaration corresponding to `def foo : ℕ := 17`
-/
meta def mk_reflected_definition (decl_name : name) {type} [reflected _ type]
(body : type) [reflected _ body] : declaration :=
mk_definition decl_name (reflect type).collect_univ_params (reflect type) (reflect body)
/--
If `note_name` and `note` are strings, `add_library_note note_name note` adds a declaration named
`library_note.<note_name>` with `note` as the docstring and tags it with the `library_note`
attribute.
-/
meta def tactic.add_library_note (note_name note : string) : tactic unit :=
do let decl_name := `library_note <.> note_name,
add_decl $ mk_reflected_definition decl_name (),
add_doc_string decl_name note,
library_note_attr.set decl_name () tt none
open tactic
/--
A command to add library notes. Syntax:
```
/--
note message
-/
library_note "note id"
```
-/
@[user_command] meta def library_note (mi : interactive.decl_meta_info)
(_ : parse (tk "library_note")) : parser unit := do
note_name ← parser.pexpr,
note_name ← eval_pexpr string note_name,
some doc_string ← pure mi.doc_string | fail "library_note requires a doc string",
add_library_note note_name doc_string
/-- Collects all notes in the current environment.
Returns a list of pairs `(note_id, note_content)` -/
meta def tactic.get_library_notes : tactic (list (string × string)) :=
attribute.get_instances `library_note >>=
list.mmap (λ dcl, prod.mk dcl.last <$> doc_string dcl)
/-! ### The `add_tactic_doc_entry` command -/
/-- The categories of tactic doc entry. -/
@[derive [decidable_eq, has_reflect]]
inductive doc_category
| tactic | cmd | hole_cmd | attr
/-- Format a `doc_category` -/
meta def doc_category.to_string : doc_category → string
| doc_category.tactic := "tactic"
| doc_category.cmd := "command"
| doc_category.hole_cmd := "hole_command"
| doc_category.attr := "attribute"
meta instance : has_to_format doc_category := ⟨↑doc_category.to_string⟩
/-- The information used to generate a tactic doc entry -/
@[derive has_reflect]
structure tactic_doc_entry :=
(name : string)
(category : doc_category)
(decl_names : list _root_.name)
(tags : list string := [])
(inherit_description_from : option _root_.name := none)
/-- Turns a `tactic_doc_entry` into a JSON representation. -/
meta def tactic_doc_entry.to_json (d : tactic_doc_entry) (desc : string) : json :=
json.object [
("name", d.name),
("category", d.category.to_string),
("decl_names", d.decl_names.map (json.of_string ∘ to_string)),
("tags", d.tags.map json.of_string),
("description", desc)
]
meta instance tactic_doc_entry.has_to_string : has_to_string (tactic_doc_entry × string) :=
⟨λ ⟨doc, desc⟩, json.unparse (doc.to_json desc)⟩
/-- A user attribute `tactic_doc` for tagging decls of type `tactic_doc_entry`
for use in doc output -/
@[user_attribute] meta def tactic_doc_entry_attr : user_attribute :=
{ name := `tactic_doc,
descr := "Information about a tactic to be included in documentation",
parser := failed }
/-- Collects everything in the environment tagged with the attribute `tactic_doc`. -/
meta def tactic.get_tactic_doc_entries : tactic (list (tactic_doc_entry × string)) :=
attribute.get_instances `tactic_doc >>=
list.mmap (λ dcl, prod.mk <$> (mk_const dcl >>= eval_expr tactic_doc_entry) <*> doc_string dcl)
/-- `add_tactic_doc tde` adds a declaration to the environment
with `tde` as its body and tags it with the `tactic_doc`
attribute. If `tde.decl_names` has exactly one entry `` `decl`` and
if `tde.description` is the empty string, `add_tactic_doc` uses the doc
string of `decl` as the description. -/
meta def tactic.add_tactic_doc (tde : tactic_doc_entry) (doc : option string) : tactic unit :=
do desc ← doc <|> (do
inh_id ← match tde.inherit_description_from, tde.decl_names with
| some inh_id, _ := pure inh_id
| none, [inh_id] := pure inh_id
| none, _ := fail "A tactic doc entry must either:
1. have a description written as a doc-string for the `add_tactic_doc` invocation, or
2. have a single declaration in the `decl_names` field, to inherit a description from, or
3. explicitly indicate the declaration to inherit the description from using
`inherit_description_from`."
end,
doc_string inh_id <|> fail (to_string inh_id ++ " has no doc string")),
let decl_name := `tactic_doc <.> tde.category.to_string <.> tde.name,
add_decl $ mk_definition decl_name [] `(tactic_doc_entry) (reflect tde),
add_doc_string decl_name desc,
tactic_doc_entry_attr.set decl_name () tt none
/--
A command used to add documentation for a tactic, command, hole command, or attribute.
Usage: after defining an interactive tactic, command, or attribute,
add its documentation as follows.
```lean
/--
describe what the command does here
-/
add_tactic_doc
{ name := "display name of the tactic",
category := cat,
decl_names := [`dcl_1, `dcl_2],
tags := ["tag_1", "tag_2"] }
```
The argument to `add_tactic_doc` is a structure of type `tactic_doc_entry`.
* `name` refers to the display name of the tactic; it is used as the header of the doc entry.
* `cat` refers to the category of doc entry.
Options: `doc_category.tactic`, `doc_category.cmd`, `doc_category.hole_cmd`, `doc_category.attr`
* `decl_names` is a list of the declarations associated with this doc. For instance,
the entry for `linarith` would set ``decl_names := [`tactic.interactive.linarith]``.
Some entries may cover multiple declarations.
It is only necessary to list the interactive versions of tactics.
* `tags` is an optional list of strings used to categorize entries.
* The doc string is the body of the entry. It can be formatted with markdown.
What you are reading now is the description of `add_tactic_doc`.
If only one related declaration is listed in `decl_names` and if this
invocation of `add_tactic_doc` does not have a doc string, the doc string of
that declaration will become the body of the tactic doc entry. If there are
multiple declarations, you can select the one to be used by passing a name to
the `inherit_description_from` field.
If you prefer a tactic to have a doc string that is different from the doc entry,
you should write the doc entry as a doc string for the `add_tactic_doc` invocation.
Note that providing a badly formed `tactic_doc_entry` to the command can result in strange error
messages.
-/
@[user_command] meta def add_tactic_doc_command (mi : interactive.decl_meta_info)
(_ : parse $ tk "add_tactic_doc") : parser unit := do
pe ← parser.pexpr,
e ← eval_pexpr tactic_doc_entry pe,
tactic.add_tactic_doc e mi.doc_string .
/--
At various places in mathlib, we leave implementation notes that are referenced from many other
files. To keep track of these notes, we use the command `library_note`. This makes it easy to
retrieve a list of all notes, e.g. for documentation output.
These notes can be referenced in mathlib with the syntax `Note [note id]`.
Often, these references will be made in code comments (`--`) that won't be displayed in docs.
If such a reference is made in a doc string or module doc, it will be linked to the corresponding
note in the doc display.
Syntax:
```
/--
note message
-/
library_note "note id"
```
An example from `meta.expr`:
```
/--
Some declarations work with open expressions, i.e. an expr that has free variables.
Terms will free variables are not well-typed, and one should not use them in tactics like
`infer_type` or `unify`. You can still do syntactic analysis/manipulation on them.
The reason for working with open types is for performance: instantiating variables requires
iterating through the expression. In one performance test `pi_binders` was more than 6x
quicker than `mk_local_pis` (when applied to the type of all imported declarations 100x).
-/
library_note "open expressions"
```
This note can be referenced near a usage of `pi_binders`:
```
-- See Note [open expressions]
/-- behavior of f -/
def f := pi_binders ...
```
-/
add_tactic_doc
{ name := "library_note",
category := doc_category.cmd,
decl_names := [`library_note, `tactic.add_library_note],
tags := ["documentation"],
inherit_description_from := `library_note }
add_tactic_doc
{ name := "add_tactic_doc",
category := doc_category.cmd,
decl_names := [`add_tactic_doc_command, `tactic.add_tactic_doc],
tags := ["documentation"],
inherit_description_from := `add_tactic_doc_command }
add_tactic_doc
{ name := "copy_doc_string",
category := doc_category.cmd,
decl_names := [`copy_doc_string_cmd, `tactic.copy_doc_string],
tags := ["documentation"],
inherit_description_from := `copy_doc_string_cmd }
-- add docs to core tactics
/--
The congruence closure tactic `cc` tries to solve the goal by chaining
equalities from context and applying congruence (i.e. if `a = b`, then `f a = f b`).
It is a finishing tactic, i.e. it is meant to close
the current goal, not to make some inconclusive progress.
A mostly trivial example would be:
```lean
example (a b c : ℕ) (f : ℕ → ℕ) (h: a = b) (h' : b = c) : f a = f c := by cc
```
As an example requiring some thinking to do by hand, consider:
```lean
example (f : ℕ → ℕ) (x : ℕ)
(H1 : f (f (f x)) = x) (H2 : f (f (f (f (f x)))) = x) :
f x = x :=
by cc
```
The tactic works by building an equality matching graph. It's a graph where
the vertices are terms and they are linked by edges if they are known to
be equal. Once you've added all the equalities in your context, you take
the transitive closure of the graph and, for each connected component
(i.e. equivalence class) you can elect a term that will represent the
whole class and store proofs that the other elements are equal to it.
You then take the transitive closure of these equalities under the
congruence lemmas.
The `cc` implementation in Lean does a few more tricks: for example it
derives `a=b` from `nat.succ a = nat.succ b`, and `nat.succ a !=
nat.zero` for any `a`.
* The starting reference point is Nelson, Oppen, [Fast decision procedures based on congruence
closure](http://www.cs.colorado.edu/~bec/courses/csci5535-s09/reading/nelson-oppen-congruence.pdf),
Journal of the ACM (1980)
* The congruence lemmas for dependent type theory as used in Lean are described in
[Congruence closure in intensional type theory](https://leanprover.github.io/papers/congr.pdf)
(de Moura, Selsam IJCAR 2016).
-/
add_tactic_doc
{ name := "cc (congruence closure)",
category := doc_category.tactic,
decl_names := [`tactic.interactive.cc],
tags := ["core", "finishing"] }
/--
`conv {...}` allows the user to perform targeted rewriting on a goal or hypothesis,
by focusing on particular subexpressions.
See <https://leanprover-community.github.io/extras/conv.html> for more details.
Inside `conv` blocks, mathlib currently additionally provides
* `erw`,
* `ring`, `ring2` and `ring_exp`,
* `norm_num`,
* `norm_cast`,
* `apply_congr`, and
* `conv` (within another `conv`).
`apply_congr` applies congruence lemmas to step further inside expressions,
and sometimes gives better results than the automatically generated
congruence lemmas used by `congr`.
Using `conv` inside a `conv` block allows the user to return to the previous
state of the outer `conv` block after it is finished. Thus you can continue
editing an expression without having to start a new `conv` block and re-scoping
everything. For example:
```lean
example (a b c d : ℕ) (h₁ : b = c) (h₂ : a + c = a + d) : a + b = a + d :=
by conv
{ to_lhs,
conv
{ congr, skip,
rw h₁ },
rw h₂, }
```
Without `conv`, the above example would need to be proved using two successive
`conv` blocks, each beginning with `to_lhs`.
Also, as a shorthand, `conv_lhs` and `conv_rhs` are provided, so that
```lean
example : 0 + 0 = 0 :=
begin
conv_lhs { simp }
end
```
just means
```lean
example : 0 + 0 = 0 :=
begin
conv { to_lhs, simp }
end
```
and likewise for `to_rhs`.
-/
add_tactic_doc
{ name := "conv",
category := doc_category.tactic,
decl_names := [`tactic.interactive.conv],
tags := ["core"] }
add_tactic_doc
{ name := "simp",
category := doc_category.tactic,
decl_names := [`tactic.interactive.simp],
tags := ["core", "simplification"] }
/--
Accepts terms with the type `component tactic_state string` or `html empty` and
renders them interactively.
Requires a compatible version of the vscode extension to view the resulting widget.
### Example:
```lean
/-- A simple counter that can be incremented or decremented with some buttons. -/
meta def counter_widget {π α : Type} : component π α :=
component.ignore_props $ component.mk_simple int int 0 (λ _ x y, (x + y, none)) (λ _ s,
h "div" [] [
button "+" (1 : int),
html.of_string $ to_string $ s,
button "-" (-1)
]
)
#html counter_widget
```
-/
add_tactic_doc
{ name := "#html",
category := doc_category.cmd,
decl_names := [`show_widget_cmd],
tags := ["core", "widgets"] }
/--
The `add_decl_doc` command is used to add a doc string to an existing declaration.
```lean
def foo := 5
/--
Doc string for foo.
-/
add_decl_doc foo
```
-/
@[user_command] meta def add_decl_doc_command (mi : interactive.decl_meta_info)
(_ : parse $ tk "add_decl_doc") : parser unit := do
n ← parser.ident,
n ← resolve_constant n,
some doc ← pure mi.doc_string | fail "add_decl_doc requires a doc string",
add_doc_string n doc
add_tactic_doc
{ name := "add_decl_doc",
category := doc_category.cmd,
decl_names := [``add_decl_doc_command],
tags := ["documentation"] }
|
2b1bb923596a79166e095403c13ab3c2f3a0f1ad
|
b7f22e51856f4989b970961f794f1c435f9b8f78
|
/tests/lean/calc1.lean
|
ba8eb878efc7a9dc39113e95ffa94db1d78b9c1f
|
[
"Apache-2.0"
] |
permissive
|
soonhokong/lean
|
cb8aa01055ffe2af0fb99a16b4cda8463b882cd1
|
38607e3eb57f57f77c0ac114ad169e9e4262e24f
|
refs/heads/master
| 1,611,187,284,081
| 1,450,766,737,000
| 1,476,122,547,000
| 11,513,992
| 2
| 0
| null | 1,401,763,102,000
| 1,374,182,235,000
|
C++
|
UTF-8
|
Lean
| false
| false
| 1,639
|
lean
|
prelude constant A : Type.{1}
definition bool : Type.{1} := Type.{0}
constant eq : A → A → bool
infixl ` = `:50 := eq
axiom subst (P : A → bool) (a b : A) (H1 : a = b) (H2 : P a) : P b
axiom eq_trans (a b c : A) (H1 : a = b) (H2 : b = c) : a = c
axiom eq_refl (a : A) : a = a
constant le : A → A → bool
infixl ` ≤ `:50 := le
axiom le_trans (a b c : A) (H1 : a ≤ b) (H2 : b ≤ c) : a ≤ c
axiom le_refl (a : A) : a ≤ a
axiom eq_le_trans (a b c : A) (H1 : a = b) (H2 : b ≤ c) : a ≤ c
axiom le_eq_trans (a b c : A) (H1 : a ≤ b) (H2 : b = c) : a ≤ c
attribute subst [subst]
attribute eq_refl [refl]
attribute le_refl [refl]
attribute eq_trans [trans]
attribute le_trans [trans]
attribute eq_le_trans [trans]
attribute le_eq_trans [trans]
constants a b c d e f : A
axiom H1 : a = b
axiom H2 : b ≤ c
axiom H3 : c ≤ d
axiom H4 : d = e
check calc a = b : H1
... ≤ c : H2
... ≤ d : H3
... = e : H4
constant lt : A → A → bool
infixl ` < `:50 := lt
axiom lt_trans (a b c : A) (H1 : a < b) (H2 : b < c) : a < c
axiom le_lt_trans (a b c : A) (H1 : a ≤ b) (H2 : b < c) : a < c
axiom lt_le_trans (a b c : A) (H1 : a < b) (H2 : b ≤ c) : a < c
axiom H5 : c < d
check calc b ≤ c : H2
... < d : H5 -- Error le_lt_trans was not registered yet
attribute le_lt_trans [trans]
check calc b ≤ c : H2
... < d : H5
constant le2 : A → A → bool
infixl ` ≤ `:50 := le2
constant le2_trans (a b c : A) (H1 : le2 a b) (H2 : le2 b c) : le2 a c
attribute le2_trans [trans]
print raw calc b ≤ c : H2
... ≤ d : H3
... ≤ e : H4
|
cdcd027405061f98db8c32e1bce8eba42e50ecb1
|
dd0f5513e11c52db157d2fcc8456d9401a6cd9da
|
/09_Type_Classes.org.31.lean
|
8076a443123d996ec6c8c415096a674ef46188f9
|
[] |
no_license
|
cjmazey/lean-tutorial
|
ba559a49f82aa6c5848b9bf17b7389bf7f4ba645
|
381f61c9fcac56d01d959ae0fa6e376f2c4e3b34
|
refs/heads/master
| 1,610,286,098,832
| 1,447,124,923,000
| 1,447,124,923,000
| 43,082,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 1,398
|
lean
|
import standard
import data.nat
open nat decidable
definition ball (n : nat) (P : nat → Prop) : Prop :=
∀ x, x < n → P x
definition ball_zero (P : nat → Prop) : ball zero P :=
λ x Hlt, absurd Hlt !not_lt_zero
variables {n : nat} {P : nat → Prop}
definition ball_of_ball_succ (H : ball (succ n) P) : ball n P :=
λ x Hlt, H x (lt.step Hlt)
definition ball_succ_of_ball (H₁ : ball n P) (H₂ : P n) : ball (succ n) P :=
λ (x : nat) (Hlt : x < succ n), or.elim (eq_or_lt_of_le (le_of_lt_succ Hlt))
(λ he : x = n, eq.rec_on (eq.rec_on he rfl) H₂)
(λ hlt : x < n, H₁ x hlt)
definition not_ball_of_not (H₁ : ¬ P n) : ¬ ball (succ n) P :=
λ (H : ball (succ n) P), absurd (H n (lt.base n)) H₁
definition not_ball_succ_of_not_ball (H₁ : ¬ ball n P) : ¬ ball (succ n) P :=
λ (H : ball (succ n) P), absurd (ball_of_ball_succ H) H₁
definition dec_ball [instance] (H : decidable_pred P) : Π (n : nat), decidable (ball n P)
| dec_ball 0 := inl (ball_zero P)
| dec_ball (a+1) :=
match dec_ball a with
| inl iH :=
match H a with
| inl Pa := inl (ball_succ_of_ball iH Pa)
| inr nPa := inr (not_ball_of_not nPa)
end
| inr niH := inr (not_ball_succ_of_not_ball niH)
end
-- BEGIN
open bool
definition is_constant_range (f : nat → nat) (n : nat) : bool :=
if ∀ i, i < n → f i = f 0 then tt else ff
example : is_constant_range (λ i, zero) 10 = tt :=
rfl
-- END
|
cc5377ba6ddcec1554ac82fb115c5c5b15c9bc69
|
74addaa0e41490cbaf2abd313a764c96df57b05d
|
/Mathlib/data/pequiv.lean
|
7b763acba996971d9875f0e98b2570a98f0cc926
|
[] |
no_license
|
AurelienSaue/Mathlib4_auto
|
f538cfd0980f65a6361eadea39e6fc639e9dae14
|
590df64109b08190abe22358fabc3eae000943f2
|
refs/heads/master
| 1,683,906,849,776
| 1,622,564,669,000
| 1,622,564,669,000
| 371,723,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 11,824
|
lean
|
/-
Copyright (c) 2019 Chris Hughes. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Chris Hughes
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.data.set.lattice
import Mathlib.PostPort
universes u v l u_1 w x u_2 u_3
namespace Mathlib
/-- A `pequiv` is a partial equivalence, a representation of a bijection between a subset
of `α` and a subset of `β` -/
structure pequiv (α : Type u) (β : Type v)
where
to_fun : α → Option β
inv_fun : β → Option α
inv : ∀ (a : α) (b : β), a ∈ inv_fun b ↔ b ∈ to_fun a
infixr:25 " ≃. " => Mathlib.pequiv
namespace pequiv
protected instance has_coe_to_fun {α : Type u} {β : Type v} : has_coe_to_fun (α ≃. β) :=
has_coe_to_fun.mk (fun (x : α ≃. β) => α → Option β) to_fun
@[simp] theorem coe_mk_apply {α : Type u} {β : Type v} (f₁ : α → Option β) (f₂ : β → Option α) (h : ∀ (a : α) (b : β), a ∈ f₂ b ↔ b ∈ f₁ a) (x : α) : coe_fn (mk f₁ f₂ h) x = f₁ x :=
rfl
theorem ext {α : Type u} {β : Type v} {f : α ≃. β} {g : α ≃. β} (h : ∀ (x : α), coe_fn f x = coe_fn g x) : f = g := sorry
theorem ext_iff {α : Type u} {β : Type v} {f : α ≃. β} {g : α ≃. β} : f = g ↔ ∀ (x : α), coe_fn f x = coe_fn g x :=
{ mp := congr_fun ∘ congr_arg fun {f : α ≃. β} (x : α) => coe_fn f x, mpr := ext }
protected def refl (α : Type u_1) : α ≃. α :=
mk some some sorry
protected def symm {α : Type u} {β : Type v} (f : α ≃. β) : β ≃. α :=
mk (inv_fun f) (to_fun f) sorry
theorem mem_iff_mem {α : Type u} {β : Type v} (f : α ≃. β) {a : α} {b : β} : a ∈ coe_fn (pequiv.symm f) b ↔ b ∈ coe_fn f a :=
inv f
theorem eq_some_iff {α : Type u} {β : Type v} (f : α ≃. β) {a : α} {b : β} : coe_fn (pequiv.symm f) b = some a ↔ coe_fn f a = some b :=
inv f
protected def trans {α : Type u} {β : Type v} {γ : Type w} (f : α ≃. β) (g : β ≃. γ) : α ≃. γ :=
mk (fun (a : α) => option.bind (coe_fn f a) ⇑g) (fun (a : γ) => option.bind (coe_fn (pequiv.symm g) a) ⇑(pequiv.symm f))
sorry
@[simp] theorem refl_apply {α : Type u} (a : α) : coe_fn (pequiv.refl α) a = some a :=
rfl
@[simp] theorem symm_refl {α : Type u} : pequiv.symm (pequiv.refl α) = pequiv.refl α :=
rfl
@[simp] theorem symm_symm {α : Type u} {β : Type v} (f : α ≃. β) : pequiv.symm (pequiv.symm f) = f := sorry
theorem symm_injective {α : Type u} {β : Type v} : function.injective pequiv.symm :=
function.left_inverse.injective symm_symm
theorem trans_assoc {α : Type u} {β : Type v} {γ : Type w} {δ : Type x} (f : α ≃. β) (g : β ≃. γ) (h : γ ≃. δ) : pequiv.trans (pequiv.trans f g) h = pequiv.trans f (pequiv.trans g h) :=
ext fun (_x : α) => option.bind_assoc (coe_fn f _x) ⇑g ⇑h
theorem mem_trans {α : Type u} {β : Type v} {γ : Type w} (f : α ≃. β) (g : β ≃. γ) (a : α) (c : γ) : c ∈ coe_fn (pequiv.trans f g) a ↔ ∃ (b : β), b ∈ coe_fn f a ∧ c ∈ coe_fn g b :=
option.bind_eq_some'
theorem trans_eq_some {α : Type u} {β : Type v} {γ : Type w} (f : α ≃. β) (g : β ≃. γ) (a : α) (c : γ) : coe_fn (pequiv.trans f g) a = some c ↔ ∃ (b : β), coe_fn f a = some b ∧ coe_fn g b = some c :=
option.bind_eq_some'
theorem trans_eq_none {α : Type u} {β : Type v} {γ : Type w} (f : α ≃. β) (g : β ≃. γ) (a : α) : coe_fn (pequiv.trans f g) a = none ↔ ∀ (b : β) (c : γ), ¬b ∈ coe_fn f a ∨ ¬c ∈ coe_fn g b := sorry
@[simp] theorem refl_trans {α : Type u} {β : Type v} (f : α ≃. β) : pequiv.trans (pequiv.refl α) f = f :=
ext fun (x : α) => option.ext fun (a : β) => id (iff.refl (a ∈ coe_fn f x))
@[simp] theorem trans_refl {α : Type u} {β : Type v} (f : α ≃. β) : pequiv.trans f (pequiv.refl β) = f := sorry
protected theorem inj {α : Type u} {β : Type v} (f : α ≃. β) {a₁ : α} {a₂ : α} {b : β} (h₁ : b ∈ coe_fn f a₁) (h₂ : b ∈ coe_fn f a₂) : a₁ = a₂ := sorry
theorem injective_of_forall_ne_is_some {α : Type u} {β : Type v} (f : α ≃. β) (a₂ : α) (h : ∀ (a₁ : α), a₁ ≠ a₂ → ↥(option.is_some (coe_fn f a₁))) : function.injective ⇑f := sorry
theorem injective_of_forall_is_some {α : Type u} {β : Type v} {f : α ≃. β} (h : ∀ (a : α), ↥(option.is_some (coe_fn f a))) : function.injective ⇑f := sorry
def of_set {α : Type u} (s : set α) [decidable_pred s] : α ≃. α :=
mk (fun (a : α) => ite (a ∈ s) (some a) none) (fun (a : α) => ite (a ∈ s) (some a) none) sorry
theorem mem_of_set_self_iff {α : Type u} {s : set α} [decidable_pred s] {a : α} : a ∈ coe_fn (of_set s) a ↔ a ∈ s := sorry
theorem mem_of_set_iff {α : Type u} {s : set α} [decidable_pred s] {a : α} {b : α} : a ∈ coe_fn (of_set s) b ↔ a = b ∧ a ∈ s := sorry
@[simp] theorem of_set_eq_some_iff {α : Type u} {s : set α} {h : decidable_pred s} {a : α} {b : α} : coe_fn (of_set s) b = some a ↔ a = b ∧ a ∈ s :=
mem_of_set_iff
@[simp] theorem of_set_eq_some_self_iff {α : Type u} {s : set α} {h : decidable_pred s} {a : α} : coe_fn (of_set s) a = some a ↔ a ∈ s :=
mem_of_set_self_iff
@[simp] theorem of_set_symm {α : Type u} (s : set α) [decidable_pred s] : pequiv.symm (of_set s) = of_set s :=
rfl
@[simp] theorem of_set_univ {α : Type u} : of_set set.univ = pequiv.refl α :=
rfl
@[simp] theorem of_set_eq_refl {α : Type u} {s : set α} [decidable_pred s] : of_set s = pequiv.refl α ↔ s = set.univ := sorry
theorem symm_trans_rev {α : Type u} {β : Type v} {γ : Type w} (f : α ≃. β) (g : β ≃. γ) : pequiv.symm (pequiv.trans f g) = pequiv.trans (pequiv.symm g) (pequiv.symm f) :=
rfl
theorem trans_symm {α : Type u} {β : Type v} (f : α ≃. β) : pequiv.trans f (pequiv.symm f) = of_set (set_of fun (a : α) => ↥(option.is_some (coe_fn f a))) := sorry
theorem symm_trans {α : Type u} {β : Type v} (f : α ≃. β) : pequiv.trans (pequiv.symm f) f = of_set (set_of fun (b : β) => ↥(option.is_some (coe_fn (pequiv.symm f) b))) := sorry
theorem trans_symm_eq_iff_forall_is_some {α : Type u} {β : Type v} {f : α ≃. β} : pequiv.trans f (pequiv.symm f) = pequiv.refl α ↔ ∀ (a : α), ↥(option.is_some (coe_fn f a)) := sorry
protected instance has_bot {α : Type u} {β : Type v} : has_bot (α ≃. β) :=
has_bot.mk (mk (fun (_x : α) => none) (fun (_x : β) => none) sorry)
@[simp] theorem bot_apply {α : Type u} {β : Type v} (a : α) : coe_fn ⊥ a = none :=
rfl
@[simp] theorem symm_bot {α : Type u} {β : Type v} : pequiv.symm ⊥ = ⊥ :=
rfl
@[simp] theorem trans_bot {α : Type u} {β : Type v} {γ : Type w} (f : α ≃. β) : pequiv.trans f ⊥ = ⊥ := sorry
@[simp] theorem bot_trans {α : Type u} {β : Type v} {γ : Type w} (f : β ≃. γ) : pequiv.trans ⊥ f = ⊥ := sorry
theorem is_some_symm_get {α : Type u} {β : Type v} (f : α ≃. β) {a : α} (h : ↥(option.is_some (coe_fn f a))) : ↥(option.is_some (coe_fn (pequiv.symm f) (option.get h))) := sorry
def single {α : Type u} {β : Type v} [DecidableEq α] [DecidableEq β] (a : α) (b : β) : α ≃. β :=
mk (fun (x : α) => ite (x = a) (some b) none) (fun (x : β) => ite (x = b) (some a) none) sorry
theorem mem_single {α : Type u} {β : Type v} [DecidableEq α] [DecidableEq β] (a : α) (b : β) : b ∈ coe_fn (single a b) a :=
if_pos rfl
theorem mem_single_iff {α : Type u} {β : Type v} [DecidableEq α] [DecidableEq β] (a₁ : α) (a₂ : α) (b₁ : β) (b₂ : β) : b₁ ∈ coe_fn (single a₂ b₂) a₁ ↔ a₁ = a₂ ∧ b₁ = b₂ := sorry
@[simp] theorem symm_single {α : Type u} {β : Type v} [DecidableEq α] [DecidableEq β] (a : α) (b : β) : pequiv.symm (single a b) = single b a :=
rfl
@[simp] theorem single_apply {α : Type u} {β : Type v} [DecidableEq α] [DecidableEq β] (a : α) (b : β) : coe_fn (single a b) a = some b :=
if_pos rfl
theorem single_apply_of_ne {α : Type u} {β : Type v} [DecidableEq α] [DecidableEq β] {a₁ : α} {a₂ : α} (h : a₁ ≠ a₂) (b : β) : coe_fn (single a₁ b) a₂ = none :=
if_neg (ne.symm h)
theorem single_trans_of_mem {α : Type u} {β : Type v} {γ : Type w} [DecidableEq α] [DecidableEq β] [DecidableEq γ] (a : α) {b : β} {c : γ} {f : β ≃. γ} (h : c ∈ coe_fn f b) : pequiv.trans (single a b) f = single a c := sorry
theorem trans_single_of_mem {α : Type u} {β : Type v} {γ : Type w} [DecidableEq α] [DecidableEq β] [DecidableEq γ] {a : α} {b : β} (c : γ) {f : α ≃. β} (h : b ∈ coe_fn f a) : pequiv.trans f (single b c) = single a c :=
symm_injective (single_trans_of_mem c (iff.mpr (mem_iff_mem f) h))
@[simp] theorem single_trans_single {α : Type u} {β : Type v} {γ : Type w} [DecidableEq α] [DecidableEq β] [DecidableEq γ] (a : α) (b : β) (c : γ) : pequiv.trans (single a b) (single b c) = single a c :=
single_trans_of_mem a (mem_single b c)
@[simp] theorem single_subsingleton_eq_refl {α : Type u} [DecidableEq α] [subsingleton α] (a : α) (b : α) : single a b = pequiv.refl α := sorry
theorem trans_single_of_eq_none {β : Type v} {γ : Type w} {δ : Type x} [DecidableEq β] [DecidableEq γ] {b : β} (c : γ) {f : δ ≃. β} (h : coe_fn (pequiv.symm f) b = none) : pequiv.trans f (single b c) = ⊥ := sorry
theorem single_trans_of_eq_none {α : Type u} {β : Type v} {δ : Type x} [DecidableEq α] [DecidableEq β] (a : α) {b : β} {f : β ≃. δ} (h : coe_fn f b = none) : pequiv.trans (single a b) f = ⊥ :=
symm_injective (trans_single_of_eq_none a h)
theorem single_trans_single_of_ne {α : Type u} {β : Type v} {γ : Type w} [DecidableEq α] [DecidableEq β] [DecidableEq γ] {b₁ : β} {b₂ : β} (h : b₁ ≠ b₂) (a : α) (c : γ) : pequiv.trans (single a b₁) (single b₂ c) = ⊥ :=
single_trans_of_eq_none a (single_apply_of_ne (ne.symm h) c)
protected instance partial_order {α : Type u} {β : Type v} : partial_order (α ≃. β) :=
partial_order.mk (fun (f g : α ≃. β) => ∀ (a : α) (b : β), b ∈ coe_fn f a → b ∈ coe_fn g a)
(preorder.lt._default fun (f g : α ≃. β) => ∀ (a : α) (b : β), b ∈ coe_fn f a → b ∈ coe_fn g a) sorry sorry sorry
theorem le_def {α : Type u} {β : Type v} {f : α ≃. β} {g : α ≃. β} : f ≤ g ↔ ∀ (a : α) (b : β), b ∈ coe_fn f a → b ∈ coe_fn g a :=
iff.rfl
protected instance order_bot {α : Type u} {β : Type v} : order_bot (α ≃. β) :=
order_bot.mk ⊥ partial_order.le partial_order.lt sorry sorry sorry sorry
protected instance semilattice_inf_bot {α : Type u} {β : Type v} [DecidableEq α] [DecidableEq β] : semilattice_inf_bot (α ≃. β) :=
semilattice_inf_bot.mk order_bot.bot order_bot.le order_bot.lt sorry sorry sorry sorry
(fun (f g : α ≃. β) =>
mk (fun (a : α) => ite (coe_fn f a = coe_fn g a) (coe_fn f a) none)
(fun (b : β) => ite (coe_fn (pequiv.symm f) b = coe_fn (pequiv.symm g) b) (coe_fn (pequiv.symm f) b) none) sorry)
sorry sorry sorry
end pequiv
namespace equiv
def to_pequiv {α : Type u_1} {β : Type u_2} (f : α ≃ β) : α ≃. β :=
pequiv.mk (some ∘ ⇑f) (some ∘ ⇑(equiv.symm f)) sorry
@[simp] theorem to_pequiv_refl {α : Type u_1} : to_pequiv (equiv.refl α) = pequiv.refl α :=
rfl
theorem to_pequiv_trans {α : Type u_1} {β : Type u_2} {γ : Type u_3} (f : α ≃ β) (g : β ≃ γ) : to_pequiv (equiv.trans f g) = pequiv.trans (to_pequiv f) (to_pequiv g) :=
rfl
theorem to_pequiv_symm {α : Type u_1} {β : Type u_2} (f : α ≃ β) : to_pequiv (equiv.symm f) = pequiv.symm (to_pequiv f) :=
rfl
theorem to_pequiv_apply {α : Type u_1} {β : Type u_2} (f : α ≃ β) (x : α) : coe_fn (to_pequiv f) x = some (coe_fn f x) :=
rfl
| blob_id: 6c27abf2bdaff070e0b73e4657fb074b97614b28
| directory_id: d406927ab5617694ec9ea7001f101b7c9e3d9702
| path: /src/analysis/normed_space/spectrum.lean
| content_id: 220d6649b36d54f085d317ba98e636c45fa3c845
| detected_licenses: ["Apache-2.0"]
| license_type: permissive
| repo_name: alreadydone/mathlib
| snapshot_id: dc0be621c6c8208c581f5170a8216c5ba6721927
| revision_id: c982179ec21091d3e102d8a5d9f5fe06c8fafb73
| branch_name: refs/heads/master
| visit_date: 1,685,523,275,196
| revision_date: 1,670,184,141,000
| committer_date: 1,670,184,141,000
| github_id: 287,574,545
| star_events_count: 0
| fork_events_count: 0
| gha_license_id: Apache-2.0
| gha_event_created_at: 1,670,290,714,000
| gha_created_at: 1,597,421,623,000
| gha_language: Lean
| src_encoding: UTF-8
| language: Lean
| is_vendor: false
| is_generated: false
| length_bytes: 27,835
| extension: lean
| content:
/-
Copyright (c) 2021 Jireh Loreaux. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jireh Loreaux
-/
import algebra.algebra.spectrum
import analysis.special_functions.pow
import analysis.complex.liouville
import analysis.complex.polynomial
import analysis.analytic.radius_liminf
import topology.algebra.module.character_space
import analysis.normed_space.exponential
/-!
# The spectrum of elements in a complete normed algebra
This file contains the basic theory for the resolvent and spectrum of a Banach algebra.
## Main definitions
* `spectral_radius : ℝ≥0∞`: supremum of `‖k‖₊` for all `k ∈ spectrum 𝕜 a`
* `normed_ring.alg_equiv_complex_of_complete`: **Gelfand-Mazur theorem** For a complex
Banach division algebra, the natural `algebra_map ℂ A` is an algebra isomorphism whose inverse
is given by selecting the (unique) element of `spectrum ℂ a`
## Main statements
* `spectrum.is_open_resolvent_set`: the resolvent set is open.
* `spectrum.is_closed`: the spectrum is closed.
* `spectrum.subset_closed_ball_norm`: the spectrum is a subset of closed disk of radius
equal to the norm.
* `spectrum.is_compact`: the spectrum is compact.
* `spectrum.spectral_radius_le_nnnorm`: the spectral radius is bounded above by the norm.
* `spectrum.has_deriv_at_resolvent`: the resolvent function is differentiable on the resolvent set.
* `spectrum.pow_nnnorm_pow_one_div_tendsto_nhds_spectral_radius`: Gelfand's formula for the
spectral radius in Banach algebras over `ℂ`.
* `spectrum.nonempty`: the spectrum of any element in a complex Banach algebra is nonempty.
## TODO
* compute all derivatives of `resolvent a`.
-/
open_locale ennreal nnreal
/-- The *spectral radius* is the supremum of the `nnnorm` (`‖⬝‖₊`) of elements in the spectrum,
coerced into an element of `ℝ≥0∞`. Note that it is possible for `spectrum 𝕜 a = ∅`. In this
case, `spectral_radius a = 0`. It is also possible that `spectrum 𝕜 a` be unbounded (though
not for Banach algebras, see `spectrum.is_bounded`, below). In this case,
`spectral_radius a = ∞`. -/
noncomputable def spectral_radius (𝕜 : Type*) {A : Type*} [normed_field 𝕜] [ring A]
[algebra 𝕜 A] (a : A) : ℝ≥0∞ :=
⨆ k ∈ spectrum 𝕜 a, ‖k‖₊
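/- A minimal illustration of the definition (assuming `A` is nontrivial, so that
`spectrum.scalar_eq` gives `spectrum 𝕜 (algebra_map 𝕜 A k) = {k}`): for a scalar
`algebra_map 𝕜 A k` the supremum above runs over the singleton `{k}`, hence the
spectral radius of `algebra_map 𝕜 A k` is `‖k‖₊`. -/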
variables {𝕜 : Type*} {A : Type*}
namespace spectrum
section spectrum_compact
open filter
variables [normed_field 𝕜] [normed_ring A] [normed_algebra 𝕜 A]
local notation `σ` := spectrum 𝕜
local notation `ρ` := resolvent_set 𝕜
local notation `↑ₐ` := algebra_map 𝕜 A
@[simp] lemma spectral_radius.of_subsingleton [subsingleton A] (a : A) :
spectral_radius 𝕜 a = 0 :=
by simp [spectral_radius]
@[simp] lemma spectral_radius_zero : spectral_radius 𝕜 (0 : A) = 0 :=
by { nontriviality A, simp [spectral_radius] }
lemma mem_resolvent_set_of_spectral_radius_lt {a : A} {k : 𝕜} (h : spectral_radius 𝕜 a < ‖k‖₊) :
k ∈ ρ a :=
not_not.mp $ λ hn, h.not_le $ le_supr₂ k hn
variable [complete_space A]
lemma is_open_resolvent_set (a : A) : is_open (ρ a) :=
units.is_open.preimage ((continuous_algebra_map 𝕜 A).sub continuous_const)
protected lemma is_closed (a : A) : is_closed (σ a) :=
(is_open_resolvent_set a).is_closed_compl
lemma mem_resolvent_set_of_norm_lt_mul {a : A} {k : 𝕜} (h : ‖a‖ * ‖(1 : A)‖ < ‖k‖) :
k ∈ ρ a :=
begin
rw [resolvent_set, set.mem_set_of_eq, algebra.algebra_map_eq_smul_one],
nontriviality A,
have hk : k ≠ 0,
from ne_zero_of_norm_ne_zero ((mul_nonneg (norm_nonneg _) (norm_nonneg _)).trans_lt h).ne',
let ku := units.map (↑ₐ).to_monoid_hom (units.mk0 k hk),
rw [←inv_inv (‖(1 : A)‖), mul_inv_lt_iff (inv_pos.2 $ norm_pos_iff.2 (one_ne_zero : (1 : A) ≠ 0))]
at h,
have hku : ‖-a‖ < ‖(↑ku⁻¹:A)‖⁻¹ := by simpa [ku, norm_algebra_map] using h,
simpa [ku, sub_eq_add_neg, algebra.algebra_map_eq_smul_one] using (ku.add (-a) hku).is_unit,
end
lemma mem_resolvent_set_of_norm_lt [norm_one_class A] {a : A} {k : 𝕜} (h : ‖a‖ < ‖k‖) :
k ∈ ρ a :=
mem_resolvent_set_of_norm_lt_mul (by rwa [norm_one, mul_one])
lemma norm_le_norm_mul_of_mem {a : A} {k : 𝕜} (hk : k ∈ σ a) :
‖k‖ ≤ ‖a‖ * ‖(1 : A)‖ :=
le_of_not_lt $ mt mem_resolvent_set_of_norm_lt_mul hk
lemma norm_le_norm_of_mem [norm_one_class A] {a : A} {k : 𝕜} (hk : k ∈ σ a) :
‖k‖ ≤ ‖a‖ :=
le_of_not_lt $ mt mem_resolvent_set_of_norm_lt hk
lemma subset_closed_ball_norm_mul (a : A) :
σ a ⊆ metric.closed_ball (0 : 𝕜) (‖a‖ * ‖(1 : A)‖) :=
λ k hk, by simp [norm_le_norm_mul_of_mem hk]
lemma subset_closed_ball_norm [norm_one_class A] (a : A) :
σ a ⊆ metric.closed_ball (0 : 𝕜) (‖a‖) :=
λ k hk, by simp [norm_le_norm_of_mem hk]
lemma is_bounded (a : A) : metric.bounded (σ a) :=
(metric.bounded_iff_subset_ball 0).mpr ⟨‖a‖ * ‖(1 : A)‖, subset_closed_ball_norm_mul a⟩
protected theorem is_compact [proper_space 𝕜] (a : A) : is_compact (σ a) :=
metric.is_compact_of_is_closed_bounded (spectrum.is_closed a) (is_bounded a)
theorem spectral_radius_le_nnnorm [norm_one_class A] (a : A) :
spectral_radius 𝕜 a ≤ ‖a‖₊ :=
by { refine supr₂_le (λ k hk, _), exact_mod_cast norm_le_norm_of_mem hk }
lemma exists_nnnorm_eq_spectral_radius_of_nonempty [proper_space 𝕜] {a : A} (ha : (σ a).nonempty) :
∃ k ∈ σ a, (‖k‖₊ : ℝ≥0∞) = spectral_radius 𝕜 a :=
begin
obtain ⟨k, hk, h⟩ := (spectrum.is_compact a).exists_forall_ge ha continuous_nnnorm.continuous_on,
exact ⟨k, hk, le_antisymm (le_supr₂ k hk) (supr₂_le $ by exact_mod_cast h)⟩,
end
lemma spectral_radius_lt_of_forall_lt_of_nonempty [proper_space 𝕜] {a : A}
(ha : (σ a).nonempty) {r : ℝ≥0} (hr : ∀ k ∈ σ a, ‖k‖₊ < r) :
spectral_radius 𝕜 a < r :=
Sup_image.symm.trans_lt $ ((spectrum.is_compact a).Sup_lt_iff_of_continuous ha
(ennreal.continuous_coe.comp continuous_nnnorm).continuous_on (r : ℝ≥0∞)).mpr
(by exact_mod_cast hr)
open ennreal polynomial
variable (𝕜)
theorem spectral_radius_le_pow_nnnorm_pow_one_div (a : A) (n : ℕ) :
spectral_radius 𝕜 a ≤ (‖a ^ (n + 1)‖₊) ^ (1 / (n + 1) : ℝ) * (‖(1 : A)‖₊) ^ (1 / (n + 1) : ℝ) :=
begin
refine supr₂_le (λ k hk, _),
/- apply easy direction of the spectral mapping theorem for polynomials -/
have pow_mem : k ^ (n + 1) ∈ σ (a ^ (n + 1)),
by simpa only [one_mul, algebra.algebra_map_eq_smul_one, one_smul, aeval_monomial, one_mul,
eval_monomial] using subset_polynomial_aeval a (monomial (n + 1) (1 : 𝕜)) ⟨k, hk, rfl⟩,
/- power of the norm is bounded by norm of the power -/
have nnnorm_pow_le : (↑(‖k‖₊ ^ (n + 1)) : ℝ≥0∞) ≤ ‖a ^ (n + 1)‖₊ * ‖(1 : A)‖₊,
{ simpa only [real.to_nnreal_mul (norm_nonneg _), norm_to_nnreal, nnnorm_pow k (n + 1),
ennreal.coe_mul] using coe_mono (real.to_nnreal_mono (norm_le_norm_mul_of_mem pow_mem)) },
/- take (n + 1)ᵗʰ roots and clean up the left-hand side -/
have hn : 0 < ((n + 1 : ℕ) : ℝ), by exact_mod_cast nat.succ_pos',
convert monotone_rpow_of_nonneg (one_div_pos.mpr hn).le nnnorm_pow_le,
erw [coe_pow, ←rpow_nat_cast, ←rpow_mul, mul_one_div_cancel hn.ne', rpow_one],
rw [nat.cast_succ, ennreal.coe_mul_rpow],
end
theorem spectral_radius_le_liminf_pow_nnnorm_pow_one_div (a : A) :
spectral_radius 𝕜 a ≤ at_top.liminf (λ n : ℕ, (‖a ^ n‖₊ : ℝ≥0∞) ^ (1 / n : ℝ)) :=
begin
refine ennreal.le_of_forall_lt_one_mul_le (λ ε hε, _),
by_cases ε = 0,
{ simp only [h, zero_mul, zero_le'] },
have hε' : ε⁻¹ ≠ ∞,
from λ h', h (by simpa only [inv_inv, inv_top] using congr_arg (λ (x : ℝ≥0∞), x⁻¹) h'),
simp only [ennreal.mul_le_iff_le_inv h (hε.trans_le le_top).ne, mul_comm ε⁻¹,
liminf_eq_supr_infi_of_nat', ennreal.supr_mul, ennreal.infi_mul hε'],
rw [←ennreal.inv_lt_inv, inv_one] at hε,
obtain ⟨N, hN⟩ := eventually_at_top.mp
(ennreal.eventually_pow_one_div_le (ennreal.coe_ne_top : ↑‖(1 : A)‖₊ ≠ ∞) hε),
refine (le_trans _ (le_supr _ (N + 1))),
refine le_infi (λ n, _),
simp only [←add_assoc],
refine (spectral_radius_le_pow_nnnorm_pow_one_div 𝕜 a (n + N)).trans _,
norm_cast,
exact mul_le_mul_left' (hN (n + N + 1) (by linarith)) _,
end
end spectrum_compact
section resolvent
open filter asymptotics
variables [nontrivially_normed_field 𝕜] [normed_ring A] [normed_algebra 𝕜 A] [complete_space A]
local notation `ρ` := resolvent_set 𝕜
local notation `↑ₐ` := algebra_map 𝕜 A
theorem has_deriv_at_resolvent {a : A} {k : 𝕜} (hk : k ∈ ρ a) :
has_deriv_at (resolvent a) (-(resolvent a k) ^ 2) k :=
begin
have H₁ : has_fderiv_at ring.inverse _ (↑ₐk - a) := has_fderiv_at_ring_inverse hk.unit,
have H₂ : has_deriv_at (λ k, ↑ₐk - a) 1 k,
{ simpa using (algebra.linear_map 𝕜 A).has_deriv_at.sub_const a },
simpa [resolvent, sq, hk.unit_spec, ← ring.inverse_unit hk.unit] using H₁.comp_has_deriv_at k H₂,
end
/- TODO: Once there is sufficient API for bornology, we should get a nice filter / asymptotics
version of this, for example: `tendsto (resolvent a) (cobounded 𝕜) (𝓝 0)` or more specifically
`(resolvent a) =O[cobounded 𝕜] (λ z, z⁻¹)`. -/
lemma norm_resolvent_le_forall (a : A) :
∀ ε > 0, ∃ R > 0, ∀ z : 𝕜, R ≤ ‖z‖ → ‖resolvent a z‖ ≤ ε :=
begin
obtain ⟨c, c_pos, hc⟩ := (@normed_ring.inverse_one_sub_norm A _ _).exists_pos,
rw [is_O_with_iff, eventually_iff, metric.mem_nhds_iff] at hc,
rcases hc with ⟨δ, δ_pos, hδ⟩,
simp only [cstar_ring.norm_one, mul_one] at hδ,
intros ε hε,
have ha₁ : 0 < ‖a‖ + 1 := lt_of_le_of_lt (norm_nonneg a) (lt_add_one _),
have min_pos : 0 < min (δ * (‖a‖ + 1)⁻¹) (ε * c⁻¹),
from lt_min (mul_pos δ_pos (inv_pos.mpr ha₁)) (mul_pos hε (inv_pos.mpr c_pos)),
refine ⟨(min (δ * (‖a‖ + 1)⁻¹) (ε * c⁻¹))⁻¹, inv_pos.mpr min_pos, (λ z hz, _)⟩,
have hnz : z ≠ 0 := norm_pos_iff.mp (lt_of_lt_of_le (inv_pos.mpr min_pos) hz),
replace hz := inv_le_of_inv_le min_pos hz,
rcases (⟨units.mk0 z hnz, units.coe_mk0 hnz⟩ : is_unit z) with ⟨z, rfl⟩,
have lt_δ : ‖z⁻¹ • a‖ < δ,
{ rw [units.smul_def, norm_smul, units.coe_inv, norm_inv],
calc ‖(z : 𝕜)‖⁻¹ * ‖a‖ ≤ δ * (‖a‖ + 1)⁻¹ * ‖a‖
: mul_le_mul_of_nonneg_right (hz.trans (min_le_left _ _)) (norm_nonneg _)
... < δ
: by { conv { rw mul_assoc, to_rhs, rw (mul_one δ).symm },
exact mul_lt_mul_of_pos_left
((inv_mul_lt_iff ha₁).mpr ((mul_one (‖a‖ + 1)).symm ▸ (lt_add_one _))) δ_pos } },
rw [←inv_smul_smul z (resolvent a (z : 𝕜)), units_smul_resolvent_self, resolvent,
algebra.algebra_map_eq_smul_one, one_smul, units.smul_def, norm_smul, units.coe_inv, norm_inv],
calc _ ≤ ε * c⁻¹ * c : mul_le_mul (hz.trans (min_le_right _ _)) (hδ (mem_ball_zero_iff.mpr lt_δ))
(norm_nonneg _) (mul_pos hε (inv_pos.mpr c_pos)).le
... = _ : inv_mul_cancel_right₀ c_pos.ne.symm ε,
end
end resolvent
section one_sub_smul
open continuous_multilinear_map ennreal formal_multilinear_series
open_locale nnreal ennreal
variables
[nontrivially_normed_field 𝕜] [normed_ring A] [normed_algebra 𝕜 A]
variable (𝕜)
/-- In a Banach algebra `A` over a nontrivially normed field `𝕜`, for any `a : A` the
power series with coefficients `a ^ n` represents the function `(1 - z • a)⁻¹` in a disk of
radius `‖a‖₊⁻¹`. -/
lemma has_fpower_series_on_ball_inverse_one_sub_smul [complete_space A] (a : A) :
has_fpower_series_on_ball (λ z : 𝕜, ring.inverse (1 - z • a))
(λ n, continuous_multilinear_map.mk_pi_field 𝕜 (fin n) (a ^ n)) 0 (‖a‖₊)⁻¹ :=
{ r_le :=
begin
refine le_of_forall_nnreal_lt (λ r hr, le_radius_of_bound_nnreal _ (max 1 ‖(1 : A)‖₊) (λ n, _)),
rw [←norm_to_nnreal, norm_mk_pi_field, norm_to_nnreal],
cases n,
{ simp only [le_refl, mul_one, or_true, le_max_iff, pow_zero] },
{ refine le_trans (le_trans (mul_le_mul_right' (nnnorm_pow_le' a n.succ_pos) (r ^ n.succ)) _)
(le_max_left _ _),
{ by_cases ‖a‖₊ = 0,
{ simp only [h, zero_mul, zero_le', pow_succ], },
{ rw [←coe_inv h, coe_lt_coe, nnreal.lt_inv_iff_mul_lt h] at hr,
simpa only [←mul_pow, mul_comm] using pow_le_one' hr.le n.succ } } }
end,
r_pos := ennreal.inv_pos.mpr coe_ne_top,
has_sum := λ y hy,
begin
have norm_lt : ‖y • a‖ < 1,
{ by_cases h : ‖a‖₊ = 0,
{ simp only [nnnorm_eq_zero.mp h, norm_zero, zero_lt_one, smul_zero] },
{ have nnnorm_lt : ‖y‖₊ < ‖a‖₊⁻¹,
by simpa only [←coe_inv h, mem_ball_zero_iff, metric.emetric_ball_nnreal] using hy,
rwa [←coe_nnnorm, ←real.lt_to_nnreal_iff_coe_lt, real.to_nnreal_one, nnnorm_smul,
←nnreal.lt_inv_iff_mul_lt h] } },
simpa [←smul_pow, (normed_ring.summable_geometric_of_norm_lt_1 _ norm_lt).has_sum_iff]
using (normed_ring.inverse_one_sub _ norm_lt).symm,
end }
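/- Informally, the lemma above is the Neumann (geometric) series: for `‖z‖₊ < ‖a‖₊⁻¹` one has
`ring.inverse (1 - z • a) = ∑' n, z ^ n • a ^ n`, with the `n`-th coefficient `a ^ n` packaged as
`continuous_multilinear_map.mk_pi_field 𝕜 (fin n) (a ^ n)`; this restatement is only a sketch, and
the precise claim is the `has_fpower_series_on_ball` statement itself. -/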
variable {𝕜}
lemma is_unit_one_sub_smul_of_lt_inv_radius {a : A} {z : 𝕜} (h : ↑‖z‖₊ < (spectral_radius 𝕜 a)⁻¹) :
is_unit (1 - z • a) :=
begin
by_cases hz : z = 0,
{ simp only [hz, is_unit_one, sub_zero, zero_smul] },
{ let u := units.mk0 z hz,
suffices hu : is_unit (u⁻¹ • 1 - a),
{ rwa [is_unit.smul_sub_iff_sub_inv_smul, inv_inv u] at hu },
{ rw [units.smul_def, ←algebra.algebra_map_eq_smul_one, ←mem_resolvent_set_iff],
refine mem_resolvent_set_of_spectral_radius_lt _,
rwa [units.coe_inv, nnnorm_inv, coe_inv (nnnorm_ne_zero_iff.mpr
(units.coe_mk0 hz ▸ hz : (u : 𝕜) ≠ 0)), lt_inv_iff_lt_inv] } }
end
/-- In a Banach algebra `A` over `𝕜`, for `a : A` the function `λ z, (1 - z • a)⁻¹` is
differentiable on any closed ball centered at zero of radius `r < (spectral_radius 𝕜 a)⁻¹`. -/
theorem differentiable_on_inverse_one_sub_smul [complete_space A] {a : A} {r : ℝ≥0}
(hr : (r : ℝ≥0∞) < (spectral_radius 𝕜 a)⁻¹) :
differentiable_on 𝕜 (λ z : 𝕜, ring.inverse (1 - z • a)) (metric.closed_ball 0 r) :=
begin
intros z z_mem,
apply differentiable_at.differentiable_within_at,
have hu : is_unit (1 - z • a),
{ refine is_unit_one_sub_smul_of_lt_inv_radius (lt_of_le_of_lt (coe_mono _) hr),
simpa only [norm_to_nnreal, real.to_nnreal_coe]
using real.to_nnreal_mono (mem_closed_ball_zero_iff.mp z_mem) },
have H₁ : differentiable 𝕜 (λ w : 𝕜, 1 - w • a) := (differentiable_id.smul_const a).const_sub 1,
exact differentiable_at.comp z (differentiable_at_inverse hu.unit) (H₁.differentiable_at),
end
end one_sub_smul
section gelfand_formula
open filter ennreal continuous_multilinear_map
open_locale topological_space
variables
[normed_ring A] [normed_algebra ℂ A] [complete_space A]
/-- The `limsup` relationship for the spectral radius used to prove `spectrum.gelfand_formula`. -/
lemma limsup_pow_nnnorm_pow_one_div_le_spectral_radius (a : A) :
limsup (λ n : ℕ, ↑‖a ^ n‖₊ ^ (1 / n : ℝ)) at_top ≤ spectral_radius ℂ a :=
begin
refine ennreal.inv_le_inv.mp (le_of_forall_pos_nnreal_lt (λ r r_pos r_lt, _)),
simp_rw [inv_limsup, ←one_div],
let p : formal_multilinear_series ℂ ℂ A :=
λ n, continuous_multilinear_map.mk_pi_field ℂ (fin n) (a ^ n),
suffices h : (r : ℝ≥0∞) ≤ p.radius,
{ convert h,
simp only [p.radius_eq_liminf, ←norm_to_nnreal, norm_mk_pi_field],
congr,
ext n,
rw [norm_to_nnreal, ennreal.coe_rpow_def (‖a ^ n‖₊) (1 / n : ℝ), if_neg],
exact λ ha, by linarith [ha.2, (one_div_nonneg.mpr n.cast_nonneg : 0 ≤ (1 / n : ℝ))], },
{ have H₁ := (differentiable_on_inverse_one_sub_smul r_lt).has_fpower_series_on_ball r_pos,
exact ((has_fpower_series_on_ball_inverse_one_sub_smul ℂ a).exchange_radius H₁).r_le, }
end
/-- **Gelfand's formula**: Given an element `a : A` of a complex Banach algebra, the
`spectral_radius` of `a` is the limit of the sequence `‖a ^ n‖₊ ^ (1 / n)` -/
theorem pow_nnnorm_pow_one_div_tendsto_nhds_spectral_radius (a : A) :
tendsto (λ n : ℕ, ((‖a ^ n‖₊ ^ (1 / n : ℝ)) : ℝ≥0∞)) at_top (𝓝 (spectral_radius ℂ a)) :=
tendsto_of_le_liminf_of_limsup_le (spectral_radius_le_liminf_pow_nnnorm_pow_one_div ℂ a)
(limsup_pow_nnnorm_pow_one_div_le_spectral_radius a)
/- This is the same as `pow_nnnorm_pow_one_div_tendsto_nhds_spectral_radius` but for `norm`
instead of `nnnorm`. -/
/-- **Gelfand's formula**: Given an element `a : A` of a complex Banach algebra, the
`spectral_radius` of `a` is the limit of the sequence `‖a ^ n‖₊ ^ (1 / n)` -/
theorem pow_norm_pow_one_div_tendsto_nhds_spectral_radius (a : A) :
tendsto (λ n : ℕ, ennreal.of_real (‖a ^ n‖ ^ (1 / n : ℝ))) at_top (𝓝 (spectral_radius ℂ a)) :=
begin
convert pow_nnnorm_pow_one_div_tendsto_nhds_spectral_radius a,
ext1,
rw [←of_real_rpow_of_nonneg (norm_nonneg _) _, ←coe_nnnorm, coe_nnreal_eq],
exact one_div_nonneg.mpr (by exact_mod_cast zero_le _),
end
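/- A worked consequence (sketch, not a lemma in this file): if `a` is nilpotent, say `a ^ m = 0`,
then `‖a ^ n‖ = 0` for every `n ≥ m`, so both limits above vanish and `spectral_radius ℂ a = 0`,
even when `‖a‖` itself is large. -/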
end gelfand_formula
section nonempty_spectrum
variables [normed_ring A] [normed_algebra ℂ A] [complete_space A] [nontrivial A] (a : A)
/-- In a (nontrivial) complex Banach algebra, every element has nonempty spectrum. -/
protected theorem nonempty : (spectrum ℂ a).nonempty :=
begin
/- Suppose `σ a = ∅`; then the resolvent set is all of `ℂ`, every `(z • 1 - a)` is a unit, and
`resolvent a` is differentiable on `ℂ`. -/
rw ←set.ne_empty_iff_nonempty,
by_contra h,
have H₀ : resolvent_set ℂ a = set.univ, by rwa [spectrum, set.compl_empty_iff] at h,
have H₁ : differentiable ℂ (λ z : ℂ, resolvent a z), from λ z,
(has_deriv_at_resolvent (H₀.symm ▸ set.mem_univ z : z ∈ resolvent_set ℂ a)).differentiable_at,
/- The norm of the resolvent is small for all sufficiently large `z`, and by compactness and
continuity it is bounded on the complement of a large ball, thus uniformly bounded on `ℂ`.
By Liouville's theorem `λ z, resolvent a z` is constant -/
have H₂ := norm_resolvent_le_forall a,
have H₃ : ∀ z : ℂ, resolvent a z = resolvent a (0 : ℂ),
{ refine λ z, H₁.apply_eq_apply_of_bounded (bounded_iff_forall_norm_le.mpr _) z 0,
rcases H₂ 1 zero_lt_one with ⟨R, R_pos, hR⟩,
rcases (proper_space.is_compact_closed_ball (0 : ℂ) R).exists_bound_of_continuous_on
H₁.continuous.continuous_on with ⟨C, hC⟩,
use max C 1,
rintros _ ⟨w, rfl⟩,
refine or.elim (em (‖w‖ ≤ R)) (λ hw, _) (λ hw, _),
{ exact (hC w (mem_closed_ball_zero_iff.mpr hw)).trans (le_max_left _ _) },
{ exact (hR w (not_le.mp hw).le).trans (le_max_right _ _), }, },
/- `resolvent a 0 = 0`, which is a contradiction because it isn't a unit. -/
have H₅ : resolvent a (0 : ℂ) = 0,
{ refine norm_eq_zero.mp (le_antisymm (le_of_forall_pos_le_add (λ ε hε, _)) (norm_nonneg _)),
rcases H₂ ε hε with ⟨R, R_pos, hR⟩,
simpa only [H₃ R] using (zero_add ε).symm.subst
(hR R (by exact_mod_cast (real.norm_of_nonneg R_pos.lt.le).symm.le)), },
/- `not_is_unit_zero` is where we need `nontrivial A`; it is unavoidable. -/
exact not_is_unit_zero (H₅.subst (is_unit_resolvent.mp
(mem_resolvent_set_iff.mp (H₀.symm ▸ set.mem_univ 0)))),
end
/-- In a complex Banach algebra, the spectral radius is always attained by some element of the
spectrum. -/
lemma exists_nnnorm_eq_spectral_radius : ∃ z ∈ spectrum ℂ a, (‖z‖₊ : ℝ≥0∞) = spectral_radius ℂ a :=
exists_nnnorm_eq_spectral_radius_of_nonempty (spectrum.nonempty a)
/-- In a complex Banach algebra, if every element of the spectrum has norm strictly less than
`r : ℝ≥0`, then the spectral radius is also strictly less than `r`. -/
lemma spectral_radius_lt_of_forall_lt {r : ℝ≥0} (hr : ∀ z ∈ spectrum ℂ a, ‖z‖₊ < r) :
spectral_radius ℂ a < r :=
spectral_radius_lt_of_forall_lt_of_nonempty (spectrum.nonempty a) hr
open_locale polynomial
open polynomial
/-- The **spectral mapping theorem** for polynomials in a Banach algebra over `ℂ`. -/
lemma map_polynomial_aeval (p : ℂ[X]) :
spectrum ℂ (aeval a p) = (λ k, eval k p) '' (spectrum ℂ a) :=
map_polynomial_aeval_of_nonempty a p (spectrum.nonempty a)
/-- A specialization of the spectral mapping theorem for polynomials in a Banach algebra over `ℂ`
to monic monomials. -/
protected lemma map_pow (n : ℕ) : spectrum ℂ (a ^ n) = (λ x, x ^ n) '' (spectrum ℂ a) :=
by simpa only [aeval_X_pow, eval_pow, eval_X] using map_polynomial_aeval a (X ^ n)
end nonempty_spectrum
section gelfand_mazur_isomorphism
variables [normed_ring A] [normed_algebra ℂ A] (hA : ∀ {a : A}, is_unit a ↔ a ≠ 0)
include hA
local notation `σ` := spectrum ℂ
lemma algebra_map_eq_of_mem {a : A} {z : ℂ} (h : z ∈ σ a) : algebra_map ℂ A z = a :=
by rwa [mem_iff, hA, not_not, sub_eq_zero] at h
/-- **Gelfand-Mazur theorem**: For a complex Banach division algebra, the natural `algebra_map ℂ A`
is an algebra isomorphism whose inverse is given by selecting the (unique) element of
`spectrum ℂ a`. In addition, `algebra_map_isometry` guarantees this map is an isometry.
Note: because `normed_division_ring` requires the field `norm_mul' : ∀ a b, ‖a * b‖ = ‖a‖ * ‖b‖`, we
don't use this type class and instead opt for a `normed_ring` in which the nonzero elements are
precisely the units. This allows for the application of this isomorphism in broader contexts, e.g.,
to the quotient of a complex Banach algebra by a maximal ideal. In the case when `A` is actually a
`normed_division_ring`, one may fill in the argument `hA` with the lemma `is_unit_iff_ne_zero`. -/
@[simps]
noncomputable def _root_.normed_ring.alg_equiv_complex_of_complete
[complete_space A] : ℂ ≃ₐ[ℂ] A :=
let nt : nontrivial A := ⟨⟨1, 0, hA.mp ⟨⟨1, 1, mul_one _, mul_one _⟩, rfl⟩⟩⟩ in
{ to_fun := algebra_map ℂ A,
inv_fun := λ a, (@spectrum.nonempty _ _ _ _ nt a).some,
left_inv := λ z, by simpa only [@scalar_eq _ _ _ _ _ nt _] using
(@spectrum.nonempty _ _ _ _ nt $ algebra_map ℂ A z).some_mem,
right_inv := λ a, algebra_map_eq_of_mem @hA (@spectrum.nonempty _ _ _ _ nt a).some_mem,
..algebra.of_id ℂ A }
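/- Usage sketch (a hypothetical application, following the docstring above): for a closed maximal
ideal `M` of a commutative complex Banach algebra, every nonzero element of the quotient is a
unit, so the hypothesis `hA` can be discharged and the isomorphism identifies the quotient with
`ℂ`; this is the standard route to the Gelfand transform. -/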
end gelfand_mazur_isomorphism
section exp_mapping
local notation `↑ₐ` := algebra_map 𝕜 A
/-- For `𝕜 = ℝ` or `𝕜 = ℂ`, `exp 𝕜` maps the spectrum of `a` into the spectrum of `exp 𝕜 a`. -/
theorem exp_mem_exp [is_R_or_C 𝕜] [normed_ring A] [normed_algebra 𝕜 A] [complete_space A]
(a : A) {z : 𝕜} (hz : z ∈ spectrum 𝕜 a) : exp 𝕜 z ∈ spectrum 𝕜 (exp 𝕜 a) :=
begin
have hexpmul : exp 𝕜 a = exp 𝕜 (a - ↑ₐ z) * ↑ₐ (exp 𝕜 z),
{ rw [algebra_map_exp_comm z, ←exp_add_of_commute (algebra.commutes z (a - ↑ₐz)).symm,
sub_add_cancel] },
let b := ∑' n : ℕ, ((n + 1).factorial⁻¹ : 𝕜) • (a - ↑ₐz) ^ n,
have hb : summable (λ n : ℕ, ((n + 1).factorial⁻¹ : 𝕜) • (a - ↑ₐz) ^ n),
{ refine summable_of_norm_bounded_eventually _ (real.summable_pow_div_factorial ‖a - ↑ₐz‖) _,
filter_upwards [filter.eventually_cofinite_ne 0] with n hn,
rw [norm_smul, mul_comm, norm_inv, is_R_or_C.norm_eq_abs, is_R_or_C.abs_cast_nat,
←div_eq_mul_inv],
exact div_le_div (pow_nonneg (norm_nonneg _) n) (norm_pow_le' (a - ↑ₐz) (zero_lt_iff.mpr hn))
(by exact_mod_cast nat.factorial_pos n)
(by exact_mod_cast nat.factorial_le (lt_add_one n).le) },
have h₀ : ∑' n : ℕ, ((n + 1).factorial⁻¹ : 𝕜) • (a - ↑ₐz) ^ (n + 1) = (a - ↑ₐz) * b,
{ simpa only [mul_smul_comm, pow_succ] using hb.tsum_mul_left (a - ↑ₐz) },
have h₁ : ∑' n : ℕ, ((n + 1).factorial⁻¹ : 𝕜) • (a - ↑ₐz) ^ (n + 1) = b * (a - ↑ₐz),
{ simpa only [pow_succ', algebra.smul_mul_assoc] using hb.tsum_mul_right (a - ↑ₐz) },
have h₃ : exp 𝕜 (a - ↑ₐz) = 1 + (a - ↑ₐz) * b,
{ rw exp_eq_tsum,
convert tsum_eq_zero_add (exp_series_summable' (a - ↑ₐz)),
simp only [nat.factorial_zero, nat.cast_one, inv_one, pow_zero, one_smul],
exact h₀.symm },
rw [spectrum.mem_iff, is_unit.sub_iff, ←one_mul (↑ₐ(exp 𝕜 z)), hexpmul, ←_root_.sub_mul,
commute.is_unit_mul_iff (algebra.commutes (exp 𝕜 z) (exp 𝕜 (a - ↑ₐz) - 1)).symm,
sub_eq_iff_eq_add'.mpr h₃, commute.is_unit_mul_iff (h₀ ▸ h₁ : (a - ↑ₐz) * b = b * (a - ↑ₐz))],
exact not_and_of_not_left _ (not_and_of_not_left _ ((not_iff_not.mpr is_unit.sub_iff).mp hz)),
end
end exp_mapping
end spectrum
namespace alg_hom
section normed_field
variables {F : Type*} [normed_field 𝕜] [normed_ring A] [normed_algebra 𝕜 A] [complete_space A]
local notation `↑ₐ` := algebra_map 𝕜 A
/-- An algebra homomorphism into the base field, as a continuous linear map (since it is
automatically bounded). See note [lower instance priority] -/
@[priority 100]
instance [alg_hom_class F 𝕜 A 𝕜] : continuous_linear_map_class F 𝕜 A 𝕜 :=
{ map_continuous := λ φ, add_monoid_hom_class.continuous_of_bound φ ‖(1 : A)‖ $
λ a, (mul_comm ‖a‖ ‖(1 : A)‖) ▸ spectrum.norm_le_norm_mul_of_mem (apply_mem_spectrum φ _),
.. alg_hom_class.linear_map_class }
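/- The bound above in one line: `φ a ∈ spectrum 𝕜 a` for any algebra homomorphism `φ : A →ₐ[𝕜] 𝕜`
(`apply_mem_spectrum`), hence `‖φ a‖ ≤ ‖a‖ * ‖(1 : A)‖` by `spectrum.norm_le_norm_mul_of_mem`;
this is restated as `norm_apply_le_self_mul_norm_one` below. -/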
/-- An algebra homomorphism into the base field, as a continuous linear map (since it is
automatically bounded). -/
def to_continuous_linear_map (φ : A →ₐ[𝕜] 𝕜) : A →L[𝕜] 𝕜 :=
{ cont := map_continuous φ, .. φ.to_linear_map }
@[simp] lemma coe_to_continuous_linear_map (φ : A →ₐ[𝕜] 𝕜) :
⇑φ.to_continuous_linear_map = φ := rfl
lemma norm_apply_le_self_mul_norm_one [alg_hom_class F 𝕜 A 𝕜] (f : F) (a : A) :
‖f a‖ ≤ ‖a‖ * ‖(1 : A)‖ :=
spectrum.norm_le_norm_mul_of_mem (apply_mem_spectrum f _)
lemma norm_apply_le_self [norm_one_class A] [alg_hom_class F 𝕜 A 𝕜] (f : F) (a : A) : ‖f a‖ ≤ ‖a‖ :=
spectrum.norm_le_norm_of_mem (apply_mem_spectrum f _)
end normed_field
section nontrivially_normed_field
variables [nontrivially_normed_field 𝕜] [normed_ring A] [normed_algebra 𝕜 A] [complete_space A]
local notation `↑ₐ` := algebra_map 𝕜 A
@[simp] lemma to_continuous_linear_map_norm [norm_one_class A] (φ : A →ₐ[𝕜] 𝕜) :
‖φ.to_continuous_linear_map‖ = 1 :=
continuous_linear_map.op_norm_eq_of_bounds zero_le_one
(λ a, (one_mul ‖a‖).symm ▸ spectrum.norm_le_norm_of_mem (apply_mem_spectrum φ _))
(λ _ _ h, by simpa only [coe_to_continuous_linear_map, map_one, norm_one, mul_one] using h 1)
end nontrivially_normed_field
end alg_hom
namespace weak_dual
namespace character_space
variables [nontrivially_normed_field 𝕜] [normed_ring A] [complete_space A]
variables [normed_algebra 𝕜 A]
/-- The equivalence between characters and algebra homomorphisms into the base field. -/
def equiv_alg_hom : (character_space 𝕜 A) ≃ (A →ₐ[𝕜] 𝕜) :=
{ to_fun := to_alg_hom,
inv_fun := λ f,
{ val := f.to_continuous_linear_map,
property := by { rw eq_set_map_one_map_mul, exact ⟨map_one f, map_mul f⟩ } },
left_inv := λ f, subtype.ext $ continuous_linear_map.ext $ λ x, rfl,
right_inv := λ f, alg_hom.ext $ λ x, rfl }
@[simp] lemma equiv_alg_hom_coe (f : character_space 𝕜 A) : ⇑(equiv_alg_hom f) = f := rfl
@[simp] lemma equiv_alg_hom_symm_coe (f : A →ₐ[𝕜] 𝕜) : ⇑(equiv_alg_hom.symm f) = f := rfl
end character_space
end weak_dual
| blob_id: 06a38a6f060d399b528970a8f4847ae8a0c764d8
| directory_id: 00c000939652bc85fffcfe8ba5dd194580a13c4b
| path: /src/for_mathlib.lean
| content_id: a02950106e93d9a13a81baf4f57b860a691b72ce
| detected_licenses: ["Apache-2.0"]
| license_type: permissive
| repo_name: johoelzl/qpf
| snapshot_id: a795220c4e872014a62126800313b74ba3b06680
| revision_id: d93ab1fb41d085e49ae476fa364535f40388f44d
| branch_name: refs/heads/master
| visit_date: 1,587,372,400,745
| revision_date: 1,548,633,467,000
| committer_date: 1,548,633,467,000
| github_id: null
| star_events_count: 0
| fork_events_count: 0
| gha_license_id: null
| gha_event_created_at: null
| gha_created_at: null
| gha_language: null
| src_encoding: UTF-8
| language: Lean
| is_vendor: false
| is_generated: false
| length_bytes: 10,701
| extension: lean
| content:
/-
Copyright (c) 2018 Simon Hudon. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Simon Hudon
-/
import data.pfun category.functor category.applicative
universes u v
lemma eq_mp_heq :
∀ {α β : Sort*} {a : α} {a' : β} (h₂ : a == a'), (eq.mp (type_eq_of_heq h₂) a) = a'
| α ._ a a' heq.rfl := rfl
namespace sigma
variables {α₁ α₂ α₃ : Type u}
variables {β₁ : α₁ → Type v} {β₂ : α₂ → Type v} {β₃ : α₃ → Type v}
variables {g : sigma β₂ → sigma β₃} {f : sigma β₁ → sigma β₂}
theorem eq_fst {s₁ s₂ : sigma β₁} : s₁ = s₂ → s₁.1 = s₂.1 :=
by cases s₁; cases s₂; cc
theorem eq_snd {s₁ s₂ : sigma β₁} : s₁ = s₂ → s₁.2 == s₂.2 :=
by cases s₁; cases s₂; cc
@[extensionality]
lemma ext {x₀ x₁ : sigma β₁}
(h₀ : x₀.1 = x₁.1)
(h₁ : x₀.1 = x₁.1 → x₀.2 == x₁.2) :
x₀ = x₁ :=
by casesm* sigma _; cases h₀; cases h₁ h₀; refl
lemma eta (x : sigma β₁) : sigma.mk x.1 x.2 = x :=
by cases x; refl
end sigma
namespace roption
variables {α : Type*} {β : Type*} {γ : Type*}
open function
lemma assert_if_neg {p : Prop}
(x : p → roption α)
(h : ¬ p)
: assert p x = roption.none :=
by { dsimp [assert,roption.none],
have : (∃ (h : p), (x h).dom) ↔ false,
{ split ; intros h' ; repeat { cases h' with h' },
exact h h' },
congr,
repeat { rw this <|> apply hfunext },
intros h h', cases h', }
lemma assert_if_pos {p : Prop}
(x : p → roption α)
(h : p)
: assert p x = x h :=
by { dsimp [assert],
have : (∃ (h : p), (x h).dom) ↔ (x h).dom,
{ split ; intros h'
; cases h' <|> split
; assumption, },
cases hx : x h, congr, rw [this,hx],
apply hfunext, rw [this,hx],
intros, simp [hx] }
@[simp]
lemma roption.none_bind {α β : Type*} (f : α → roption β)
: roption.none >>= f = roption.none :=
by simp [roption.none,has_bind.bind,roption.bind,assert_if_neg]
end roption
namespace monad
@[simp]
lemma bind_pure_star {m} [monad m] [is_lawful_monad m] (x : m punit) :
x >>= (λ (_x : punit), pure punit.star : punit → m punit) = x :=
by { transitivity,
{ apply congr_arg, ext z, cases z, refl },
{ simp } }
variables {α β γ : Type u}
variables {m : Type u → Type v} [monad m]
@[reducible]
def pipe (a : α → m β) (b : β → m γ) : α → m γ :=
λ x, a x >>= b
infixr ` >=> `:55 := pipe
@[functor_norm]
lemma map_bind_eq_bind_comp {α β γ} {m} [monad m] [is_lawful_monad m]
(f : α → β) (cmd : m α) (g : β → m γ) :
(f <$> cmd) >>= g = cmd >>= g ∘ f :=
by rw [← bind_pure_comp_eq_map,bind_assoc,(∘)]; simp
@[functor_norm]
lemma bind_map {α β γ} {m} [monad m] [is_lawful_monad m]
(f : α → γ → β) (cmd : m α) (g : α → m γ) :
cmd >>= (λ x, f x <$> g x) = do { x ← cmd, y ← g x, pure $ f x y } :=
by congr; ext; rw [← bind_pure (g x),map_bind]; simp
@[functor_norm]
lemma bind_seq {α β γ : Type u} {m} [monad m] [is_lawful_monad m]
(f : α → m (γ → β)) (cmd : m α) (g : α → m γ) :
cmd >>= (λ x, f x <*> g x) = do { x ← cmd, h ← f x, y ← g x, pure $ h y } :=
by congr; ext; simp [seq_eq_bind_map] with functor_norm
end monad
attribute [functor_norm] bind_assoc has_bind.and_then map_bind seq_left_eq seq_right_eq
namespace sum
variables {e : Type v} {α β : Type u}
protected def seq : Π (x : sum e (α → β)) (f : sum e α), sum e β
| (sum.inl e) _ := sum.inl e
| (sum.inr f) x := f <$> x
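-- Sketch of the behaviour (left-biased on errors): `sum.seq (inl e) x = inl e`, while
-- `sum.seq (inr f) (inr x) = inr (f x)` and `sum.seq (inr f) (inl e) = inl e`, via the
-- `functor` instance on `sum e`.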
instance : applicative (sum e) :=
{ seq := @sum.seq e,
pure := @sum.inr e }
instance : is_lawful_applicative (sum e) :=
by constructor; intros;
casesm* _ ⊕ _; simp [(<*>),sum.seq,pure,(<$>)];
refl
end sum
namespace functor
def foldl (α : Type u) (β : Type v) := α → α
def foldr (α : Type u) (β : Type v) := α → α
instance foldr.applicative {α} : applicative (foldr α) :=
{ pure := λ _ _, id,
seq := λ _ _ f x, f ∘ x }
instance foldl.applicative {α} : applicative (foldl α) :=
{ pure := λ _ _, id,
seq := λ _ _ f x, x ∘ f }
instance foldr.is_lawful_applicative {α} : is_lawful_applicative (foldr α) :=
by refine { .. }; intros; refl
instance foldl.is_lawful_applicative {α} : is_lawful_applicative (foldl α) :=
by refine { .. }; intros; refl
def foldr.eval {α β} (x : foldr α β) : α → α := x
def foldl.eval {α β} (x : foldl α β) : α → α := x
def foldl.cons {α β} (x : α) : foldl (list α) β :=
list.cons x
def foldr.cons {α β} (x : α) : foldr (list α) β :=
list.cons x
def foldl.cons' {α} (x : α) : foldl (list α) punit :=
list.cons x
def foldl.lift {α} (x : α → α) : foldl α punit := x
def foldr.lift {α} (x : α → α) : foldr α punit := x
end functor
instance {α : Type u} : traversable (prod.{u u} α) :=
{ map := λ β γ f (x : α × β), prod.mk x.1 $ f x.2,
traverse := λ m _ β γ f (x : α × β), by exactI prod.mk x.1 <$> f x.2 }
namespace traversable
variables {t : Type u → Type u} [traversable t]
def to_list {α} (x : t α) : list α :=
@functor.foldr.eval _ (t punit) (traverse functor.foldr.cons x) []
end traversable
namespace name
def append_suffix : name → string → name
| (mk_string s n) s' := mk_string (s ++ s') n
| n _ := n
end name
namespace level
meta def fold_mvar {α} : level → (name → α → α) → α → α
| zero f := id
| (succ a) f := fold_mvar a f
| (param a) f := id
| (mvar a) f := f a
| (max a b) f := fold_mvar a f ∘ fold_mvar b f
| (imax a b) f := fold_mvar a f ∘ fold_mvar b f
end level
namespace expr
meta def replace_all (e : expr) (p : expr → Prop) [decidable_pred p] (r : expr) : expr :=
e.replace $ λ e i, guard (p e) >> pure (r.lift_vars 0 i)
meta def const_params : expr → list level
| (const _ ls) := ls
| _ := []
meta def collect_meta_univ (e : expr) : list name :=
native.rb_set.to_list $ e.fold native.mk_rb_set $ λ e' i s,
match e' with
| (sort u) := u.fold_mvar (flip native.rb_set.insert) s
| (const _ ls) := ls.foldl (λ s' l, l.fold_mvar (flip native.rb_set.insert) s') s
| _ := s
end
end expr
namespace tactic
meta def unify_univ (u u' : level) : tactic unit :=
unify (expr.sort u) (expr.sort u')
meta def add_decl' (d : declaration) : tactic expr :=
do add_decl d,
pure $ expr.const d.to_name $ d.univ_params.map level.param
meta def renew : expr → tactic expr
| (expr.local_const uniq pp bi t) := mk_local' pp bi t
| e := fail format!"{e} is not a local constant"
meta def trace_error {α} (tac : tactic α) : tactic α :=
λ s, match tac s with
| r@(result.success _ _) := r
| (result.exception (some msg) pos s') := (trace (msg ()) >> result.exception (some msg) pos) s'
| (result.exception none pos s') := (trace "no msg" >> result.exception none pos) s'
end
meta def is_type (e : expr) : tactic bool :=
do (expr.sort _) ← infer_type e | pure ff,
pure tt
meta def list_macros : expr → list (name × list expr) | e :=
e.fold [] (λ m i s,
match m with
| (expr.macro m args) := (expr.macro_def_name m, args) :: s
| _ := s end)
meta def expand_untrusted (tac : tactic unit) : tactic unit :=
do tgt ← target,
mv ← mk_meta_var tgt,
gs ← get_goals,
set_goals [mv],
tac,
env ← get_env,
pr ← env.unfold_untrusted_macros <$> instantiate_mvars mv,
set_goals gs,
exact pr
meta def binders : expr → tactic (list expr)
| (expr.pi n bi d b) :=
do v ← mk_local' n bi d,
(::) v <$> binders (b.instantiate_var v)
| _ := pure []
meta def rec_args_count (t c : name) : tactic ℕ :=
do ct ← mk_const c >>= infer_type,
(list.length ∘ list.filter (λ v : expr, v.local_type.is_app_of t)) <$> binders ct
meta def match_induct_hyp (n : name) : list expr → list expr → tactic (list $ expr × option expr)
| [] [] := pure []
| [] _ := fail "wrong number of inductive hypotheses"
| (x :: xs) [] := (::) (x,none) <$> match_induct_hyp xs []
| (x :: xs) (h :: hs) :=
do t ← infer_type x,
if t.is_app_of n
then (::) (x,h) <$> match_induct_hyp xs hs
else (::) (x,none) <$> match_induct_hyp xs (h :: hs)
meta def is_recursive_type (n : name) : tactic bool :=
do e ← get_env,
let cs := e.constructors_of n,
rs ← cs.mmap (rec_args_count n),
pure $ rs.any (λ r, r > 0)
meta def better_induction (e : expr) : tactic $ list (name × list (expr × option expr) × list (name × expr)) :=
do t ← infer_type e,
let tn := t.get_app_fn.const_name,
focus1 $
do vs ← induction e,
gs ← get_goals,
vs' ← mzip_with (λ g (pat : name × list expr × list (name × expr)),
do let ⟨n,args,σ⟩ := pat,
set_goals [g],
nrec ← rec_args_count tn n,
let ⟨args,rec⟩ := args.split_at (args.length - nrec),
args ← match_induct_hyp tn args rec,
pure ((n,args,σ))) gs vs,
set_goals gs,
pure vs'
meta def extract_def' {α} (n : name) (trusted : bool) (elab_def : tactic α) : tactic α :=
do cxt ← list.map to_implicit <$> local_context,
t ← target,
(r,d) ← solve_aux t elab_def,
d ← instantiate_mvars d,
t' ← pis cxt t,
d' ← lambdas cxt d,
let univ := t'.collect_univ_params,
add_decl $ declaration.defn n univ t' d' (reducibility_hints.regular 1 tt) trusted,
r <$ (applyc n; assumption)
open expr list nat
meta def remove_intl_const : expr → tactic expr
| v@(local_const uniq pp bi _) :=
do t ← infer_type v,
pure $ local_const uniq pp bi t
| e := pure e
meta def intron' : ℕ → tactic (list expr)
| 0 := pure []
| (succ n) := (::) <$> intro1 <*> intron' n
meta def unpi : expr → tactic (list expr × expr)
| (pi n bi d b) :=
do v ← mk_local' n bi d,
prod.map (cons v) id <$> unpi (b.instantiate_var v)
| e := pure ([],e)
meta def unify_app_aux : expr → expr → list expr → tactic expr
| e (pi _ _ d b) (a :: as) :=
do t ← infer_type a,
unify t d,
e' ← head_beta (e a),
b' ← whnf (b.instantiate_var a),
unify_app_aux e' b' as
| e t (_ :: _) := fail "too many arguments"
| e _ [] := pure e
meta def unify_app (e : expr) (args : list expr) : tactic expr :=
do t ← infer_type e >>= whnf,
unify_app_aux e t args
end tactic
namespace tactic.interactive
open lean lean.parser interactive interactive.types tactic
local postfix `*`:9000 := many
meta def clear_except (xs : parse ident *) : tactic unit :=
do let ns := name_set.of_list xs,
local_context >>= mmap' (λ h : expr,
when (¬ ns.contains h.local_pp_name) $
try $ tactic.clear h) ∘ list.reverse
meta def splita := split; [skip, assumption]
end tactic.interactive
| blob_id: 5c98c252ca8fa19cc3d83d68ab86c39fcff931c0
| directory_id: 48f4f349e1bb919d14ab7e5921d0cfe825f4c423
| path: /folklore/measure_theory.lean
| content_id: 6129f8b85ec194a5bf54bb84e0f42a1ab913e6b4
| detected_licenses: []
| license_type: no_license
| repo_name: thalesant/formalabstracts-2017
| snapshot_id: fdf4ff90d30ab1dcb6d4cf16a068a997ea5ecc80
| revision_id: c47181342c9e41954aa8d41f5049965b5f332bca
| branch_name: refs/heads/master
| visit_date: 1,584,610,453,925
| revision_date: 1,528,277,508,000
| committer_date: 1,528,277,508,000
| github_id: 136,299,625
| star_events_count: 0
| fork_events_count: 0
| gha_license_id: null
| gha_event_created_at: null
| gha_created_at: null
| gha_language: null
| src_encoding: UTF-8
| language: Lean
| is_vendor: false
| is_generated: false
| length_bytes: 6,143
| extension: lean
| content:
import .real_axiom .analysis
open set classical real_axiom real_axiom.extended_real
local attribute [instance] prop_decidable
noncomputable theory
universe u
def countable_union {X : Type u} (f : ℕ → set X) : set X :=
{x | ∃ n, x ∈ f n}
variables {X : Type u} (σ : set (set X))
class sigma_algebra :=
(univ_mem : univ ∈ σ)
(closed_under_comp : ∀ s, s ∈ σ → univ \ s ∈ σ)
(closed_under_countable_union : ∀ f : ℕ → set X, (∀ n, f n ∈ σ) → countable_union f ∈ σ)
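-- A minimal sketch (hypothetical instances, not defined here): both the full power set
-- `{s | true}` and the trivial family `{∅, univ}` satisfy the three axioms above.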
variable [sigma_algebra σ]
-- alternatively, we could define μ using subtypes, as a function {s : set X // s ∈ σ} → ℝ∞.
-- this complicates some things and simplifies others.
class measure_space (μ : (set X) → ℝ∞) :=
(measure_nonneg : ∀ s ∈ σ, μ s ≥ 0)
(measure_empty : μ ∅ = 0)
(measure_union : ∀ (f : ℕ → set X), (∀ n, f n ∈ σ) → (∀ m n, m ≠ n → f m ∩ f n = ∅) →
μ (countable_union f) = extended_real.countable_sum (μ ∘ f))
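-- A worked special case of `measure_union` (sketch): for disjoint `s t ∈ σ`, taking `f 0 = s`,
-- `f 1 = t` and `f n = ∅` otherwise gives `μ (s ∪ t) = μ s + μ t`, the tail of the countable sum
-- vanishing by `measure_empty`.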
class finite_measure_space (μ : (set X) → ℝ∞) extends measure_space σ μ :=
(measure_univ_finite : ∃ b : ℝ, μ univ = of_real b)
def univ_measure (μ : (set X) → ℝ∞) [finite_measure_space σ μ] : ℝ :=
some (@finite_measure_space.measure_univ_finite X σ (by apply_instance) μ (by apply_instance))
def lebesgue_measurable (f : X → ℝ) :=
∀ t : ℝ, {x | f x > t} ∈ σ
-- note that we assume μ is a total function here. Theorems involving μ should assume the arguments are in σ.
variable μ : (set X) → ℝ∞
section
variable [measure_space σ μ]
-- this assumes s ∈ σ
def indicator_measure (s : set X) : ℝ∞ := μ s
def indicator_function (s : set X) (x : X) : ℝ := if x ∈ s then 1 else 0
def is_simple_function (ls : list (ℝ × (set X))) := ∀ p : ℝ × (set X), p ∈ ls → p.1 ≥ 0 ∧ p.2 ∈ σ
def simple_function (ls : list (ℝ × (set X))) (x : X) : ℝ :=
(ls.map (λ p : ℝ × (set X), p.1 * indicator_function p.2 x)).foldr (+) 0
unfinished simple_function_pos : ∀ {X} (σ : set (set X)) (μ : set X → ℝ∞) [sigma_algebra σ] [measure_space σ μ],
∀ (ls : list (ℝ × (set X))), (∀ p : ℝ × (set X), p ∈ ls → p.1 ≥ 0 ∧ p.2 ∈ σ) → ∀ x, simple_function ls x ≥ 0 :=
{ description := "simple functions are nonnegative" }
-- must use 0*∞ = 0
-- this assumes is_simple_function ls
def simple_function_integral (ls : list (ℝ × (set X))) : ℝ∞ :=
(ls.map (λ p : ℝ × (set X), of_real p.1 * indicator_measure μ p.2)).foldr (+) 0
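-- Worked example (sketch, with `s t ∈ σ` and nonnegative coefficients): for `ls = [(2, s), (3, t)]`,
-- `simple_function ls x` reduces to `2 * indicator_function s x + 3 * indicator_function t x` and
-- `simple_function_integral μ ls` to `of_real 2 * μ s + of_real 3 * μ t`
-- (each up to a trailing `+ 0` from the fold).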
-- this will be better looking once we have better parsing for unfinished
unfinished simple_function_integral_well_defined :
∀ {X μ σ} [sigma_algebra σ] [measure_space σ μ] (ls1 : list (ℝ × set X)) (ls2 : list (ℝ × set X)),
(∀ p : ℝ × (set X), p ∈ ls1 → p.1 ≥ 0 ∧ p.2 ∈ σ) → (∀ p : ℝ × (set X), p ∈ ls2 → p.1 ≥ 0 ∧ p.2 ∈ σ) →
(lebesgue_measurable σ (simple_function ls1)) → (lebesgue_measurable σ (simple_function ls2)) →
(simple_function ls1 = simple_function ls2) → (simple_function_integral μ ls1 = simple_function_integral μ ls2) :=
{ description := "the integral of a simple function does not depend on the representation of that function" }
-- assumes (h : ∀ x, f x ≥ 0)
def nonneg_function_integral (f : X → ℝ) : ℝ∞ :=
extended_real.sup
(image (simple_function_integral μ)
{ls : list (ℝ × (set X)) | is_simple_function σ ls ∧ ∀ x, 0 ≤ simple_function ls x ∧ (simple_function ls x) ≤ f x})
-- this shorter version should work when unfinished is fixed
/-unfinished simple_function_integral_eq_nonneg_function_integral :
∀ {X μ σ} [sigma_algebra σ] [measure_space σ μ] (ls : list (ℝ × set X)),
(∀ p : ℝ × (set X), p ∈ ls → p.1 ≥ 0 ∧ p.2 ∈ σ) → (lebesgue_measurable σ (simple_function ls)) →
simple_function_integral μ ls = nonneg_function_integral σ μ (simple_function ls) :=
{ description := "the simple and nonnegative integrals match" }-/
unfinished simple_function_integral_eq_nonneg_function_integral :
∀ {X μ σ} [sigma_algebra σ] [measure_space σ μ] (ls : list (ℝ × set X)),
(∀ p : ℝ × (set X), p ∈ ls → p.1 ≥ 0 ∧ p.2 ∈ σ) → (lebesgue_measurable σ (simple_function ls)) →
simple_function_integral μ ls = @nonneg_function_integral _ σ _ μ (by apply_instance) (simple_function ls) :=
{ description := "the simple and nonnegative integrals match" }
def pos_part (f : X → ℝ) (x : X) : ℝ := if f x > 0 then f x else 0
def neg_part (f : X → ℝ) (x : X) : ℝ := if f x < 0 then -(f x) else 0
theorem pos_part_nonneg (f : X → ℝ) (x : X) : pos_part f x ≥ 0 :=
begin cases (em (f x > 0)), all_goals {unfold pos_part, simp *}, {apply le_of_lt, assumption}, apply le_refl end
theorem neg_part_nonneg (f : X → ℝ) (x : X) : neg_part f x ≥ 0 :=
begin cases (em (f x < 0)), all_goals {unfold neg_part, simp *}, {apply le_of_lt, apply neg_pos_of_neg, assumption}, apply le_refl end
unfinished pos_part_plus_neg_part : ∀ {X} (f : X → ℝ) (x : X), abs (f x) = pos_part f x + neg_part f x :=
{ description := "abs f decomposes into the sum of pos and neg parts" }
def signed_integral_exists_condition (f : X → ℝ) :=
nonneg_function_integral σ μ (pos_part f) < inf ∨ nonneg_function_integral σ μ (neg_part f) < inf
-- assumes signed_integral_exists_condition f
def signed_function_integral (f : X → ℝ) : ℝ∞ :=
nonneg_function_integral σ μ (pos_part f) + nonneg_function_integral σ μ (neg_part f)
class lebesgue_integrable (f : X → ℝ) :=
(r : ℝ)
(f_integrable : signed_integral_exists_condition σ μ f)
(integral_value : signed_function_integral σ μ f = of_real r)
def lebesgue_integral (f : X → ℝ) [lebesgue_integrable σ μ f] : ℝ :=
@lebesgue_integrable.r X σ (by apply_instance) μ (by apply_instance) f (by apply_instance)
def almost_everywhere (P : X → Prop) := {x | ¬ P x} ∈ σ ∧ μ {x | ¬ P x} = 0
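-- Unfolding example (sketch): for `P x := f x = 0`, `almost_everywhere σ μ P` says that the set
-- `{x | f x ≠ 0}` lies in `σ` and has measure `0`, i.e. `f` vanishes outside a null set.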
end
| blob_id: c6e274a593b6d976a5cdf538dcb173b9ab36b831
| directory_id: a4673261e60b025e2c8c825dfa4ab9108246c32e
| path: /stage0/src/Lean/LocalContext.lean
| content_id: dcc4b44225d71773282eaf9152030d52f15fe4af
| detected_licenses: ["Apache-2.0"]
| license_type: permissive
| repo_name: jcommelin/lean4
| snapshot_id: c02dec0cc32c4bccab009285475f265f17d73228
| revision_id: 2909313475588cc20ac0436e55548a4502050d0a
| branch_name: refs/heads/master
| visit_date: 1,674,129,550,893
| revision_date: 1,606,415,348,000
| committer_date: 1,606,415,348,000
| github_id: null
| star_events_count: 0
| fork_events_count: 0
| gha_license_id: null
| gha_event_created_at: null
| gha_created_at: null
| gha_language: null
| src_encoding: UTF-8
| language: Lean
| is_vendor: false
| is_generated: false
| length_bytes: 14,958
| extension: lean
| content:
/-
Copyright (c) 2019 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
import Std.Data.PersistentArray
import Lean.Expr
import Lean.Hygiene
namespace Lean
inductive LocalDecl :=
| cdecl (index : Nat) (fvarId : FVarId) (userName : Name) (type : Expr) (bi : BinderInfo)
| ldecl (index : Nat) (fvarId : FVarId) (userName : Name) (type : Expr) (value : Expr) (nonDep : Bool)
@[export lean_mk_local_decl]
def mkLocalDeclEx (index : Nat) (fvarId : FVarId) (userName : Name) (type : Expr) (bi : BinderInfo) : LocalDecl :=
LocalDecl.cdecl index fvarId userName type bi
@[export lean_mk_let_decl]
def mkLetDeclEx (index : Nat) (fvarId : FVarId) (userName : Name) (type : Expr) (val : Expr) : LocalDecl :=
LocalDecl.ldecl index fvarId userName type val false
@[export lean_local_decl_binder_info]
def LocalDecl.binderInfoEx : LocalDecl → BinderInfo
| LocalDecl.cdecl _ _ _ _ bi => bi
| _ => BinderInfo.default
namespace LocalDecl
instance : Inhabited LocalDecl := ⟨ldecl arbitrary arbitrary arbitrary arbitrary arbitrary false⟩
def isLet : LocalDecl → Bool
| cdecl .. => false
| ldecl .. => true
def index : LocalDecl → Nat
| cdecl (index := i) .. => i
| ldecl (index := i) .. => i
def setIndex : LocalDecl → Nat → LocalDecl
| cdecl _ id n t bi, idx => cdecl idx id n t bi
| ldecl _ id n t v nd, idx => ldecl idx id n t v nd
def fvarId : LocalDecl → FVarId
| cdecl (fvarId := id) .. => id
| ldecl (fvarId := id) .. => id
def userName : LocalDecl → Name
| cdecl (userName := n) .. => n
| ldecl (userName := n) .. => n
def type : LocalDecl → Expr
| cdecl (type := t) .. => t
| ldecl (type := t) .. => t
def setType : LocalDecl → Expr → LocalDecl
| cdecl idx id n _ bi, t => cdecl idx id n t bi
| ldecl idx id n _ v nd, t => ldecl idx id n t v nd
def binderInfo : LocalDecl → BinderInfo
| cdecl (bi := bi) .. => bi
| ldecl .. => BinderInfo.default
def isAuxDecl (d : LocalDecl) : Bool :=
d.binderInfo.isAuxDecl
def value? : LocalDecl → Option Expr
| cdecl .. => none
| ldecl (value := v) .. => some v
def value : LocalDecl → Expr
| cdecl .. => panic! "let declaration expected"
| ldecl (value := v) .. => v
def setValue : LocalDecl → Expr → LocalDecl
| ldecl idx id n t _ nd, v => ldecl idx id n t v nd
| d, _ => d
def updateUserName : LocalDecl → Name → LocalDecl
| cdecl index id _ type bi, userName => cdecl index id userName type bi
| ldecl index id _ type val nd, userName => ldecl index id userName type val nd
def updateBinderInfo : LocalDecl → BinderInfo → LocalDecl
| cdecl index id n type _, bi => cdecl index id n type bi
| ldecl .., _ => panic! "unexpected let declaration"
def toExpr (decl : LocalDecl) : Expr :=
mkFVar decl.fvarId
def hasExprMVar : LocalDecl → Bool
| cdecl (type := t) .. => t.hasExprMVar
| ldecl (type := t) (value := v) .. => t.hasExprMVar || v.hasExprMVar
end LocalDecl
open Std (PersistentHashMap PersistentArray PArray)
structure LocalContext :=
(fvarIdToDecl : PersistentHashMap FVarId LocalDecl := {})
(decls : PersistentArray (Option LocalDecl) := {})
namespace LocalContext
instance : Inhabited LocalContext := ⟨{}⟩
@[export lean_mk_empty_local_ctx]
def mkEmpty : Unit → LocalContext := fun _ => {}
def empty : LocalContext := {}
@[export lean_local_ctx_is_empty]
def isEmpty (lctx : LocalContext) : Bool :=
lctx.fvarIdToDecl.isEmpty
/- Low-level API for creating local declarations. It is used to implement actions in the `Elab` and `Tactic` monads. It should not be used directly, since the argument `fvarId : FVarId` is assumed to be unique (i.e. freshly generated). -/
@[export lean_local_ctx_mk_local_decl]
def mkLocalDecl (lctx : LocalContext) (fvarId : FVarId) (userName : Name) (type : Expr) (bi : BinderInfo := BinderInfo.default) : LocalContext :=
match lctx with
| { fvarIdToDecl := map, decls := decls } =>
let idx := decls.size
let decl := LocalDecl.cdecl idx fvarId userName type bi
{ fvarIdToDecl := map.insert fvarId decl, decls := decls.push decl }
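/- A minimal usage sketch (with hypothetical fresh identifiers): starting from `{}`, one call to
`mkLocalDecl` (say for `x : Nat`) followed by one call to `mkLetDecl` (say `y := x`) yields a
context whose `decls` has size 2, with `find?` returning the two declarations at indices 0 and 1. -/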
@[export lean_local_ctx_mk_let_decl]
def mkLetDecl (lctx : LocalContext) (fvarId : FVarId) (userName : Name) (type : Expr) (value : Expr) (nonDep := false) : LocalContext :=
match lctx with
| { fvarIdToDecl := map, decls := decls } =>
let idx := decls.size
let decl := LocalDecl.ldecl idx fvarId userName type value nonDep
{ fvarIdToDecl := map.insert fvarId decl, decls := decls.push decl }
/- Low level API -/
def addDecl (lctx : LocalContext) (newDecl : LocalDecl) : LocalContext :=
match lctx with
| { fvarIdToDecl := map, decls := decls } =>
let idx := decls.size
let newDecl := newDecl.setIndex idx
{ fvarIdToDecl := map.insert newDecl.fvarId newDecl, decls := decls.push newDecl }
@[export lean_local_ctx_find]
def find? (lctx : LocalContext) (fvarId : FVarId) : Option LocalDecl :=
lctx.fvarIdToDecl.find? fvarId
def findFVar? (lctx : LocalContext) (e : Expr) : Option LocalDecl :=
lctx.find? e.fvarId!
def get! (lctx : LocalContext) (fvarId : FVarId) : LocalDecl :=
match lctx.find? fvarId with
| some d => d
| none => panic! "unknown free variable"
def getFVar! (lctx : LocalContext) (e : Expr) : LocalDecl :=
lctx.get! e.fvarId!
def contains (lctx : LocalContext) (fvarId : FVarId) : Bool :=
lctx.fvarIdToDecl.contains fvarId
def containsFVar (lctx : LocalContext) (e : Expr) : Bool :=
lctx.contains e.fvarId!
def getFVarIds (lctx : LocalContext) : Array FVarId :=
lctx.decls.foldl (init := #[]) fun r decl? => match decl? with
| some decl => r.push decl.fvarId
| none => r
def getFVars (lctx : LocalContext) : Array Expr :=
lctx.getFVarIds.map mkFVar
private partial def popTailNoneAux (a : PArray (Option LocalDecl)) : PArray (Option LocalDecl) :=
if a.size == 0 then a
else match a.get! (a.size - 1) with
| none => popTailNoneAux a.pop
| some _ => a
@[export lean_local_ctx_erase]
def erase (lctx : LocalContext) (fvarId : FVarId) : LocalContext :=
match lctx with
| { fvarIdToDecl := map, decls := decls } =>
match map.find? fvarId with
| none => lctx
| some decl => { fvarIdToDecl := map.erase fvarId, decls := popTailNoneAux (decls.set decl.index none) }
@[export lean_local_ctx_pop]
def pop (lctx : LocalContext): LocalContext :=
match lctx with
| { fvarIdToDecl := map, decls := decls } =>
if decls.size == 0 then lctx
else match decls.get! (decls.size - 1) with
| none => lctx -- unreachable
| some decl => { fvarIdToDecl := map.erase decl.fvarId, decls := popTailNoneAux decls.pop }
@[export lean_local_ctx_find_from_user_name]
def findFromUserName? (lctx : LocalContext) (userName : Name) : Option LocalDecl :=
lctx.decls.findSomeRev? fun decl =>
match decl with
| none => none
| some decl => if decl.userName == userName then some decl else none
@[export lean_local_ctx_uses_user_name]
def usesUserName (lctx : LocalContext) (userName : Name) : Bool :=
(lctx.findFromUserName? userName).isSome
private partial def getUnusedNameAux (lctx : LocalContext) (suggestion : Name) (i : Nat) : Name × Nat :=
let curr := suggestion.appendIndexAfter i
if lctx.usesUserName curr then getUnusedNameAux lctx suggestion (i + 1)
else (curr, i + 1)
@[export lean_local_ctx_get_unused_name]
def getUnusedName (lctx : LocalContext) (suggestion : Name) : Name :=
let suggestion := suggestion.eraseMacroScopes
if lctx.usesUserName suggestion then (getUnusedNameAux lctx suggestion 1).1
else suggestion
@[export lean_local_ctx_last_decl]
def lastDecl (lctx : LocalContext) : Option LocalDecl :=
lctx.decls.get! (lctx.decls.size - 1)
def setUserName (lctx : LocalContext) (fvarId : FVarId) (userName : Name) : LocalContext :=
let decl := lctx.get! fvarId
let decl := decl.updateUserName userName
{ fvarIdToDecl := lctx.fvarIdToDecl.insert decl.fvarId decl,
decls := lctx.decls.set decl.index decl }
@[export lean_local_ctx_rename_user_name]
def renameUserName (lctx : LocalContext) (fromName : Name) (toName : Name) : LocalContext :=
match lctx with
| { fvarIdToDecl := map, decls := decls } =>
match lctx.findFromUserName? fromName with
| none => lctx
| some decl =>
let decl := decl.updateUserName toName;
{ fvarIdToDecl := map.insert decl.fvarId decl,
decls := decls.set decl.index decl }
/--
Low-level function for updating the local context.
Assumptions about `f`: the resulting nested expressions must be definitionally equal to their
original values, and neither the `index` nor the `fvarId` is modified. -/
@[inline] def modifyLocalDecl (lctx : LocalContext) (fvarId : FVarId) (f : LocalDecl → LocalDecl) : LocalContext :=
match lctx with
| { fvarIdToDecl := map, decls := decls } =>
match lctx.find? fvarId with
| none => lctx
| some decl =>
let decl := f decl;
{ fvarIdToDecl := map.insert decl.fvarId decl,
decls := decls.set decl.index decl }
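-- `updateBinderInfo` below is a typical use of `modifyLocalDecl`: it only changes the binder
-- annotation, so the declaration's type and value (and hence the assumptions above) are untouched.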
def updateBinderInfo (lctx : LocalContext) (fvarId : FVarId) (bi : BinderInfo) : LocalContext :=
modifyLocalDecl lctx fvarId fun decl => decl.updateBinderInfo bi
@[export lean_local_ctx_num_indices]
def numIndices (lctx : LocalContext) : Nat :=
lctx.decls.size
@[export lean_local_ctx_get]
def getAt! (lctx : LocalContext) (i : Nat) : Option LocalDecl :=
lctx.decls.get! i
section
universes u v
variables {m : Type u → Type v} [Monad m]
variable {β : Type u}
@[specialize] def foldlM (lctx : LocalContext) (f : β → LocalDecl → m β) (init : β) (start : Nat := 0) : m β :=
lctx.decls.foldlM (init := init) (start := start) fun b decl => match decl with
| none => pure b
| some decl => f b decl
@[specialize] def forM (lctx : LocalContext) (f : LocalDecl → m PUnit) : m PUnit :=
lctx.decls.forM fun decl => match decl with
| none => pure PUnit.unit
| some decl => f decl
@[specialize] def findDeclM? (lctx : LocalContext) (f : LocalDecl → m (Option β)) : m (Option β) :=
lctx.decls.findSomeM? fun decl => match decl with
| none => pure none
| some decl => f decl
@[specialize] def findDeclRevM? (lctx : LocalContext) (f : LocalDecl → m (Option β)) : m (Option β) :=
lctx.decls.findSomeRevM? fun decl => match decl with
| none => pure none
| some decl => f decl
end
@[inline] def foldl {β} (lctx : LocalContext) (f : β → LocalDecl → β) (init : β) (start : Nat := 0) : β :=
Id.run <| lctx.foldlM f init start
@[inline] def findDecl? {β} (lctx : LocalContext) (f : LocalDecl → Option β) : Option β :=
Id.run <| lctx.findDeclM? f
@[inline] def findDeclRev? {β} (lctx : LocalContext) (f : LocalDecl → Option β) : Option β :=
Id.run <| lctx.findDeclRevM? f
partial def isSubPrefixOfAux (a₁ a₂ : PArray (Option LocalDecl)) (exceptFVars : Array Expr) (i j : Nat) : Bool :=
if i < a₁.size then
match a₁[i] with
| none => isSubPrefixOfAux a₁ a₂ exceptFVars (i+1) j
| some decl₁ =>
if exceptFVars.any fun fvar => fvar.fvarId! == decl₁.fvarId then
isSubPrefixOfAux a₁ a₂ exceptFVars (i+1) j
else if j < a₂.size then
match a₂[j] with
| none => isSubPrefixOfAux a₁ a₂ exceptFVars i (j+1)
| some decl₂ => if decl₁.fvarId == decl₂.fvarId then isSubPrefixOfAux a₁ a₂ exceptFVars (i+1) (j+1) else isSubPrefixOfAux a₁ a₂ exceptFVars i (j+1)
else false
else true
/- Given `lctx₁ - exceptFVars` of the form `(x_1 : A_1) ... (x_n : A_n)`, return `true`
iff there is a local context `B_1* (x_1 : A_1) ... B_n* (x_n : A_n)` that is a prefix
of `lctx₂`, where the `B_i` are (possibly empty) sequences of local declarations. -/
def isSubPrefixOf (lctx₁ lctx₂ : LocalContext) (exceptFVars : Array Expr := #[]) : Bool :=
isSubPrefixOfAux lctx₁.decls lctx₂.decls exceptFVars 0 0
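-- For instance, `(x : A) (y : B)` is a sub-prefix of `(z : C) (x : A) (w : D) (y : B)`,
-- but not of `(y : B) (x : A)`: the shared declarations must occur in the same order.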
@[inline] def mkBinding (isLambda : Bool) (lctx : LocalContext) (xs : Array Expr) (b : Expr) : Expr :=
let b := b.abstract xs
xs.size.foldRev (init := b) fun i b =>
let x := xs[i]
match lctx.findFVar? x with
| some (LocalDecl.cdecl _ _ n ty bi) =>
let ty := ty.abstractRange i xs;
if isLambda then
Lean.mkLambda n bi ty b
else
Lean.mkForall n bi ty b
| some (LocalDecl.ldecl _ _ n ty val nonDep) =>
if b.hasLooseBVar 0 then
let ty := ty.abstractRange i xs
let val := val.abstractRange i xs
mkLet n ty val b nonDep
else
b.lowerLooseBVars 1 1
| none => panic! "unknown free variable"
def mkLambda (lctx : LocalContext) (xs : Array Expr) (b : Expr) : Expr :=
mkBinding true lctx xs b
def mkForall (lctx : LocalContext) (xs : Array Expr) (b : Expr) : Expr :=
mkBinding false lctx xs b
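-- Illustration (assuming `x` is a free variable declared as `(x : Nat)` in `lctx`):
-- `lctx.mkLambda #[x] e` abstracts the occurrences of `x` in `e` and wraps the result in a
-- `fun (x : Nat) => ...` binder, while `mkForall` produces the corresponding `∀`-binder;
-- a let-declaration is turned back into a `let` only when its variable still occurs in the body.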
section
universes u
variables {m : Type → Type u} [Monad m]
@[inline] def anyM (lctx : LocalContext) (p : LocalDecl → m Bool) : m Bool :=
lctx.decls.anyM fun d => match d with
| some decl => p decl
| none => pure false
@[inline] def allM (lctx : LocalContext) (p : LocalDecl → m Bool) : m Bool :=
lctx.decls.allM fun d => match d with
| some decl => p decl
| none => pure true
end
@[inline] def any (lctx : LocalContext) (p : LocalDecl → Bool) : Bool :=
Id.run <| lctx.anyM p
@[inline] def all (lctx : LocalContext) (p : LocalDecl → Bool) : Bool :=
Id.run <| lctx.allM p
def sanitizeNames (lctx : LocalContext) : StateM NameSanitizerState LocalContext := do
let st ← get
if !getSanitizeNames st.options then pure lctx else
flip StateT.run' ({} : NameSet) $
lctx.decls.size.foldRevM
(fun i lctx =>
match lctx.decls.get! i with
| none => pure lctx
| some decl => do
let usedNames ← get
set <| usedNames.insert decl.userName
if decl.userName.hasMacroScopes || usedNames.contains decl.userName then do
let userNameNew ← sanitizeName decl.userName
pure <| lctx.setUserName decl.fvarId userNameNew
else
pure lctx)
lctx
end LocalContext
class MonadLCtx (m : Type → Type) :=
(getLCtx : m LocalContext)
export MonadLCtx (getLCtx)
instance (m n) [MonadLCtx m] [MonadLift m n] : MonadLCtx n :=
{ getLCtx := liftM (getLCtx : m _) }
def replaceFVarIdAtLocalDecl (fvarId : FVarId) (e : Expr) (d : LocalDecl) : LocalDecl :=
if d.fvarId == fvarId then d
else match d with
| LocalDecl.cdecl idx id n type bi => LocalDecl.cdecl idx id n (type.replaceFVarId fvarId e) bi
| LocalDecl.ldecl idx id n type val nonDep => LocalDecl.ldecl idx id n (type.replaceFVarId fvarId e) (val.replaceFVarId fvarId e) nonDep
end Lean
|
1ada13b6b19b8e4abf94ad5e9cd592ffcd4093a7
|
9028d228ac200bbefe3a711342514dd4e4458bff
|
/src/data/equiv/basic.lean
|
fb44e5639a9d9c0ae12b45762c77cb3a1d06f24e
|
[
"Apache-2.0"
] |
permissive
|
mcncm/mathlib
|
8d25099344d9d2bee62822cb9ed43aa3e09fa05e
|
fde3d78cadeec5ef827b16ae55664ef115e66f57
|
refs/heads/master
| 1,672,743,316,277
| 1,602,618,514,000
| 1,602,618,514,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 77,422
|
lean
|
/-
Copyright (c) 2015 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura, Mario Carneiro
-/
import data.set.function
import algebra.group.basic
/-!
# Equivalence between types
In this file we define two types:
* `equiv α β` a.k.a. `α ≃ β`: a bijective map `α → β` bundled with its inverse map; we use this (and
not equality!) to express that various `Type`s or `Sort`s are equivalent.
* `equiv.perm α`: the group of permutations `α ≃ α`.
Then we define
* canonical isomorphisms between various types: e.g.,
- `equiv.refl α` is the identity map interpreted as `α ≃ α`;
- `equiv.sum_equiv_sigma_bool` is the canonical equivalence between the sum of two types `α ⊕ β`
and the sigma-type `Σ b : bool, cond b α β`;
- `equiv.prod_sum_distrib : α × (β ⊕ γ) ≃ (α × β) ⊕ (α × γ)` shows that type product and type sum
satisfy the distributive law up to a canonical equivalence;
* operations on equivalences: e.g.,
- `equiv.symm e : β ≃ α` is the inverse of `e : α ≃ β`;
- `equiv.trans e₁ e₂ : α ≃ γ` is the composition of `e₁ : α ≃ β` and `e₂ : β ≃ γ` (note the order
of the arguments!);
- `equiv.prod_congr ea eb : α₁ × β₁ ≃ α₂ × β₂`: combine two equivalences `ea : α₁ ≃ α₂` and
`eb : β₁ ≃ β₂` using `prod.map`.
* definitions that transfer some instances along an equivalence. By convention, we transfer
instances from right to left.
- `equiv.inhabited` takes `e : α ≃ β` and `[inhabited β]` and returns `inhabited α`;
- `equiv.unique` takes `e : α ≃ β` and `[unique β]` and returns `unique α`;
- `equiv.decidable_eq` takes `e : α ≃ β` and `[decidable_eq β]` and returns `decidable_eq α`.
More definitions of this kind can be found in other files. E.g., `data/equiv/transfer_instance`
does it for many algebraic type classes like `group`, `module`, etc.
* group structure on `equiv.perm α`. More lemmas about `equiv.perm` can be found in `data/equiv/perm`.
## Tags
equivalence, congruence, bijective map
-/
open function
universes u v w z
variables {α : Sort u} {β : Sort v} {γ : Sort w}
/-- `α ≃ β` is the type of functions from `α → β` with a two-sided inverse. -/
@[nolint has_inhabited_instance]
structure equiv (α : Sort*) (β : Sort*) :=
(to_fun : α → β)
(inv_fun : β → α)
(left_inv : left_inverse inv_fun to_fun)
(right_inv : right_inverse inv_fun to_fun)
infix ` ≃ `:25 := equiv
/-- Convert an involutive function `f` to an equivalence with `to_fun = inv_fun = f`. -/
def function.involutive.to_equiv (f : α → α) (h : involutive f) : α ≃ α :=
⟨f, f, h.left_inverse, h.right_inverse⟩
namespace equiv
/-- `perm α` is the type of bijections from `α` to itself. -/
@[reducible] def perm (α : Sort*) := equiv α α
instance : has_coe_to_fun (α ≃ β) :=
⟨_, to_fun⟩
@[simp] theorem coe_fn_mk (f : α → β) (g l r) : (equiv.mk f g l r : α → β) = f :=
rfl
/-- The map `coe_fn : (α ≃ β) → (α → β)` is injective. We can't use `function.injective`
here, but we mimic its signature by using `⦃e₁ e₂⦄`. -/
theorem coe_fn_injective : ∀ ⦃e₁ e₂ : equiv α β⦄, (e₁ : α → β) = e₂ → e₁ = e₂
| ⟨f₁, g₁, l₁, r₁⟩ ⟨f₂, g₂, l₂, r₂⟩ h :=
have f₁ = f₂, from h,
have g₁ = g₂, from l₁.eq_right_inverse (this.symm ▸ r₂),
by simp *
@[ext] lemma ext {f g : equiv α β} (H : ∀ x, f x = g x) : f = g :=
coe_fn_injective (funext H)
@[ext] lemma perm.ext {σ τ : equiv.perm α} (H : ∀ x, σ x = τ x) : σ = τ :=
equiv.ext H
lemma ext_iff {f g : equiv α β} : f = g ↔ ∀ x, f x = g x :=
⟨λ h x, h ▸ rfl, ext⟩
lemma perm.ext_iff {σ τ : equiv.perm α} : σ = τ ↔ ∀ x, σ x = τ x :=
ext_iff
/-- Any type is equivalent to itself. -/
@[refl] protected def refl (α : Sort*) : α ≃ α := ⟨id, id, λ x, rfl, λ x, rfl⟩
instance inhabited' : inhabited (α ≃ α) := ⟨equiv.refl α⟩
/-- Inverse of an equivalence `e : α ≃ β`. -/
@[symm] protected def symm (e : α ≃ β) : β ≃ α := ⟨e.inv_fun, e.to_fun, e.right_inv, e.left_inv⟩
/-- Composition of equivalences `e₁ : α ≃ β` and `e₂ : β ≃ γ`. -/
@[trans] protected def trans (e₁ : α ≃ β) (e₂ : β ≃ γ) : α ≃ γ :=
⟨e₂.to_fun ∘ e₁.to_fun, e₁.inv_fun ∘ e₂.inv_fun,
e₂.left_inv.comp e₁.left_inv, e₂.right_inv.comp e₁.right_inv⟩
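-- A small sanity check of the composition order: `e₁.trans e₂` applies `e₁` first and `e₂` second.
example (e₁ : α ≃ β) (e₂ : β ≃ γ) (a : α) : (e₁.trans e₂) a = e₂ (e₁ a) := rfl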
@[simp]
lemma to_fun_as_coe (e : α ≃ β) : e.to_fun = e := rfl
@[simp]
lemma inv_fun_as_coe (e : α ≃ β) : e.inv_fun = e.symm := rfl
protected theorem injective (e : α ≃ β) : injective e :=
e.left_inv.injective
protected theorem surjective (e : α ≃ β) : surjective e :=
e.right_inv.surjective
protected theorem bijective (f : α ≃ β) : bijective f :=
⟨f.injective, f.surjective⟩
@[simp] lemma range_eq_univ {α : Type*} {β : Type*} (e : α ≃ β) : set.range e = set.univ :=
set.eq_univ_of_forall e.surjective
protected theorem subsingleton (e : α ≃ β) [subsingleton β] : subsingleton α :=
e.injective.subsingleton
/-- Transfer `decidable_eq` across an equivalence. -/
protected def decidable_eq (e : α ≃ β) [decidable_eq β] : decidable_eq α :=
e.injective.decidable_eq
lemma nonempty_iff_nonempty (e : α ≃ β) : nonempty α ↔ nonempty β :=
nonempty.congr e e.symm
/-- If `α ≃ β` and `β` is inhabited, then so is `α`. -/
protected def inhabited [inhabited β] (e : α ≃ β) : inhabited α :=
⟨e.symm (default _)⟩
/-- If `α ≃ β` and `β` is a singleton type, then so is `α`. -/
protected def unique [unique β] (e : α ≃ β) : unique α :=
e.symm.surjective.unique
/-- Equivalence between equal types. -/
protected def cast {α β : Sort*} (h : α = β) : α ≃ β :=
⟨cast h, cast h.symm, λ x, by { cases h, refl }, λ x, by { cases h, refl }⟩
@[simp] theorem coe_fn_symm_mk (f : α → β) (g l r) : ((equiv.mk f g l r).symm : β → α) = g :=
rfl
@[simp] theorem coe_refl : ⇑(equiv.refl α) = id := rfl
theorem refl_apply (x : α) : equiv.refl α x = x := rfl
@[simp] theorem coe_trans (f : α ≃ β) (g : β ≃ γ) : ⇑(f.trans g) = g ∘ f := rfl
theorem trans_apply (f : α ≃ β) (g : β ≃ γ) (a : α) : (f.trans g) a = g (f a) := rfl
@[simp] theorem apply_symm_apply (e : α ≃ β) (x : β) : e (e.symm x) = x :=
e.right_inv x
@[simp] theorem symm_apply_apply (e : α ≃ β) (x : α) : e.symm (e x) = x :=
e.left_inv x
@[simp] theorem symm_comp_self (e : α ≃ β) : e.symm ∘ e = id := funext e.symm_apply_apply
@[simp] theorem self_comp_symm (e : α ≃ β) : e ∘ e.symm = id := funext e.apply_symm_apply
@[simp] lemma symm_trans_apply (f : α ≃ β) (g : β ≃ γ) (a : γ) :
(f.trans g).symm a = f.symm (g.symm a) := rfl
@[simp] theorem apply_eq_iff_eq (f : α ≃ β) {x y : α} : f x = f y ↔ x = y :=
f.injective.eq_iff
theorem apply_eq_iff_eq_symm_apply {α β : Sort*} (f : α ≃ β) {x : α} {y : β} :
f x = y ↔ x = f.symm y :=
begin
conv_lhs { rw ←apply_symm_apply f y, },
rw apply_eq_iff_eq,
end
@[simp] theorem cast_apply {α β} (h : α = β) (x : α) : equiv.cast h x = cast h x := rfl
lemma symm_apply_eq {α β} (e : α ≃ β) {x y} : e.symm x = y ↔ x = e y :=
⟨λ H, by simp [H.symm], λ H, by simp [H]⟩
lemma eq_symm_apply {α β} (e : α ≃ β) {x y} : y = e.symm x ↔ e y = x :=
(eq_comm.trans e.symm_apply_eq).trans eq_comm
@[simp] theorem symm_symm (e : α ≃ β) : e.symm.symm = e := by { cases e, refl }
@[simp] theorem trans_refl (e : α ≃ β) : e.trans (equiv.refl β) = e := by { cases e, refl }
@[simp] theorem refl_symm : (equiv.refl α).symm = equiv.refl α := rfl
@[simp] theorem refl_trans (e : α ≃ β) : (equiv.refl α).trans e = e := by { cases e, refl }
@[simp] theorem symm_trans (e : α ≃ β) : e.symm.trans e = equiv.refl β := ext (by simp)
@[simp] theorem trans_symm (e : α ≃ β) : e.trans e.symm = equiv.refl α := ext (by simp)
lemma trans_assoc {δ} (ab : α ≃ β) (bc : β ≃ γ) (cd : γ ≃ δ) :
(ab.trans bc).trans cd = ab.trans (bc.trans cd) :=
equiv.ext $ assume a, rfl
theorem left_inverse_symm (f : equiv α β) : left_inverse f.symm f := f.left_inv
theorem right_inverse_symm (f : equiv α β) : function.right_inverse f.symm f := f.right_inv
/-- If `α` is equivalent to `β` and `γ` is equivalent to `δ`, then the type of equivalences `α ≃ γ`
is equivalent to the type of equivalences `β ≃ δ`. -/
def equiv_congr {δ} (ab : α ≃ β) (cd : γ ≃ δ) : (α ≃ γ) ≃ (β ≃ δ) :=
⟨ λac, (ab.symm.trans ac).trans cd, λbd, ab.trans $ bd.trans $ cd.symm,
assume ac, by { ext x, simp }, assume ac, by { ext x, simp } ⟩
/-- If `α` is equivalent to `β`, then `perm α` is equivalent to `perm β`. -/
def perm_congr {α : Type*} {β : Type*} (e : α ≃ β) : perm α ≃ perm β :=
equiv_congr e e
protected lemma image_eq_preimage {α β} (e : α ≃ β) (s : set α) : e '' s = e.symm ⁻¹' s :=
set.ext $ assume x, set.mem_image_iff_of_inverse e.left_inv e.right_inv
protected lemma subset_image {α β} (e : α ≃ β) (s : set α) (t : set β) :
t ⊆ e '' s ↔ e.symm '' t ⊆ s :=
by rw [set.image_subset_iff, e.image_eq_preimage]
lemma symm_image_image {α β} (f : equiv α β) (s : set α) : f.symm '' (f '' s) = s :=
by { rw [← set.image_comp], simp }
protected lemma image_compl {α β} (f : equiv α β) (s : set α) :
f '' sᶜ = (f '' s)ᶜ :=
set.image_compl_eq f.bijective
/-!
### The group of permutations (self-equivalences) of a type `α`
-/
namespace perm
instance perm_group {α : Type u} : group (perm α) :=
begin
refine { mul := λ f g, equiv.trans g f, one := equiv.refl α, inv := equiv.symm, ..};
intros; apply equiv.ext; try { apply trans_apply },
apply symm_apply_apply
end
@[simp] theorem mul_apply {α : Type u} (f g : perm α) (x) : (f * g) x = f (g x) :=
equiv.trans_apply _ _ _
@[simp] theorem one_apply {α : Type u} (x) : (1 : perm α) x = x := rfl
@[simp] lemma inv_apply_self {α : Type u} (f : perm α) (x) :
f⁻¹ (f x) = x := equiv.symm_apply_apply _ _
@[simp] lemma apply_inv_self {α : Type u} (f : perm α) (x) :
f (f⁻¹ x) = x := equiv.apply_symm_apply _ _
lemma one_def {α : Type u} : (1 : perm α) = equiv.refl α := rfl
lemma mul_def {α : Type u} (f g : perm α) : f * g = g.trans f := rfl
lemma inv_def {α : Type u} (f : perm α) : f⁻¹ = f.symm := rfl
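-- Usage sketch: multiplication of permutations composes right-to-left (the right factor acts
-- first), mirroring `equiv.trans`, and inverses cancel pointwise.
example {α : Type u} (f g : perm α) (x : α) : (f * g) x = f (g x) := mul_apply f g x
example {α : Type u} (f : perm α) (x : α) : (f * f⁻¹) x = x := by simp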
end perm
/-- If `α` is an empty type, then it is equivalent to the `empty` type. -/
def equiv_empty (h : α → false) : α ≃ empty :=
⟨λ x, (h x).elim, λ e, e.rec _, λ x, (h x).elim, λ e, e.rec _⟩
/-- `false` is equivalent to `empty`. -/
def false_equiv_empty : false ≃ empty :=
equiv_empty _root_.id
/-- If `α` is an empty type, then it is equivalent to the `pempty` type in any universe. -/
def {u' v'} equiv_pempty {α : Sort v'} (h : α → false) : α ≃ pempty.{u'} :=
⟨λ x, (h x).elim, λ e, e.rec _, λ x, (h x).elim, λ e, e.rec _⟩
/-- `false` is equivalent to `pempty`. -/
def false_equiv_pempty : false ≃ pempty :=
equiv_pempty _root_.id
/-- `empty` is equivalent to `pempty`. -/
def empty_equiv_pempty : empty ≃ pempty :=
equiv_pempty $ empty.rec _
/-- `pempty` types from any two universes are equivalent. -/
def pempty_equiv_pempty : pempty.{v} ≃ pempty.{w} :=
equiv_pempty pempty.elim
/-- If `α` is not `nonempty`, then it is equivalent to `empty`. -/
def empty_of_not_nonempty {α : Sort*} (h : ¬ nonempty α) : α ≃ empty :=
equiv_empty $ assume a, h ⟨a⟩
/-- If `α` is not `nonempty`, then it is equivalent to `pempty`. -/
def pempty_of_not_nonempty {α : Sort*} (h : ¬ nonempty α) : α ≃ pempty :=
equiv_pempty $ assume a, h ⟨a⟩
/-- The `Sort` of proofs of a true proposition is equivalent to `punit`. -/
def prop_equiv_punit {p : Prop} (h : p) : p ≃ punit :=
⟨λ x, (), λ x, h, λ _, rfl, λ ⟨⟩, rfl⟩
/-- `true` is equivalent to `punit`. -/
def true_equiv_punit : true ≃ punit := prop_equiv_punit trivial
/-- `ulift α` is equivalent to `α`. -/
protected def ulift {α : Type v} : ulift.{u} α ≃ α :=
⟨ulift.down, ulift.up, ulift.up_down, λ a, rfl⟩
@[simp] lemma coe_ulift {α : Type v} : ⇑(@equiv.ulift.{u} α) = ulift.down := rfl
@[simp] lemma coe_ulift_symm {α : Type v} : ⇑(@equiv.ulift.{u} α).symm = ulift.up := rfl
/-- `plift α` is equivalent to `α`. -/
protected def plift : plift α ≃ α :=
⟨plift.down, plift.up, plift.up_down, plift.down_up⟩
@[simp] lemma coe_plift : ⇑(@equiv.plift α) = plift.down := rfl
@[simp] lemma coe_plift_symm : ⇑(@equiv.plift α).symm = plift.up := rfl
/-- equivalence of propositions is the same as iff -/
def of_iff {P Q : Prop} (h : P ↔ Q) : P ≃ Q :=
{ to_fun := h.mp,
inv_fun := h.mpr,
left_inv := λ x, rfl,
right_inv := λ y, rfl }
/-- If `α₁` is equivalent to `α₂` and `β₁` is equivalent to `β₂`, then the type of maps `α₁ → β₁`
is equivalent to the type of maps `α₂ → β₂`. -/
@[congr] def arrow_congr {α₁ β₁ α₂ β₂ : Sort*} (e₁ : α₁ ≃ α₂) (e₂ : β₁ ≃ β₂) :
(α₁ → β₁) ≃ (α₂ → β₂) :=
{ to_fun := λ f, e₂.to_fun ∘ f ∘ e₁.inv_fun,
inv_fun := λ f, e₂.inv_fun ∘ f ∘ e₁.to_fun,
left_inv := λ f, funext $ λ x, by simp,
right_inv := λ f, funext $ λ x, by simp }
@[simp] lemma arrow_congr_apply {α₁ β₁ α₂ β₂ : Sort*} (e₁ : α₁ ≃ α₂) (e₂ : β₁ ≃ β₂)
(f : α₁ → β₁) (x : α₂) :
arrow_congr e₁ e₂ f x = (e₂ $ f $ e₁.symm x) :=
rfl
lemma arrow_congr_comp {α₁ β₁ γ₁ α₂ β₂ γ₂ : Sort*}
(ea : α₁ ≃ α₂) (eb : β₁ ≃ β₂) (ec : γ₁ ≃ γ₂) (f : α₁ → β₁) (g : β₁ → γ₁) :
arrow_congr ea ec (g ∘ f) = (arrow_congr eb ec g) ∘ (arrow_congr ea eb f) :=
by { ext, simp only [comp, arrow_congr_apply, eb.symm_apply_apply] }
@[simp] lemma arrow_congr_refl {α β : Sort*} :
arrow_congr (equiv.refl α) (equiv.refl β) = equiv.refl (α → β) := rfl
@[simp] lemma arrow_congr_trans {α₁ β₁ α₂ β₂ α₃ β₃ : Sort*}
(e₁ : α₁ ≃ α₂) (e₁' : β₁ ≃ β₂) (e₂ : α₂ ≃ α₃) (e₂' : β₂ ≃ β₃) :
arrow_congr (e₁.trans e₂) (e₁'.trans e₂') = (arrow_congr e₁ e₁').trans (arrow_congr e₂ e₂') :=
rfl
@[simp] lemma arrow_congr_symm {α₁ β₁ α₂ β₂ : Sort*} (e₁ : α₁ ≃ α₂) (e₂ : β₁ ≃ β₂) :
(arrow_congr e₁ e₂).symm = arrow_congr e₁.symm e₂.symm :=
rfl
/--
A version of `equiv.arrow_congr` in `Type`, rather than `Sort`.
The `equiv_rw` tactic is not able to use the default `Sort` level `equiv.arrow_congr`,
because Lean's universe rules will not unify `?l_1` with `imax (1 ?m_1)`.
-/
@[congr]
def arrow_congr' {α₁ β₁ α₂ β₂ : Type*} (hα : α₁ ≃ α₂) (hβ : β₁ ≃ β₂) : (α₁ → β₁) ≃ (α₂ → β₂) :=
equiv.arrow_congr hα hβ
@[simp] lemma arrow_congr'_apply {α₁ β₁ α₂ β₂ : Type*} (e₁ : α₁ ≃ α₂) (e₂ : β₁ ≃ β₂)
(f : α₁ → β₁) (x : α₂) :
arrow_congr' e₁ e₂ f x = (e₂ $ f $ e₁.symm x) :=
rfl
@[simp] lemma arrow_congr'_refl {α β : Type*} :
arrow_congr' (equiv.refl α) (equiv.refl β) = equiv.refl (α → β) := rfl
@[simp] lemma arrow_congr'_trans {α₁ β₁ α₂ β₂ α₃ β₃ : Type*}
(e₁ : α₁ ≃ α₂) (e₁' : β₁ ≃ β₂) (e₂ : α₂ ≃ α₃) (e₂' : β₂ ≃ β₃) :
arrow_congr' (e₁.trans e₂) (e₁'.trans e₂') = (arrow_congr' e₁ e₁').trans (arrow_congr' e₂ e₂') :=
rfl
@[simp] lemma arrow_congr'_symm {α₁ β₁ α₂ β₂ : Type*} (e₁ : α₁ ≃ α₂) (e₂ : β₁ ≃ β₂) :
(arrow_congr' e₁ e₂).symm = arrow_congr' e₁.symm e₂.symm :=
rfl
/-- Conjugate a map `f : α → α` by an equivalence `α ≃ β`. -/
def conj (e : α ≃ β) : (α → α) ≃ (β → β) := arrow_congr e e
@[simp] lemma conj_apply (e : α ≃ β) (f : α → α) (x : β) :
e.conj f x = (e $ f $ e.symm x) :=
rfl
@[simp] lemma conj_refl : conj (equiv.refl α) = equiv.refl (α → α) := rfl
@[simp] lemma conj_symm (e : α ≃ β) : e.conj.symm = e.symm.conj := rfl
@[simp] lemma conj_trans (e₁ : α ≃ β) (e₂ : β ≃ γ) :
(e₁.trans e₂).conj = e₁.conj.trans e₂.conj :=
rfl
-- This should not be a simp lemma as long as `(∘)` is reducible:
-- when `(∘)` is reducible, Lean can unify `f₁ ∘ f₂` with any `g` using
-- `f₁ := g` and `f₂ := λ x, x`. This causes nontermination.
lemma conj_comp (e : α ≃ β) (f₁ f₂ : α → α) :
e.conj (f₁ ∘ f₂) = (e.conj f₁) ∘ (e.conj f₂) :=
by apply arrow_congr_comp
/-- `punit` sorts in any two universes are equivalent. -/
def punit_equiv_punit : punit.{v} ≃ punit.{w} :=
⟨λ _, punit.star, λ _, punit.star, λ u, by { cases u, refl }, λ u, by { cases u, reflexivity }⟩
section
/-- The sort of maps to `punit.{v}` is equivalent to `punit.{w}`. -/
def arrow_punit_equiv_punit (α : Sort*) : (α → punit.{v}) ≃ punit.{w} :=
⟨λ f, punit.star, λ u f, punit.star,
λ f, by { funext x, cases f x, refl }, λ u, by { cases u, reflexivity }⟩
/-- The sort of maps from `punit` is equivalent to the codomain. -/
def punit_arrow_equiv (α : Sort*) : (punit.{u} → α) ≃ α :=
⟨λ f, f punit.star, λ a u, a, λ f, by { ext ⟨⟩, refl }, λ u, rfl⟩
/-- The sort of maps from `empty` is equivalent to `punit`. -/
def empty_arrow_equiv_punit (α : Sort*) : (empty → α) ≃ punit.{u} :=
⟨λ f, punit.star, λ u e, e.rec _, λ f, funext $ λ x, x.rec _, λ u, by { cases u, refl }⟩
/-- The sort of maps from `pempty` is equivalent to `punit`. -/
def pempty_arrow_equiv_punit (α : Sort*) : (pempty → α) ≃ punit.{u} :=
⟨λ f, punit.star, λ u e, e.rec _, λ f, funext $ λ x, x.rec _, λ u, by { cases u, refl }⟩
/-- The sort of maps from `false` is equivalent to `punit`. -/
def false_arrow_equiv_punit (α : Sort*) : (false → α) ≃ punit.{u} :=
calc (false → α) ≃ (empty → α) : arrow_congr false_equiv_empty (equiv.refl _)
... ≃ punit : empty_arrow_equiv_punit _
end
/-- Product of two equivalences. If `α₁ ≃ α₂` and `β₁ ≃ β₂`, then `α₁ × β₁ ≃ α₂ × β₂`. -/
@[congr] def prod_congr {α₁ β₁ α₂ β₂ : Type*} (e₁ : α₁ ≃ α₂) (e₂ : β₁ ≃ β₂) : α₁ × β₁ ≃ α₂ × β₂ :=
⟨prod.map e₁ e₂, prod.map e₁.symm e₂.symm, λ ⟨a, b⟩, by simp, λ ⟨a, b⟩, by simp⟩
@[simp] theorem coe_prod_congr {α₁ β₁ α₂ β₂ : Type*} (e₁ : α₁ ≃ α₂) (e₂ : β₁ ≃ β₂) :
⇑(prod_congr e₁ e₂) = prod.map e₁ e₂ :=
rfl
@[simp] theorem prod_congr_symm {α₁ β₁ α₂ β₂ : Type*} (e₁ : α₁ ≃ α₂) (e₂ : β₁ ≃ β₂) :
(prod_congr e₁ e₂).symm = prod_congr e₁.symm e₂.symm :=
rfl
/-- Type product is commutative up to an equivalence: `α × β ≃ β × α`. -/
def prod_comm (α β : Type*) : α × β ≃ β × α :=
⟨prod.swap, prod.swap, λ⟨a, b⟩, rfl, λ⟨a, b⟩, rfl⟩
@[simp] lemma coe_prod_comm (α β) : ⇑(prod_comm α β) = prod.swap := rfl
@[simp] lemma prod_comm_symm (α β) : (prod_comm α β).symm = prod_comm β α := rfl
/-- Type product is associative up to an equivalence. -/
def prod_assoc (α β γ : Sort*) : (α × β) × γ ≃ α × (β × γ) :=
⟨λ p, ⟨p.1.1, ⟨p.1.2, p.2⟩⟩, λp, ⟨⟨p.1, p.2.1⟩, p.2.2⟩, λ ⟨⟨a, b⟩, c⟩, rfl, λ ⟨a, ⟨b, c⟩⟩, rfl⟩
@[simp] theorem prod_assoc_apply {α β γ : Sort*} (p : (α × β) × γ) :
prod_assoc α β γ p = ⟨p.1.1, ⟨p.1.2, p.2⟩⟩ := rfl
@[simp] theorem prod_assoc_sym_apply {α β γ : Sort*} (p : α × (β × γ)) :
(prod_assoc α β γ).symm p = ⟨⟨p.1, p.2.1⟩, p.2.2⟩ := rfl
section
/-- `punit` is a right identity for type product up to an equivalence. -/
def prod_punit (α : Type*) : α × punit.{u+1} ≃ α :=
⟨λ p, p.1, λ a, (a, punit.star), λ ⟨_, punit.star⟩, rfl, λ a, rfl⟩
@[simp] theorem prod_punit_apply {α : Sort*} (a : α × punit.{u+1}) : prod_punit α a = a.1 := rfl
/-- `punit` is a left identity for type product up to an equivalence. -/
def punit_prod (α : Type*) : punit.{u+1} × α ≃ α :=
calc punit × α ≃ α × punit : prod_comm _ _
... ≃ α : prod_punit _
@[simp] theorem punit_prod_apply {α : Type*} (a : punit.{u+1} × α) : punit_prod α a = a.2 := rfl
/-- `empty` type is a right absorbing element for type product up to an equivalence. -/
def prod_empty (α : Type*) : α × empty ≃ empty :=
equiv_empty (λ ⟨_, e⟩, e.rec _)
/-- `empty` type is a left absorbing element for type product up to an equivalence. -/
def empty_prod (α : Type*) : empty × α ≃ empty :=
equiv_empty (λ ⟨e, _⟩, e.rec _)
/-- `pempty` type is a right absorbing element for type product up to an equivalence. -/
def prod_pempty (α : Type*) : α × pempty ≃ pempty :=
equiv_pempty (λ ⟨_, e⟩, e.rec _)
/-- `pempty` type is a left absorbing element for type product up to an equivalence. -/
def pempty_prod (α : Type*) : pempty × α ≃ pempty :=
equiv_pempty (λ ⟨e, _⟩, e.rec _)
end
section
open sum
/-- `psum` is equivalent to `sum`. -/
def psum_equiv_sum (α β : Type*) : psum α β ≃ α ⊕ β :=
⟨λ s, psum.cases_on s inl inr,
λ s, sum.cases_on s psum.inl psum.inr,
λ s, by cases s; refl,
λ s, by cases s; refl⟩
/-- If `α ≃ α'` and `β ≃ β'`, then `α ⊕ β ≃ α' ⊕ β'`. -/
def sum_congr {α₁ β₁ α₂ β₂ : Type*} (ea : α₁ ≃ α₂) (eb : β₁ ≃ β₂) : α₁ ⊕ β₁ ≃ α₂ ⊕ β₂ :=
⟨sum.map ea eb, sum.map ea.symm eb.symm, λ x, by simp, λ x, by simp⟩
@[simp] theorem sum_congr_apply {α₁ β₁ α₂ β₂ : Type*} (e₁ : α₁ ≃ α₂) (e₂ : β₁ ≃ β₂) (a : α₁ ⊕ β₁) :
sum_congr e₁ e₂ a = a.map e₁ e₂ :=
rfl
@[simp] lemma sum_congr_symm {α β γ δ : Type u} (e : α ≃ β) (f : γ ≃ δ) :
(equiv.sum_congr e f).symm = (equiv.sum_congr (e.symm) (f.symm)) :=
rfl
/-- `bool` is equivalent to the sum of two `punit`s. -/
def bool_equiv_punit_sum_punit : bool ≃ punit.{u+1} ⊕ punit.{v+1} :=
⟨λ b, cond b (inr punit.star) (inl punit.star),
λ s, sum.rec_on s (λ_, ff) (λ_, tt),
λ b, by cases b; refl,
λ s, by rcases s with ⟨⟨⟩⟩ | ⟨⟨⟩⟩; refl⟩
/-- `Prop` is noncomputably equivalent to `bool`. -/
noncomputable def Prop_equiv_bool : Prop ≃ bool :=
⟨λ p, @to_bool p (classical.prop_decidable _),
λ b, b, λ p, by simp, λ b, by simp⟩
/-- Sum of types is commutative up to an equivalence. -/
def sum_comm (α β : Sort*) : α ⊕ β ≃ β ⊕ α :=
⟨sum.swap, sum.swap, sum.swap_swap, sum.swap_swap⟩
@[simp] lemma sum_comm_apply (α β) (a) : sum_comm α β a = a.swap := rfl
@[simp] lemma sum_comm_symm (α β) : (sum_comm α β).symm = sum_comm β α := rfl
/-- Sum of types is associative up to an equivalence. -/
def sum_assoc (α β γ : Sort*) : (α ⊕ β) ⊕ γ ≃ α ⊕ (β ⊕ γ) :=
⟨sum.elim (sum.elim sum.inl (sum.inr ∘ sum.inl)) (sum.inr ∘ sum.inr),
sum.elim (sum.inl ∘ sum.inl) $ sum.elim (sum.inl ∘ sum.inr) sum.inr,
by rintros (⟨_ | _⟩ | _); refl,
by rintros (_ | ⟨_ | _⟩); refl⟩
@[simp] theorem sum_assoc_apply_in1 {α β γ} (a) : sum_assoc α β γ (inl (inl a)) = inl a := rfl
@[simp] theorem sum_assoc_apply_in2 {α β γ} (b) : sum_assoc α β γ (inl (inr b)) = inr (inl b) := rfl
@[simp] theorem sum_assoc_apply_in3 {α β γ} (c) : sum_assoc α β γ (inr c) = inr (inr c) := rfl
/-- Sum with `empty` is equivalent to the original type. -/
def sum_empty (α : Type*) : α ⊕ empty ≃ α :=
⟨sum.elim id (empty.rec _),
inl,
λ s, by { rcases s with _ | ⟨⟨⟩⟩, refl },
λ a, rfl⟩
@[simp] lemma sum_empty_apply_inl {α} (a) : sum_empty α (sum.inl a) = a := rfl
/-- The sum of `empty` with any `Sort*` is equivalent to the right summand. -/
def empty_sum (α : Sort*) : empty ⊕ α ≃ α :=
(sum_comm _ _).trans $ sum_empty _
@[simp] lemma empty_sum_apply_inr {α} (a) : empty_sum α (sum.inr a) = a := rfl
/-- Sum with `pempty` is equivalent to the original type. -/
def sum_pempty (α : Type*) : α ⊕ pempty ≃ α :=
⟨sum.elim id (pempty.rec _),
inl,
λ s, by { rcases s with _ | ⟨⟨⟩⟩, refl },
λ a, rfl⟩
@[simp] lemma sum_pempty_apply_inl {α} (a) : sum_pempty α (sum.inl a) = a := rfl
/-- The sum of `pempty` with any `Sort*` is equivalent to the right summand. -/
def pempty_sum (α : Sort*) : pempty ⊕ α ≃ α :=
(sum_comm _ _).trans $ sum_pempty _
@[simp] lemma pempty_sum_apply_inr {α} (a) : pempty_sum α (sum.inr a) = a := rfl
/-- `option α` is equivalent to `α ⊕ punit` -/
def option_equiv_sum_punit (α : Type*) : option α ≃ α ⊕ punit.{u+1} :=
⟨λ o, match o with none := inr punit.star | some a := inl a end,
λ s, match s with inr _ := none | inl a := some a end,
λ o, by cases o; refl,
λ s, by rcases s with _ | ⟨⟨⟩⟩; refl⟩
@[simp] lemma option_equiv_sum_punit_none {α} : option_equiv_sum_punit α none = sum.inr () := rfl
@[simp] lemma option_equiv_sum_punit_some {α} (a) :
option_equiv_sum_punit α (some a) = sum.inl a := rfl
@[simp] lemma option_equiv_sum_punit_symm_inl {α} (a) :
(option_equiv_sum_punit α).symm (sum.inl a) = a :=
rfl
@[simp] lemma option_equiv_sum_punit_symm_inr {α} (a) :
(option_equiv_sum_punit α).symm (sum.inr a) = none :=
rfl
/-- The set of `x : option α` such that `is_some x` is equivalent to `α`. -/
def option_is_some_equiv (α : Type*) : {x : option α // x.is_some} ≃ α :=
{ to_fun := λ o, option.get o.2,
inv_fun := λ x, ⟨some x, dec_trivial⟩,
left_inv := λ o, subtype.eq $ option.some_get _,
right_inv := λ x, option.get_some _ _ }
/-- `α ⊕ β` is equivalent to a `sigma`-type over `bool`. -/
def sum_equiv_sigma_bool (α β : Sort*) : α ⊕ β ≃ (Σ b: bool, cond b α β) :=
⟨λ s, match s with inl a := ⟨tt, a⟩ | inr b := ⟨ff, b⟩ end,
λ s, match s with ⟨tt, a⟩ := inl a | ⟨ff, b⟩ := inr b end,
λ s, by cases s; refl,
λ s, by rcases s with ⟨_|_, _⟩; refl⟩
/-- `sigma_preimage_equiv f` for `f : α → β` is the natural equivalence between
the type of all fibres of `f` and the total space `α`. -/
def sigma_preimage_equiv {α β : Type*} (f : α → β) :
(Σ y : β, {x // f x = y}) ≃ α :=
⟨λ x, x.2.1, λ x, ⟨f x, x, rfl⟩, λ ⟨y, x, rfl⟩, rfl, λ x, rfl⟩
@[simp] lemma sigma_preimage_equiv_apply {α β : Type*} (f : α → β)
(x : Σ y : β, {x // f x = y}) :
(sigma_preimage_equiv f) x = x.2.1 := rfl
@[simp] lemma sigma_preimage_equiv_symm_apply_fst {α β : Type*} (f : α → β) (a : α) :
((sigma_preimage_equiv f).symm a).1 = f a := rfl
@[simp] lemma sigma_preimage_equiv_symm_apply_snd_fst {α β : Type*} (f : α → β) (a : α) :
((sigma_preimage_equiv f).symm a).2.1 = a := rfl
end
section sum_compl
/-- For any predicate `p` on `α`,
the sum of the two subtypes `{a // p a}` and its complement `{a // ¬ p a}`
is naturally equivalent to `α`. -/
def sum_compl {α : Type*} (p : α → Prop) [decidable_pred p] :
{a // p a} ⊕ {a // ¬ p a} ≃ α :=
{ to_fun := sum.elim coe coe,
inv_fun := λ a, if h : p a then sum.inl ⟨a, h⟩ else sum.inr ⟨a, h⟩,
left_inv := by { rintros (⟨x,hx⟩|⟨x,hx⟩); dsimp; [rw dif_pos, rw dif_neg], },
right_inv := λ a, by { dsimp, split_ifs; refl } }
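-- For instance (a hypothetical use, relying on decidability of equality on `ℕ`): the even and
-- the non-even natural numbers together recover `ℕ`.
example : {n : ℕ // n % 2 = 0} ⊕ {n : ℕ // ¬ (n % 2 = 0)} ≃ ℕ := sum_compl (λ n : ℕ, n % 2 = 0)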
@[simp] lemma sum_compl_apply_inl {α : Type*} (p : α → Prop) [decidable_pred p]
(x : {a // p a}) :
sum_compl p (sum.inl x) = x := rfl
@[simp] lemma sum_compl_apply_inr {α : Type*} (p : α → Prop) [decidable_pred p]
(x : {a // ¬ p a}) :
sum_compl p (sum.inr x) = x := rfl
@[simp] lemma sum_compl_apply_symm_of_pos {α : Type*} (p : α → Prop) [decidable_pred p]
(a : α) (h : p a) :
(sum_compl p).symm a = sum.inl ⟨a, h⟩ := dif_pos h
@[simp] lemma sum_compl_apply_symm_of_neg {α : Type*} (p : α → Prop) [decidable_pred p]
(a : α) (h : ¬ p a) :
(sum_compl p).symm a = sum.inr ⟨a, h⟩ := dif_neg h
end sum_compl
section subtype_preimage
variables (p : α → Prop) [decidable_pred p] (x₀ : {a // p a} → β)
/-- For a fixed function `x₀ : {a // p a} → β` defined on a subtype of `α`,
the subtype of functions `x : α → β` that agree with `x₀` on the subtype `{a // p a}`
is naturally equivalent to the type of functions `{a // ¬ p a} → β`. -/
def subtype_preimage :
{x : α → β // x ∘ coe = x₀} ≃ ({a // ¬ p a} → β) :=
{ to_fun := λ (x : {x : α → β // x ∘ coe = x₀}) a, (x : α → β) a,
inv_fun := λ x, ⟨λ a, if h : p a then x₀ ⟨a, h⟩ else x ⟨a, h⟩,
funext $ λ ⟨a, h⟩, dif_pos h⟩,
left_inv := λ ⟨x, hx⟩, subtype.val_injective $ funext $ λ a,
(by { dsimp, split_ifs; [ rw ← hx, skip ]; refl }),
right_inv := λ x, funext $ λ ⟨a, h⟩,
show dite (p a) _ _ = _, by { dsimp, rw [dif_neg h] } }
@[simp] lemma subtype_preimage_apply (x : {x : α → β // x ∘ coe = x₀}) :
subtype_preimage p x₀ x = λ a, (x : α → β) a := rfl
@[simp] lemma subtype_preimage_symm_apply_coe (x : {a // ¬ p a} → β) :
((subtype_preimage p x₀).symm x : α → β) =
λ a, if h : p a then x₀ ⟨a, h⟩ else x ⟨a, h⟩ := rfl
lemma subtype_preimage_symm_apply_coe_pos (x : {a // ¬ p a} → β) (a : α) (h : p a) :
((subtype_preimage p x₀).symm x : α → β) a = x₀ ⟨a, h⟩ :=
dif_pos h
lemma subtype_preimage_symm_apply_coe_neg (x : {a // ¬ p a} → β) (a : α) (h : ¬ p a) :
((subtype_preimage p x₀).symm x : α → β) a = x ⟨a, h⟩ :=
dif_neg h
end subtype_preimage
section fun_unique
variables (α β) [unique α]
/-- If `α` has a unique term, then the type of function `α → β` is equivalent to `β`. -/
def fun_unique : (α → β) ≃ β :=
{ to_fun := λ f, f (default α),
inv_fun := λ b a, b,
left_inv := λ f, funext $ λ a, congr_arg f $ subsingleton.elim _ _,
right_inv := λ b, rfl }
variables {α β}
@[simp] lemma fun_unique_apply (f : α → β) :
fun_unique α β f = f (default α) := rfl
@[simp] lemma fun_unique_symm_apply (b : β) (a : α) :
(fun_unique α β).symm b a = b := rfl
end fun_unique
section
/-- A family of equivalences `Π a, β₁ a ≃ β₂ a` generates an equivalence between `Π a, β₁ a` and
`Π a, β₂ a`. -/
def Pi_congr_right {α} {β₁ β₂ : α → Sort*} (F : Π a, β₁ a ≃ β₂ a) : (Π a, β₁ a) ≃ (Π a, β₂ a) :=
⟨λ H a, F a (H a), λ H a, (F a).symm (H a),
λ H, funext $ by simp, λ H, funext $ by simp⟩
/-- Dependent `curry` equivalence: the type of dependent functions on `Σ i, β i` is equivalent
to the type of dependent functions of two arguments (i.e., functions to the space of functions). -/
def Pi_curry {α} {β : α → Sort*} (γ : Π a, β a → Sort*) :
(Π x : Σ i, β i, γ x.1 x.2) ≃ (Π a b, γ a b) :=
{ to_fun := λ f x y, f ⟨x,y⟩,
inv_fun := λ f x, f x.1 x.2,
left_inv := λ f, funext $ λ ⟨x,y⟩, rfl,
right_inv := λ f, funext $ λ x, funext $ λ y, rfl }
end
section
/-- A `psigma`-type is equivalent to the corresponding `sigma`-type. -/
def psigma_equiv_sigma {α} (β : α → Sort*) : (Σ' i, β i) ≃ Σ i, β i :=
⟨λ a, ⟨a.1, a.2⟩, λ a, ⟨a.1, a.2⟩, λ ⟨a, b⟩, rfl, λ ⟨a, b⟩, rfl⟩
@[simp] lemma psigma_equiv_sigma_apply {α} (β : α → Sort*) (x) :
psigma_equiv_sigma β x = ⟨x.1, x.2⟩ :=
rfl
@[simp] lemma psigma_equiv_sigma_symm_apply {α} (β : α → Sort*) (x) :
(psigma_equiv_sigma β).symm x = ⟨x.1, x.2⟩ :=
rfl
/-- A family of equivalences `Π a, β₁ a ≃ β₂ a` generates an equivalence between `Σ a, β₁ a` and
`Σ a, β₂ a`. -/
def sigma_congr_right {α} {β₁ β₂ : α → Sort*} (F : Π a, β₁ a ≃ β₂ a) : (Σ a, β₁ a) ≃ Σ a, β₂ a :=
⟨λ a, ⟨a.1, F a.1 a.2⟩, λ a, ⟨a.1, (F a.1).symm a.2⟩,
λ ⟨a, b⟩, congr_arg (sigma.mk a) $ symm_apply_apply (F a) b,
λ ⟨a, b⟩, congr_arg (sigma.mk a) $ apply_symm_apply (F a) b⟩
@[simp] lemma sigma_congr_right_apply {α} {β₁ β₂ : α → Sort*} (F : Π a, β₁ a ≃ β₂ a) (x) :
sigma_congr_right F x = ⟨x.1, F x.1 x.2⟩ :=
rfl
@[simp] lemma sigma_congr_right_symm_apply {α} {β₁ β₂ : α → Sort*} (F : Π a, β₁ a ≃ β₂ a) (x) :
(sigma_congr_right F).symm x = ⟨x.1, (F x.1).symm x.2⟩ :=
rfl
/-- An equivalence `f : α₁ ≃ α₂` generates an equivalence between `Σ a, β (f a)` and `Σ a, β a`. -/
def sigma_congr_left {α₁ α₂} {β : α₂ → Sort*} (e : α₁ ≃ α₂) : (Σ a:α₁, β (e a)) ≃ (Σ a:α₂, β a) :=
⟨λ a, ⟨e a.1, a.2⟩, λ a, ⟨e.symm a.1, @@eq.rec β a.2 (e.right_inv a.1).symm⟩,
λ ⟨a, b⟩, match e.symm (e a), e.left_inv a : ∀ a' (h : a' = a),
@sigma.mk _ (β ∘ e) _ (@@eq.rec β b (congr_arg e h.symm)) = ⟨a, b⟩ with
| _, rfl := rfl end,
λ ⟨a, b⟩, match e (e.symm a), _ : ∀ a' (h : a' = a), sigma.mk a' (@@eq.rec β b h.symm) = ⟨a, b⟩ with
| _, rfl := rfl end⟩
@[simp] lemma sigma_congr_left_apply {α₁ α₂} {β : α₂ → Sort*} (e : α₁ ≃ α₂) (x : Σ a, β (e a)) :
sigma_congr_left e x = ⟨e x.1, x.2⟩ :=
rfl
/-- Transporting a sigma type through an equivalence of the base -/
def sigma_congr_left' {α₁ α₂} {β : α₁ → Sort*} (f : α₁ ≃ α₂) :
(Σ a:α₁, β a) ≃ (Σ a:α₂, β (f.symm a)) :=
(sigma_congr_left f.symm).symm
/-- Transporting a sigma type through an equivalence of the base and a family of equivalences
of matching fibers -/
def sigma_congr {α₁ α₂} {β₁ : α₁ → Sort*} {β₂ : α₂ → Sort*} (f : α₁ ≃ α₂)
(F : ∀ a, β₁ a ≃ β₂ (f a)) :
sigma β₁ ≃ sigma β₂ :=
(sigma_congr_right F).trans (sigma_congr_left f)
/-- `sigma` type with a constant fiber is equivalent to the product. -/
def sigma_equiv_prod (α β : Type*) : (Σ _ : α, β) ≃ α × β :=
⟨λ a, ⟨a.1, a.2⟩, λ a, ⟨a.1, a.2⟩, λ ⟨a, b⟩, rfl, λ ⟨a, b⟩, rfl⟩
@[simp] lemma sigma_equiv_prod_apply {α β : Type*} (x : Σ _:α, β) :
sigma_equiv_prod α β x = ⟨x.1, x.2⟩ :=
rfl
@[simp] lemma sigma_equiv_prod_symm_apply {α β : Type*} (x : α × β) :
(sigma_equiv_prod α β).symm x = ⟨x.1, x.2⟩ :=
rfl
/-- If each fiber of a `sigma` type is equivalent to a fixed type, then the sigma type
is equivalent to the product. -/
def sigma_equiv_prod_of_equiv {α β} {β₁ : α → Sort*} (F : Π a, β₁ a ≃ β) : sigma β₁ ≃ α × β :=
(sigma_congr_right F).trans (sigma_equiv_prod α β)
end
section prod_congr
variables {α₁ β₁ β₂ : Type*} (e : α₁ → β₁ ≃ β₂)
/-- A family of equivalences `Π (a : α₁), β₁ ≃ β₂` generates an equivalence
between `β₁ × α₁` and `β₂ × α₁`. -/
def prod_congr_left : β₁ × α₁ ≃ β₂ × α₁ :=
{ to_fun := λ ab, ⟨e ab.2 ab.1, ab.2⟩,
inv_fun := λ ab, ⟨(e ab.2).symm ab.1, ab.2⟩,
left_inv := by { rintros ⟨a, b⟩, simp },
right_inv := by { rintros ⟨a, b⟩, simp } }
@[simp] lemma prod_congr_left_apply (b : β₁) (a : α₁) :
prod_congr_left e (b, a) = (e a b, a) := rfl
lemma prod_congr_refl_right (e : β₁ ≃ β₂) :
prod_congr e (equiv.refl α₁) = prod_congr_left (λ _, e) :=
by { ext ⟨a, b⟩ : 1, simp }
/-- A family of equivalences `Π (a : α₁), β₁ ≃ β₂` generates an equivalence
between `α₁ × β₁` and `α₁ × β₂`. -/
def prod_congr_right : α₁ × β₁ ≃ α₁ × β₂ :=
{ to_fun := λ ab, ⟨ab.1, e ab.1 ab.2⟩,
inv_fun := λ ab, ⟨ab.1, (e ab.1).symm ab.2⟩,
left_inv := by { rintros ⟨a, b⟩, simp },
right_inv := by { rintros ⟨a, b⟩, simp } }
@[simp] lemma prod_congr_right_apply (a : α₁) (b : β₁) :
prod_congr_right e (a, b) = (a, e a b) := rfl
lemma prod_congr_refl_left (e : β₁ ≃ β₂) :
prod_congr (equiv.refl α₁) e = prod_congr_right (λ _, e) :=
by { ext ⟨a, b⟩ : 1, simp }
@[simp] lemma prod_congr_left_trans_prod_comm :
(prod_congr_left e).trans (prod_comm _ _) = (prod_comm _ _).trans (prod_congr_right e) :=
by { ext ⟨a, b⟩ : 1, simp }
@[simp] lemma prod_congr_right_trans_prod_comm :
(prod_congr_right e).trans (prod_comm _ _) = (prod_comm _ _).trans (prod_congr_left e) :=
by { ext ⟨a, b⟩ : 1, simp }
lemma sigma_congr_right_sigma_equiv_prod :
(sigma_congr_right e).trans (sigma_equiv_prod α₁ β₂) =
(sigma_equiv_prod α₁ β₁).trans (prod_congr_right e) :=
by { ext ⟨a, b⟩ : 1, simp }
lemma sigma_equiv_prod_sigma_congr_right :
(sigma_equiv_prod α₁ β₁).symm.trans (sigma_congr_right e) =
(prod_congr_right e).trans (sigma_equiv_prod α₁ β₂).symm :=
by { ext ⟨a, b⟩ : 1, simp }
end prod_congr
namespace perm
variables {α₁ β₁ β₂ : Type*} [decidable_eq α₁] (a : α₁) (e : perm β₁)
/-- `prod_extend_right a e` extends `e : perm β` to `perm (α × β)` by sending `(a, b)` to
`(a, e b)` and keeping the other `(a', b)` fixed. -/
def prod_extend_right : perm (α₁ × β₁) :=
{ to_fun := λ ab, if ab.fst = a then (a, e ab.snd) else ab,
inv_fun := λ ab, if ab.fst = a then (a, e⁻¹ ab.snd) else ab,
left_inv := by { rintros ⟨k', x⟩, simp only, split_ifs with h; simp [h] },
right_inv := by { rintros ⟨k', x⟩, simp only, split_ifs with h; simp [h] } }
@[simp] lemma prod_extend_right_apply_eq (b : β₁) :
prod_extend_right a e (a, b) = (a, e b) := if_pos rfl
lemma prod_extend_right_apply_ne {a a' : α₁} (h : a' ≠ a) (b : β₁) :
prod_extend_right a e (a', b) = (a', b) := if_neg h
lemma eq_of_prod_extend_right_ne {e : perm β₁} {a a' : α₁} {b : β₁}
(h : prod_extend_right a e (a', b) ≠ (a', b)) : a' = a :=
by { contrapose! h, exact prod_extend_right_apply_ne _ h _ }
@[simp] lemma fst_prod_extend_right (ab : α₁ × β₁) :
(prod_extend_right a e ab).fst = ab.fst :=
begin
rw [prod_extend_right, coe_fn_mk],
split_ifs with h,
{ rw h },
{ refl }
end
end perm
section
/-- The type of functions to a product `α × β` is equivalent to the type of pairs of functions
`γ → α` and `γ → β`. -/
def arrow_prod_equiv_prod_arrow (α β γ : Type*) : (γ → α × β) ≃ (γ → α) × (γ → β) :=
⟨λ f, (λ c, (f c).1, λ c, (f c).2),
λ p c, (p.1 c, p.2 c),
λ f, funext $ λ c, prod.mk.eta,
λ p, by { cases p, refl }⟩
/-- Functions `α → β → γ` are equivalent to functions on `α × β`. -/
def arrow_arrow_equiv_prod_arrow (α β γ : Sort*) : (α → β → γ) ≃ (α × β → γ) :=
⟨uncurry, curry, curry_uncurry, uncurry_curry⟩
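-- Illustrative check: on an explicit pair, currying/uncurrying computes as expected.
example (f : ℕ → ℕ → ℕ) (a b : ℕ) : arrow_arrow_equiv_prod_arrow ℕ ℕ ℕ f (a, b) = f a b := rfl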
open sum
/-- The type of functions on a sum type `α ⊕ β` is equivalent to the type of pairs of functions
on `α` and on `β`. -/
def sum_arrow_equiv_prod_arrow (α β γ : Type*) : ((α ⊕ β) → γ) ≃ (α → γ) × (β → γ) :=
⟨λ f, (f ∘ inl, f ∘ inr),
λ p, sum.elim p.1 p.2,
λ f, by { ext ⟨⟩; refl },
λ p, by { cases p, refl }⟩
/-- Type product is right distributive with respect to type sum up to an equivalence. -/
def sum_prod_distrib (α β γ : Sort*) : (α ⊕ β) × γ ≃ (α × γ) ⊕ (β × γ) :=
⟨λ p, match p with (inl a, c) := inl (a, c) | (inr b, c) := inr (b, c) end,
λ s, match s with inl q := (inl q.1, q.2) | inr q := (inr q.1, q.2) end,
λ p, by rcases p with ⟨_ | _, _⟩; refl,
λ s, by rcases s with ⟨_, _⟩ | ⟨_, _⟩; refl⟩
@[simp] theorem sum_prod_distrib_apply_left {α β γ} (a : α) (c : γ) :
sum_prod_distrib α β γ (sum.inl a, c) = sum.inl (a, c) := rfl
@[simp] theorem sum_prod_distrib_apply_right {α β γ} (b : β) (c : γ) :
sum_prod_distrib α β γ (sum.inr b, c) = sum.inr (b, c) := rfl
/-- Type product is left distributive with respect to type sum up to an equivalence. -/
def prod_sum_distrib (α β γ : Sort*) : α × (β ⊕ γ) ≃ (α × β) ⊕ (α × γ) :=
calc α × (β ⊕ γ) ≃ (β ⊕ γ) × α : prod_comm _ _
... ≃ (β × α) ⊕ (γ × α) : sum_prod_distrib _ _ _
... ≃ (α × β) ⊕ (α × γ) : sum_congr (prod_comm _ _) (prod_comm _ _)
@[simp] theorem prod_sum_distrib_apply_left {α β γ} (a : α) (b : β) :
prod_sum_distrib α β γ (a, sum.inl b) = sum.inl (a, b) := rfl
@[simp] theorem prod_sum_distrib_apply_right {α β γ} (a : α) (c : γ) :
prod_sum_distrib α β γ (a, sum.inr c) = sum.inr (a, c) := rfl
/-- The product of an indexed sum of types (formally, a `sigma`-type `Σ i, α i`) by a type `β` is
equivalent to the sum of products `Σ i, (α i × β)`. -/
def sigma_prod_distrib {ι : Type*} (α : ι → Type*) (β : Type*) :
((Σ i, α i) × β) ≃ (Σ i, (α i × β)) :=
⟨λ p, ⟨p.1.1, (p.1.2, p.2)⟩,
λ p, (⟨p.1, p.2.1⟩, p.2.2),
λ p, by { rcases p with ⟨⟨_, _⟩, _⟩, refl },
λ p, by { rcases p with ⟨_, ⟨_, _⟩⟩, refl }⟩
/-- The product `bool × α` is equivalent to `α ⊕ α`. -/
def bool_prod_equiv_sum (α : Type u) : bool × α ≃ α ⊕ α :=
calc bool × α ≃ (unit ⊕ unit) × α : prod_congr bool_equiv_punit_sum_punit (equiv.refl _)
... ≃ (unit × α) ⊕ (unit × α) : sum_prod_distrib _ _ _
... ≃ α ⊕ α : sum_congr (punit_prod _) (punit_prod _)
/-- The function type `bool → α` is equivalent to `α × α`. -/
def bool_to_equiv_prod (α : Type u) : (bool → α) ≃ α × α :=
calc (bool → α) ≃ ((unit ⊕ unit) → α) : (arrow_congr bool_equiv_punit_sum_punit (equiv.refl α))
... ≃ (unit → α) × (unit → α) : sum_arrow_equiv_prod_arrow _ _ _
... ≃ α × α : prod_congr (punit_arrow_equiv _) (punit_arrow_equiv _)
@[simp] lemma bool_to_equiv_prod_apply {α : Type u} (f : bool → α) :
bool_to_equiv_prod α f = (f ff, f tt) := rfl
@[simp] lemma bool_to_equiv_prod_symm_apply_ff {α : Type u} (p : α × α) :
(bool_to_equiv_prod α).symm p ff = p.1 := rfl
@[simp] lemma bool_to_equiv_prod_symm_apply_tt {α : Type u} (p : α × α) :
(bool_to_equiv_prod α).symm p tt = p.2 := rfl
end
section
open sum nat
/-- The set of natural numbers is equivalent to `ℕ ⊕ punit`. -/
def nat_equiv_nat_sum_punit : ℕ ≃ ℕ ⊕ punit.{u+1} :=
⟨λ n, match n with zero := inr punit.star | succ a := inl a end,
λ s, match s with inl n := succ n | inr punit.star := zero end,
λ n, begin cases n, repeat { refl } end,
λ s, begin cases s with a u, { refl }, {cases u, { refl }} end⟩
/-- `ℕ ⊕ punit` is equivalent to `ℕ`. -/
def nat_sum_punit_equiv_nat : ℕ ⊕ punit.{u+1} ≃ ℕ :=
nat_equiv_nat_sum_punit.symm
/-- The type of integer numbers is equivalent to `ℕ ⊕ ℕ`. -/
def int_equiv_nat_sum_nat : ℤ ≃ ℕ ⊕ ℕ :=
by refine ⟨_, _, _, _⟩; intro z; {cases z; [left, right]; assumption} <|> {cases z; refl}
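-- Concretely, this sends a nonnegative integer `int.of_nat n` to `sum.inl n` and a negative
-- integer `-[1+ n]` (that is, `int.neg_succ_of_nat n`) to `sum.inr n`.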
end
/-- An equivalence between `α` and `β` generates an equivalence between `list α` and `list β`. -/
def list_equiv_of_equiv {α β : Type*} (e : α ≃ β) : list α ≃ list β :=
{ to_fun := list.map e,
inv_fun := list.map e.symm,
left_inv := λ l, by rw [list.map_map, e.symm_comp_self, list.map_id],
right_inv := λ l, by rw [list.map_map, e.self_comp_symm, list.map_id] }
/-- `fin n` is equivalent to `{m // m < n}`. -/
def fin_equiv_subtype (n : ℕ) : fin n ≃ {m // m < n} :=
⟨λ x, ⟨x.1, x.2⟩, λ x, ⟨x.1, x.2⟩, λ ⟨a, b⟩, rfl, λ ⟨a, b⟩, rfl⟩
/-- If `α` is equivalent to `β`, then `unique α` is equivalent to `unique β`. -/
def unique_congr (e : α ≃ β) : unique α ≃ unique β :=
{ to_fun := λ h, @equiv.unique _ _ h e.symm,
inv_fun := λ h, @equiv.unique _ _ h e,
left_inv := λ _, subsingleton.elim _ _,
right_inv := λ _, subsingleton.elim _ _ }
section
open subtype
/-- If `α` is equivalent to `β` and the predicates `p : α → Prop` and `q : β → Prop` are equivalent
at corresponding points, then `{a // p a}` is equivalent to `{b // q b}`. -/
def subtype_congr {p : α → Prop} {q : β → Prop}
(e : α ≃ β) (h : ∀ a, p a ↔ q (e a)) : {a : α // p a} ≃ {b : β // q b} :=
⟨λ x, ⟨e x.1, (h _).1 x.2⟩,
λ y, ⟨e.symm y.1, (h _).2 (by { simp, exact y.2 })⟩,
λ ⟨x, h⟩, subtype.ext_val $ by simp,
λ ⟨y, h⟩, subtype.ext_val $ by simp⟩
/-- If two predicates `p` and `q` are pointwise equivalent, then `{x // p x}` is equivalent to
`{x // q x}`. -/
def subtype_congr_right {p q : α → Prop} (e : ∀x, p x ↔ q x) : {x // p x} ≃ {x // q x} :=
subtype_congr (equiv.refl _) e
@[simp] lemma subtype_congr_right_mk {p q : α → Prop} (e : ∀x, p x ↔ q x)
{x : α} (h : p x) : subtype_congr_right e ⟨x, h⟩ = ⟨x, (e x).1 h⟩ := rfl
/-- If `e : α ≃ β`, then for any predicate `p : β → Prop` the subtype `{a // p (e a)}` is
equivalent to the subtype `{b // p b}`. -/
def subtype_equiv_of_subtype {p : β → Prop} (e : α ≃ β) :
{a : α // p (e a)} ≃ {b : β // p b} :=
subtype_congr e $ by simp
/-- If `e : α ≃ β`, then for any predicate `p : α → Prop` the subtype `{a // p a}` is
equivalent to the subtype `{b // p (e.symm b)}`. This version is used by `equiv_rw`. -/
def subtype_equiv_of_subtype' {p : α → Prop} (e : α ≃ β) :
{a : α // p a} ≃ {b : β // p (e.symm b)} :=
e.symm.subtype_equiv_of_subtype.symm
/-- If two predicates are equal, then the corresponding subtypes are equivalent. -/
def subtype_congr_prop {α : Type*} {p q : α → Prop} (h : p = q) : subtype p ≃ subtype q :=
subtype_congr (equiv.refl α) (assume a, h ▸ iff.rfl)
/-- The subtypes corresponding to equal sets are equivalent. -/
def set_congr {α : Type*} {s t : set α} (h : s = t) : s ≃ t :=
subtype_congr_prop h
/-- A subtype of a subtype is equivalent to the subtype of elements satisfying both predicates. This
version allows the “inner” predicate to depend on `h : p a`. -/
def subtype_subtype_equiv_subtype_exists {α : Type u} (p : α → Prop) (q : subtype p → Prop) :
subtype q ≃ {a : α // ∃h:p a, q ⟨a, h⟩ } :=
⟨λ⟨⟨a, ha⟩, ha'⟩, ⟨a, ha, ha'⟩,
λ⟨a, ha⟩, ⟨⟨a, ha.cases_on $ assume h _, h⟩, by { cases ha, exact ha_h }⟩,
assume ⟨⟨a, ha⟩, h⟩, rfl, assume ⟨a, h₁, h₂⟩, rfl⟩
/-- A subtype of a subtype is equivalent to the subtype of elements satisfying both predicates. -/
def subtype_subtype_equiv_subtype_inter {α : Type u} (p q : α → Prop) :
{x : subtype p // q x.1} ≃ subtype (λ x, p x ∧ q x) :=
(subtype_subtype_equiv_subtype_exists p _).trans $
subtype_congr_right $ λ x, exists_prop
/-- If the outer subtype has a more restrictive predicate than the inner one,
then we can drop the latter. -/
def subtype_subtype_equiv_subtype {α : Type u} {p q : α → Prop} (h : ∀ {x}, q x → p x) :
{x : subtype p // q x.1} ≃ subtype q :=
(subtype_subtype_equiv_subtype_inter p _).trans $
subtype_congr_right $
assume x,
⟨and.right, λ h₁, ⟨h h₁, h₁⟩⟩
/-- If a proposition holds for all elements, then the subtype is
equivalent to the original type. -/
def subtype_univ_equiv {α : Type u} {p : α → Prop} (h : ∀ x, p x) :
subtype p ≃ α :=
⟨λ x, x, λ x, ⟨x, h x⟩, λ x, subtype.eq rfl, λ x, rfl⟩
/-- A subtype of a sigma-type is a sigma-type over a subtype. -/
def subtype_sigma_equiv {α : Type u} (p : α → Type v) (q : α → Prop) :
{ y : sigma p // q y.1 } ≃ Σ(x : subtype q), p x.1 :=
⟨λ x, ⟨⟨x.1.1, x.2⟩, x.1.2⟩,
λ x, ⟨⟨x.1.1, x.2⟩, x.1.2⟩,
λ ⟨⟨x, h⟩, y⟩, rfl,
λ ⟨⟨x, y⟩, h⟩, rfl⟩
/-- A sigma type over a subtype is equivalent to the sigma set over the original type,
if the fiber is empty outside of the subset -/
def sigma_subtype_equiv_of_subset {α : Type u} (p : α → Type v) (q : α → Prop)
(h : ∀ x, p x → q x) :
(Σ x : subtype q, p x) ≃ Σ x : α, p x :=
(subtype_sigma_equiv p q).symm.trans $ subtype_univ_equiv $ λ x, h x.1 x.2
/-- If a predicate `p : β → Prop` is true on the range of a map `f : α → β`, then
`Σ y : {y // p y}, {x // f x = y}` is equivalent to `α`. -/
def sigma_subtype_preimage_equiv {α : Type u} {β : Type v} (f : α → β) (p : β → Prop)
(h : ∀ x, p (f x)) :
(Σ y : subtype p, {x : α // f x = y}) ≃ α :=
calc _ ≃ Σ y : β, {x : α // f x = y} : sigma_subtype_equiv_of_subset _ p (λ y ⟨x, h'⟩, h' ▸ h x)
... ≃ α : sigma_preimage_equiv f
/-- If for each `x` we have `p x ↔ q (f x)`, then `Σ y : {y // q y}, f ⁻¹' {y}` is equivalent
to `{x // p x}`. -/
def sigma_subtype_preimage_equiv_subtype {α : Type u} {β : Type v} (f : α → β)
{p : α → Prop} {q : β → Prop} (h : ∀ x, p x ↔ q (f x)) :
(Σ y : subtype q, {x : α // f x = y}) ≃ subtype p :=
calc (Σ y : subtype q, {x : α // f x = y}) ≃
Σ y : subtype q, {x : subtype p // subtype.mk (f x) ((h x).1 x.2) = y} :
begin
apply sigma_congr_right,
assume y,
symmetry,
refine (subtype_subtype_equiv_subtype_exists _ _).trans (subtype_congr_right _),
assume x,
exact ⟨λ ⟨hp, h'⟩, congr_arg subtype.val h', λ h', ⟨(h x).2 (h'.symm ▸ y.2), subtype.eq h'⟩⟩
end
... ≃ subtype p : sigma_preimage_equiv (λ x : subtype p, (⟨f x, (h x).1 x.property⟩ : subtype q))
/-- The `pi`-type `Π i, π i` is equivalent to the type of sections `f : ι → Σ i, π i` of the
`sigma` type such that for all `i` we have `(f i).fst = i`. -/
def pi_equiv_subtype_sigma (ι : Type*) (π : ι → Type*) :
(Πi, π i) ≃ {f : ι → Σi, π i | ∀i, (f i).1 = i } :=
⟨ λf, ⟨λi, ⟨i, f i⟩, assume i, rfl⟩, λf i, begin rw ← f.2 i, exact (f.1 i).2 end,
assume f, funext $ assume i, rfl,
assume ⟨f, hf⟩, subtype.eq $ funext $ assume i, sigma.eq (hf i).symm $
eq_of_heq $ rec_heq_of_heq _ $ rec_heq_of_heq _ $ heq.refl _⟩
/-- The set of functions `f : Π a, β a` such that for all `a` we have `p a (f a)` is equivalent
to the set of functions `Π a, {b : β a // p a b}`. -/
def subtype_pi_equiv_pi {α : Sort u} {β : α → Sort v} {p : Πa, β a → Prop} :
{f : Πa, β a // ∀a, p a (f a) } ≃ Πa, { b : β a // p a b } :=
⟨λf a, ⟨f.1 a, f.2 a⟩, λf, ⟨λa, (f a).1, λa, (f a).2⟩,
by { rintro ⟨f, h⟩, refl },
by { rintro f, funext a, exact subtype.ext_val rfl }⟩
/-- A subtype of a product defined by componentwise conditions
is equivalent to a product of subtypes. -/
def subtype_prod_equiv_prod {α : Type u} {β : Type v} {p : α → Prop} {q : β → Prop} :
{c : α × β // p c.1 ∧ q c.2} ≃ ({a // p a} × {b // q b}) :=
⟨λ x, ⟨⟨x.1.1, x.2.1⟩, ⟨x.1.2, x.2.2⟩⟩,
λ x, ⟨⟨x.1.1, x.2.1⟩, ⟨x.1.2, x.2.2⟩⟩,
λ ⟨⟨_, _⟩, ⟨_, _⟩⟩, rfl,
λ ⟨⟨_, _⟩, ⟨_, _⟩⟩, rfl⟩
end
section subtype_equiv_codomain
variables {X : Type*} {Y : Type*} [decidable_eq X] {x : X}
/-- The type of all functions `X → Y` with prescribed values for all `x' ≠ x`
is equivalent to the codomain `Y`. -/
def subtype_equiv_codomain (f : {x' // x' ≠ x} → Y) : {g : X → Y // g ∘ coe = f} ≃ Y :=
(subtype_preimage _ f).trans $
@fun_unique {x' // ¬ x' ≠ x} _ $
show unique {x' // ¬ x' ≠ x}, from @equiv.unique _ _
(show unique {x' // x' = x}, from
{ default := ⟨x, rfl⟩, uniq := λ ⟨x', h⟩, subtype.val_injective h })
(subtype_congr_right $ λ a, not_not)
@[simp] lemma coe_subtype_equiv_codomain (f : {x' // x' ≠ x} → Y) :
(subtype_equiv_codomain f : {g : X → Y // g ∘ coe = f} → Y) = λ g, (g : X → Y) x := rfl
@[simp] lemma subtype_equiv_codomain_apply (f : {x' // x' ≠ x} → Y)
(g : {g : X → Y // g ∘ coe = f}) :
subtype_equiv_codomain f g = (g : X → Y) x := rfl
lemma coe_subtype_equiv_codomain_symm (f : {x' // x' ≠ x} → Y) :
((subtype_equiv_codomain f).symm : Y → {g : X → Y // g ∘ coe = f}) =
λ y, ⟨λ x', if h : x' ≠ x then f ⟨x', h⟩ else y,
by { funext x', dsimp, erw [dif_pos x'.2, subtype.coe_eta] }⟩ := rfl
@[simp] lemma subtype_equiv_codomain_symm_apply (f : {x' // x' ≠ x} → Y) (y : Y) (x' : X) :
((subtype_equiv_codomain f).symm y : X → Y) x' = if h : x' ≠ x then f ⟨x', h⟩ else y :=
rfl
@[simp] lemma subtype_equiv_codomain_symm_apply_eq (f : {x' // x' ≠ x} → Y) (y : Y) :
((subtype_equiv_codomain f).symm y : X → Y) x = y :=
dif_neg (not_not.mpr rfl)
lemma subtype_equiv_codomain_symm_apply_ne (f : {x' // x' ≠ x} → Y) (y : Y) (x' : X) (h : x' ≠ x) :
((subtype_equiv_codomain f).symm y : X → Y) x' = f ⟨x', h⟩ :=
dif_pos h
end subtype_equiv_codomain
namespace set
open set
/-- `univ α` is equivalent to `α`. -/
protected def univ (α) : @univ α ≃ α :=
⟨subtype.val, λ a, ⟨a, trivial⟩, λ ⟨a, _⟩, rfl, λ a, rfl⟩
@[simp] lemma univ_apply {α : Type u} (x : @univ α) :
equiv.set.univ α x = x := rfl
@[simp] lemma univ_symm_apply {α : Type u} (x : α) :
(equiv.set.univ α).symm x = ⟨x, trivial⟩ := rfl
/-- An empty set is equivalent to the `empty` type. -/
protected def empty (α) : (∅ : set α) ≃ empty :=
equiv_empty $ λ ⟨x, h⟩, not_mem_empty x h
/-- An empty set is equivalent to a `pempty` type. -/
protected def pempty (α) : (∅ : set α) ≃ pempty :=
equiv_pempty $ λ ⟨x, h⟩, not_mem_empty x h
/-- If sets `s` and `t` are separated by a decidable predicate, then `s ∪ t` is equivalent to
`s ⊕ t`. -/
protected def union' {α} {s t : set α}
(p : α → Prop) [decidable_pred p]
(hs : ∀ x ∈ s, p x)
(ht : ∀ x ∈ t, ¬ p x) : (s ∪ t : set α) ≃ s ⊕ t :=
{ to_fun := λ x, if hp : p x
then sum.inl ⟨_, x.2.resolve_right (λ xt, ht _ xt hp)⟩
else sum.inr ⟨_, x.2.resolve_left (λ xs, hp (hs _ xs))⟩,
inv_fun := λ o, match o with
| (sum.inl x) := ⟨x, or.inl x.2⟩
| (sum.inr x) := ⟨x, or.inr x.2⟩
end,
left_inv := λ ⟨x, h'⟩, by by_cases p x; simp [union'._match_1, h]; congr,
right_inv := λ o, begin
rcases o with ⟨x, h⟩ | ⟨x, h⟩;
dsimp [union'._match_1];
[simp [hs _ h], simp [ht _ h]]
end }
/-- If sets `s` and `t` are disjoint, then `s ∪ t` is equivalent to `s ⊕ t`. -/
protected def union {α} {s t : set α} [decidable_pred (λ x, x ∈ s)] (H : s ∩ t ⊆ ∅) :
(s ∪ t : set α) ≃ s ⊕ t :=
set.union' (λ x, x ∈ s) (λ _, id) (λ x xt xs, H ⟨xs, xt⟩)
lemma union_apply_left {α} {s t : set α} [decidable_pred (λ x, x ∈ s)] (H : s ∩ t ⊆ ∅)
{a : (s ∪ t : set α)} (ha : ↑a ∈ s) : equiv.set.union H a = sum.inl ⟨a, ha⟩ :=
dif_pos ha
lemma union_apply_right {α} {s t : set α} [decidable_pred (λ x, x ∈ s)] (H : s ∩ t ⊆ ∅)
{a : (s ∪ t : set α)} (ha : ↑a ∈ t) : equiv.set.union H a = sum.inr ⟨a, ha⟩ :=
dif_neg $ λ h, H ⟨h, ha⟩
-- TODO: Any reason to use the same universe?
/-- A singleton set is equivalent to a `punit` type. -/
protected def singleton {α} (a : α) : ({a} : set α) ≃ punit.{u} :=
⟨λ _, punit.star, λ _, ⟨a, mem_singleton _⟩,
λ ⟨x, h⟩, by { simp at h, subst x },
λ ⟨⟩, rfl⟩
/-- Equal sets are equivalent. -/
protected def of_eq {α : Type u} {s t : set α} (h : s = t) : s ≃ t :=
{ to_fun := λ x, ⟨x.1, h ▸ x.2⟩,
inv_fun := λ x, ⟨x.1, h.symm ▸ x.2⟩,
left_inv := λ _, subtype.eq rfl,
right_inv := λ _, subtype.eq rfl }
@[simp] lemma of_eq_apply {α : Type u} {s t : set α} (h : s = t) (a : s) :
equiv.set.of_eq h a = ⟨a, h ▸ a.2⟩ := rfl
@[simp] lemma of_eq_symm_apply {α : Type u} {s t : set α} (h : s = t) (a : t) :
(equiv.set.of_eq h).symm a = ⟨a, h.symm ▸ a.2⟩ := rfl
/-- If `a ∉ s`, then `insert a s` is equivalent to `s ⊕ punit`. -/
protected def insert {α} {s : set.{u} α} [decidable_pred s] {a : α} (H : a ∉ s) :
(insert a s : set α) ≃ s ⊕ punit.{u+1} :=
calc (insert a s : set α) ≃ ↥(s ∪ {a}) : equiv.set.of_eq (by simp)
... ≃ s ⊕ ({a} : set α) : equiv.set.union (by finish [set.subset_def])
... ≃ s ⊕ punit.{u+1} : sum_congr (equiv.refl _) (equiv.set.singleton _)
/-- If `s : set α` is a set with decidable membership, then `s ⊕ sᶜ` is equivalent to `α`. -/
protected def sum_compl {α} (s : set α) [decidable_pred s] : s ⊕ (sᶜ : set α) ≃ α :=
calc s ⊕ (sᶜ : set α) ≃ ↥(s ∪ sᶜ) : (equiv.set.union (by simp [set.ext_iff])).symm
... ≃ @univ α : equiv.set.of_eq (by simp)
... ≃ α : equiv.set.univ _
@[simp] lemma sum_compl_apply_inl {α : Type u} (s : set α) [decidable_pred s] (x : s) :
equiv.set.sum_compl s (sum.inl x) = x := rfl
@[simp] lemma sum_compl_apply_inr {α : Type u} (s : set α) [decidable_pred s] (x : sᶜ) :
equiv.set.sum_compl s (sum.inr x) = x := rfl
lemma sum_compl_symm_apply_of_mem {α : Type u} {s : set α} [decidable_pred s] {x : α}
(hx : x ∈ s) : (equiv.set.sum_compl s).symm x = sum.inl ⟨x, hx⟩ :=
have ↑(⟨x, or.inl hx⟩ : (s ∪ sᶜ : set α)) ∈ s, from hx,
by { rw [equiv.set.sum_compl], simpa using set.union_apply_left _ this }
lemma sum_compl_symm_apply_of_not_mem {α : Type u} {s : set α} [decidable_pred s] {x : α}
(hx : x ∉ s) : (equiv.set.sum_compl s).symm x = sum.inr ⟨x, hx⟩ :=
have ↑(⟨x, or.inr hx⟩ : (s ∪ sᶜ : set α)) ∈ sᶜ, from hx,
by { rw [equiv.set.sum_compl], simpa using set.union_apply_right _ this }
/-- Given `h : s ⊆ t`, `sum_diff_subset h` is the natural equivalence between
`s ⊕ (t \ s)` and `t`. -/
protected def sum_diff_subset {α} {s t : set α} (h : s ⊆ t) [decidable_pred s] :
s ⊕ (t \ s : set α) ≃ t :=
calc s ⊕ (t \ s : set α) ≃ (s ∪ (t \ s) : set α) : (equiv.set.union (by simp [inter_diff_self])).symm
... ≃ t : equiv.set.of_eq (by { simp [union_diff_self, union_eq_self_of_subset_left h] })
@[simp] lemma sum_diff_subset_apply_inl
{α} {s t : set α} (h : s ⊆ t) [decidable_pred s] (x : s) :
equiv.set.sum_diff_subset h (sum.inl x) = inclusion h x := rfl
@[simp] lemma sum_diff_subset_apply_inr
{α} {s t : set α} (h : s ⊆ t) [decidable_pred s] (x : t \ s) :
equiv.set.sum_diff_subset h (sum.inr x) = inclusion (diff_subset t s) x := rfl
lemma sum_diff_subset_symm_apply_of_mem
{α} {s t : set α} (h : s ⊆ t) [decidable_pred s] {x : t} (hx : x.1 ∈ s) :
(equiv.set.sum_diff_subset h).symm x = sum.inl ⟨x, hx⟩ :=
begin
apply (equiv.set.sum_diff_subset h).injective,
simp only [apply_symm_apply, sum_diff_subset_apply_inl],
exact subtype.eq rfl,
end
lemma sum_diff_subset_symm_apply_of_not_mem
{α} {s t : set α} (h : s ⊆ t) [decidable_pred s] {x : t} (hx : x.1 ∉ s) :
(equiv.set.sum_diff_subset h).symm x = sum.inr ⟨x, ⟨x.2, hx⟩⟩ :=
begin
apply (equiv.set.sum_diff_subset h).injective,
simp only [apply_symm_apply, sum_diff_subset_apply_inr],
exact subtype.eq rfl,
end
/-- If `s` is a set with decidable membership, then the sum of `s ∪ t` and `s ∩ t` is equivalent
to `s ⊕ t`. -/
protected def union_sum_inter {α : Type u} (s t : set α) [decidable_pred s] :
(s ∪ t : set α) ⊕ (s ∩ t : set α) ≃ s ⊕ t :=
calc (s ∪ t : set α) ⊕ (s ∩ t : set α)
≃ (s ∪ t \ s : set α) ⊕ (s ∩ t : set α) : by rw [union_diff_self]
... ≃ (s ⊕ (t \ s : set α)) ⊕ (s ∩ t : set α) :
sum_congr (set.union $ subset_empty_iff.2 (inter_diff_self _ _)) (equiv.refl _)
... ≃ s ⊕ (t \ s : set α) ⊕ (s ∩ t : set α) : sum_assoc _ _ _
... ≃ s ⊕ (t \ s ∪ s ∩ t : set α) : sum_congr (equiv.refl _) begin
refine (set.union' (∉ s) _ _).symm,
exacts [λ x hx, hx.2, λ x hx, not_not_intro hx.1]
end
... ≃ s ⊕ t : by { rw (_ : t \ s ∪ s ∩ t = t), rw [union_comm, inter_comm, inter_union_diff] }
/-- The set product of two sets is equivalent to the type product of their coercions to types. -/
protected def prod {α β} (s : set α) (t : set β) :
s.prod t ≃ s × t :=
@subtype_prod_equiv_prod α β s t
/-- If a function `f` is injective on a set `s`, then `s` is equivalent to `f '' s`. -/
protected noncomputable def image_of_inj_on {α β} (f : α → β) (s : set α) (H : inj_on f s) :
s ≃ (f '' s) :=
⟨λ p, ⟨f p, mem_image_of_mem f p.2⟩,
λ p, ⟨classical.some p.2, (classical.some_spec p.2).1⟩,
λ ⟨x, h⟩, subtype.eq (H (classical.some_spec (mem_image_of_mem f h)).1 h
(classical.some_spec (mem_image_of_mem f h)).2),
λ ⟨y, h⟩, subtype.eq (classical.some_spec h).2⟩
/-- If `f` is an injective function, then `s` is equivalent to `f '' s`. -/
protected noncomputable def image {α β} (f : α → β) (s : set α) (H : injective f) : s ≃ (f '' s) :=
equiv.set.image_of_inj_on f s (λ x y hx hy hxy, H hxy)
@[simp] theorem image_apply {α β} (f : α → β) (s : set α) (H : injective f) (a h) :
set.image f s H ⟨a, h⟩ = ⟨f a, mem_image_of_mem _ h⟩ := rfl
lemma image_symm_preimage {α β} {f : α → β} (hf : injective f) (u s : set α) :
(λ x, (set.image f s hf).symm x : f '' s → α) ⁻¹' u = coe ⁻¹' (f '' u) :=
begin
ext ⟨b, a, has, rfl⟩,
have : ∀(h : ∃a', a' ∈ s ∧ a' = a), classical.some h = a := λ h, (classical.some_spec h).2,
simp [equiv.set.image, equiv.set.image_of_inj_on, hf, this],
end
/-- If `f : α → β` is an injective function, then `α` is equivalent to the range of `f`. -/
protected noncomputable def range {α β} (f : α → β) (H : injective f) :
α ≃ range f :=
{ to_fun := λ x, ⟨f x, mem_range_self _⟩,
inv_fun := λ x, classical.some x.2,
left_inv := λ x, H (classical.some_spec (show f x ∈ range f, from mem_range_self _)),
right_inv := λ x, subtype.eq $ classical.some_spec x.2 }
@[simp] theorem range_apply {α β} (f : α → β) (H : injective f) (a) :
set.range f H a = ⟨f a, set.mem_range_self _⟩ := rfl
theorem apply_range_symm {α β} (f : α → β) (H : injective f) (b : range f) :
f ((set.range f H).symm b) = b :=
begin
conv_rhs { rw ←((set.range f H).right_inv b), },
simp,
end
/-- If `α` is equivalent to `β`, then `set α` is equivalent to `set β`. -/
protected def congr {α β : Type*} (e : α ≃ β) : set α ≃ set β :=
⟨λ s, e '' s, λ t, e.symm '' t, symm_image_image e, symm_image_image e.symm⟩
/-- The set `{x ∈ s | t x}` is equivalent to the set of `x : s` such that `t x`. -/
protected def sep {α : Type u} (s : set α) (t : α → Prop) :
({ x ∈ s | t x } : set α) ≃ { x : s | t x } :=
(equiv.subtype_subtype_equiv_subtype_inter s t).symm
end set
/-- If `f` is a bijective function, then its domain is equivalent to its codomain. -/
noncomputable def of_bijective {α β} (f : α → β) (hf : bijective f) : α ≃ β :=
(equiv.set.range f hf.1).trans $ (set_congr hf.2.range_eq).trans $ equiv.set.univ β
@[simp] theorem coe_of_bijective {α β} {f : α → β} (hf : bijective f) :
(of_bijective f hf : α → β) = f := rfl
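-- A hedged sanity check (the bijectivity hypothesis `h` is assumed rather than proved here):
-- by `coe_of_bijective`, the packaged equivalence computes as the original function.
example (h : bijective (id : ℕ → ℕ)) (n : ℕ) : of_bijective id h n = n := rfl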
/-- If `f` is an injective function, then its domain is equivalent to its range. -/
noncomputable def of_injective {α β} (f : α → β) (hf : injective f) : α ≃ _root_.set.range f :=
of_bijective (λ x, ⟨f x, set.mem_range_self x⟩) ⟨λ x y hxy, hf $ by injections, λ ⟨_, x, rfl⟩, ⟨x, rfl⟩⟩
@[simp] lemma of_injective_apply {α β} (f : α → β) (hf : injective f) (x : α) :
of_injective f hf x = ⟨f x, set.mem_range_self x⟩ :=
rfl
/-- Subtype of the quotient is equivalent to the quotient of the subtype. Let `α` be a setoid with
equivalence relation `~`. Let `p₂` be a predicate on the quotient type `α/~`, and `p₁` be the lift
of this predicate to `α`: `p₁ a ↔ p₂ ⟦a⟧`. Let `~₂` be the restriction of `~` to `{x // p₁ x}`.
Then `{x // p₂ x}` is equivalent to the quotient of `{x // p₁ x}` by `~₂`. -/
def subtype_quotient_equiv_quotient_subtype (p₁ : α → Prop) [s₁ : setoid α]
[s₂ : setoid (subtype p₁)] (p₂ : quotient s₁ → Prop) (hp₂ : ∀ a, p₁ a ↔ p₂ ⟦a⟧)
(h : ∀ x y : subtype p₁, @setoid.r _ s₂ x y ↔ (x : α) ≈ y) :
{x // p₂ x} ≃ quotient s₂ :=
{ to_fun := λ a, quotient.hrec_on a.1 (λ a h, ⟦⟨a, (hp₂ _).2 h⟩⟧)
(λ a b hab, hfunext (by rw quotient.sound hab)
(λ h₁ h₂ _, heq_of_eq (quotient.sound ((h _ _).2 hab)))) a.2,
inv_fun := λ a, quotient.lift_on a (λ a, (⟨⟦a.1⟧, (hp₂ _).1 a.2⟩ : {x // p₂ x}))
(λ a b hab, subtype.ext_val (quotient.sound ((h _ _).1 hab))),
left_inv := λ ⟨a, ha⟩, quotient.induction_on a (λ a ha, rfl) ha,
right_inv := λ a, quotient.induction_on a (λ ⟨a, ha⟩, rfl) }
section swap
variable [decidable_eq α]
/-- A helper function for `equiv.swap`. -/
def swap_core (a b r : α) : α :=
if r = a then b
else if r = b then a
else r
theorem swap_core_self (r a : α) : swap_core a a r = r :=
by { unfold swap_core, split_ifs; cc }
theorem swap_core_swap_core (r a b : α) : swap_core a b (swap_core a b r) = r :=
by { unfold swap_core, split_ifs; cc }
theorem swap_core_comm (r a b : α) : swap_core a b r = swap_core b a r :=
by { unfold swap_core, split_ifs; cc }
/-- `swap a b` is the permutation that swaps `a` and `b` and
leaves other values as is. -/
def swap (a b : α) : perm α :=
⟨swap_core a b, swap_core a b, λr, swap_core_swap_core r a b, λr, swap_core_swap_core r a b⟩
theorem swap_self (a : α) : swap a a = equiv.refl _ :=
ext $ λ r, swap_core_self r a
theorem swap_comm (a b : α) : swap a b = swap b a :=
ext $ λ r, swap_core_comm r _ _
theorem swap_apply_def (a b x : α) : swap a b x = if x = a then b else if x = b then a else x :=
rfl
@[simp] theorem swap_apply_left (a b : α) : swap a b a = b :=
if_pos rfl
@[simp] theorem swap_apply_right (a b : α) : swap a b b = a :=
by { by_cases h : b = a; simp [swap_apply_def, h], }
theorem swap_apply_of_ne_of_ne {a b x : α} : x ≠ a → x ≠ b → swap a b x = x :=
by simp [swap_apply_def] {contextual := tt}
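-- Two hedged sanity checks on `ℕ`: `swap` exchanges the named pair and fixes every other value.
example : swap (1 : ℕ) 2 1 = 2 := swap_apply_left 1 2
example : swap (1 : ℕ) 2 3 = 3 := swap_apply_of_ne_of_ne dec_trivial dec_trivial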
@[simp] theorem swap_swap (a b : α) : (swap a b).trans (swap a b) = equiv.refl _ :=
ext $ λ x, swap_core_swap_core _ _ _
theorem swap_comp_apply {a b x : α} (π : perm α) :
π.trans (swap a b) x = if π x = a then b else if π x = b then a else π x :=
by { cases π, refl }
@[simp] lemma swap_inv {α : Type*} [decidable_eq α] (x y : α) :
(swap x y)⁻¹ = swap x y := rfl
@[simp] lemma symm_trans_swap_trans [decidable_eq β] (a b : α)
(e : α ≃ β) : (e.symm.trans (swap a b)).trans e = swap (e a) (e b) :=
equiv.ext (λ x, begin
have : ∀ a, e.symm x = a ↔ x = e a :=
λ a, by { rw @eq_comm _ (e.symm x), split; intros; simp * at * },
simp [swap_apply_def, this],
split_ifs; simp
end)
@[simp] lemma swap_mul_self {α : Type*} [decidable_eq α] (i j : α) : swap i j * swap i j = 1 :=
equiv.swap_swap i j
@[simp] lemma swap_apply_self {α : Type*} [decidable_eq α] (i j a : α) :
swap i j (swap i j a) = a :=
by rw [← perm.mul_apply, swap_mul_self, perm.one_apply]
/-- Augment an equivalence with a prescribed mapping `f a = b` -/
def set_value (f : α ≃ β) (a : α) (b : β) : α ≃ β :=
(swap a (f.symm b)).trans f
@[simp] theorem set_value_eq (f : α ≃ β) (a : α) (b : β) : set_value f a b a = b :=
by { dsimp [set_value], simp [swap_apply_left] }
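-- A hedged illustration: `set_value` adjusts an equivalence so that the prescribed input is
-- sent to the prescribed output; here the identity on `ℕ` is forced to send `0` to `1`.
example : (equiv.refl ℕ).set_value 0 1 0 = 1 := set_value_eq (equiv.refl ℕ) 0 1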
end swap
protected lemma forall_congr {p : α → Prop} {q : β → Prop} (f : α ≃ β)
(h : ∀{x}, p x ↔ q (f x)) : (∀x, p x) ↔ (∀y, q y) :=
begin
split; intros h₂ x,
{ rw [←f.right_inv x], apply h.mp, apply h₂ },
apply h.mpr, apply h₂
end
protected lemma forall_congr' {p : α → Prop} {q : β → Prop} (f : α ≃ β)
(h : ∀{x}, p (f.symm x) ↔ q x) : (∀x, p x) ↔ (∀y, q y) :=
(equiv.forall_congr f.symm (λ x, h.symm)).symm
-- We next build some higher arity versions of `equiv.forall_congr`.
-- Although they appear to just be repeated applications of `equiv.forall_congr`,
-- unification of metavariables works better with these versions.
-- In particular, they are necessary in `equiv_rw`.
-- (Stopping at ternary functions seems reasonable: at least in 1-categorical mathematics,
-- it's rare to have axioms involving more than 3 elements at once.)
universes ua1 ua2 ub1 ub2 ug1 ug2
variables {α₁ : Sort ua1} {α₂ : Sort ua2}
{β₁ : Sort ub1} {β₂ : Sort ub2}
{γ₁ : Sort ug1} {γ₂ : Sort ug2}
protected lemma forall₂_congr {p : α₁ → β₁ → Prop} {q : α₂ → β₂ → Prop} (eα : α₁ ≃ α₂)
(eβ : β₁ ≃ β₂) (h : ∀{x y}, p x y ↔ q (eα x) (eβ y)) :
(∀x y, p x y) ↔ (∀x y, q x y) :=
begin
apply equiv.forall_congr,
intros,
apply equiv.forall_congr,
intros,
apply h,
end
protected lemma forall₂_congr' {p : α₁ → β₁ → Prop} {q : α₂ → β₂ → Prop} (eα : α₁ ≃ α₂)
(eβ : β₁ ≃ β₂) (h : ∀{x y}, p (eα.symm x) (eβ.symm y) ↔ q x y) :
(∀x y, p x y) ↔ (∀x y, q x y) :=
(equiv.forall₂_congr eα.symm eβ.symm (λ x y, h.symm)).symm
protected lemma forall₃_congr {p : α₁ → β₁ → γ₁ → Prop} {q : α₂ → β₂ → γ₂ → Prop}
(eα : α₁ ≃ α₂) (eβ : β₁ ≃ β₂) (eγ : γ₁ ≃ γ₂)
(h : ∀{x y z}, p x y z ↔ q (eα x) (eβ y) (eγ z)) : (∀x y z, p x y z) ↔ (∀x y z, q x y z) :=
begin
apply equiv.forall₂_congr,
intros,
apply equiv.forall_congr,
intros,
apply h,
end
protected lemma forall₃_congr' {p : α₁ → β₁ → γ₁ → Prop} {q : α₂ → β₂ → γ₂ → Prop}
(eα : α₁ ≃ α₂) (eβ : β₁ ≃ β₂) (eγ : γ₁ ≃ γ₂)
(h : ∀{x y z}, p (eα.symm x) (eβ.symm y) (eγ.symm z) ↔ q x y z) :
(∀x y z, p x y z) ↔ (∀x y z, q x y z) :=
(equiv.forall₃_congr eα.symm eβ.symm eγ.symm (λ x y z, h.symm)).symm
protected lemma forall_congr_left' {p : α → Prop} (f : α ≃ β) :
(∀x, p x) ↔ (∀y, p (f.symm y)) :=
equiv.forall_congr f (λx, by simp)
protected lemma forall_congr_left {p : β → Prop} (f : α ≃ β) :
(∀x, p (f x)) ↔ (∀y, p y) :=
(equiv.forall_congr_left' f.symm).symm
section
variables (P : α → Sort w) (e : α ≃ β)
/--
Transport dependent functions through an equivalence of the base space.
-/
def Pi_congr_left' : (Π a, P a) ≃ (Π b, P (e.symm b)) :=
{ to_fun := λ f x, f (e.symm x),
inv_fun := λ f x, begin rw [← e.symm_apply_apply x], exact f (e x) end,
left_inv := λ f, funext $ λ x, eq_of_heq ((eq_rec_heq _ _).trans
(by { dsimp, rw e.symm_apply_apply })),
right_inv := λ f, funext $ λ x, eq_of_heq ((eq_rec_heq _ _).trans
(by { rw e.apply_symm_apply })) }
@[simp]
lemma Pi_congr_left'_apply (f : Π a, P a) (b : β) : ((Pi_congr_left' P e) f) b = f (e.symm b) :=
rfl
@[simp]
lemma Pi_congr_left'_symm_apply (g : Π b, P (e.symm b)) (a : α) :
((Pi_congr_left' P e).symm g) a = (by { convert g (e a), simp }) :=
rfl
end
section
variables (P : β → Sort w) (e : α ≃ β)
/--
Transporting dependent functions through an equivalence of the base,
expressed as a "simplification".
-/
def Pi_congr_left : (Π a, P (e a)) ≃ (Π b, P b) :=
(Pi_congr_left' P e.symm).symm
end
section
variables
{W : α → Sort w} {Z : β → Sort z} (h₁ : α ≃ β) (h₂ : Π a : α, (W a ≃ Z (h₁ a)))
/--
Transport dependent functions through
an equivalence of the base spaces and a family
of equivalences of the matching fibers.
-/
def Pi_congr : (Π a, W a) ≃ (Π b, Z b) :=
(equiv.Pi_congr_right h₂).trans (equiv.Pi_congr_left _ h₁)
end
section
variables
{W : α → Sort w} {Z : β → Sort z} (h₁ : α ≃ β) (h₂ : Π b : β, (W (h₁.symm b) ≃ Z b))
/--
Transport dependent functions through
an equivalence of the base spaces and a family
of equivalences of the matching fibres.
-/
def Pi_congr' : (Π a, W a) ≃ (Π b, Z b) :=
(Pi_congr h₁.symm (λ b, (h₂ b).symm)).symm
end
end equiv
instance {α} [subsingleton α] : subsingleton (ulift α) := equiv.ulift.subsingleton
instance {α} [subsingleton α] : subsingleton (plift α) := equiv.plift.subsingleton
instance {α} [decidable_eq α] : decidable_eq (ulift α) := equiv.ulift.decidable_eq
instance {α} [decidable_eq α] : decidable_eq (plift α) := equiv.plift.decidable_eq
/-- If both `α` and `β` are singletons, then `α ≃ β`. -/
def equiv_of_unique_of_unique [unique α] [unique β] : α ≃ β :=
{ to_fun := λ _, default β,
inv_fun := λ _, default α,
left_inv := λ _, subsingleton.elim _ _,
right_inv := λ _, subsingleton.elim _ _ }
/-- If `α` is a singleton, then it is equivalent to any `punit`. -/
def equiv_punit_of_unique [unique α] : α ≃ punit.{v} :=
equiv_of_unique_of_unique
/-- If `α` is a subsingleton, then it is equivalent to `α × α`. -/
def subsingleton_prod_self_equiv {α : Type*} [subsingleton α] : α × α ≃ α :=
{ to_fun := λ p, p.1,
inv_fun := λ a, (a, a),
left_inv := λ p, subsingleton.elim _ _,
right_inv := λ p, subsingleton.elim _ _, }
/-- To give an equivalence between two subsingleton types, it is sufficient to give any two
functions between them. -/
def equiv_of_subsingleton_of_subsingleton [subsingleton α] [subsingleton β]
(f : α → β) (g : β → α) : α ≃ β :=
{ to_fun := f,
inv_fun := g,
left_inv := λ _, subsingleton.elim _ _,
right_inv := λ _, subsingleton.elim _ _ }
/-- `unique (unique α)` is equivalent to `unique α`. -/
def unique_unique_equiv : unique (unique α) ≃ unique α :=
equiv_of_subsingleton_of_subsingleton (λ h, h.default)
(λ h, { default := h, uniq := λ _, subsingleton.elim _ _ })
namespace quot
/-- An equivalence `e : α ≃ β` generates an equivalence between quotient spaces,
if `ra a₁ a₂ ↔ rb (e a₁) (e a₂)`. -/
protected def congr {ra : α → α → Prop} {rb : β → β → Prop} (e : α ≃ β)
(eq : ∀a₁ a₂, ra a₁ a₂ ↔ rb (e a₁) (e a₂)) :
quot ra ≃ quot rb :=
{ to_fun := quot.map e (assume a₁ a₂, (eq a₁ a₂).1),
inv_fun := quot.map e.symm
(assume b₁ b₂ h,
(eq (e.symm b₁) (e.symm b₂)).2
((e.apply_symm_apply b₁).symm ▸ (e.apply_symm_apply b₂).symm ▸ h)),
left_inv := by { rintros ⟨a⟩, dunfold quot.map, simp only [equiv.symm_apply_apply] },
right_inv := by { rintros ⟨a⟩, dunfold quot.map, simp only [equiv.apply_symm_apply] } }
/-- Quotients by two relations that are pointwise equivalent are themselves equivalent.
An alternative is just to use rewriting with `eq`, but then computational proofs get stuck. -/
protected def congr_right {r r' : α → α → Prop} (eq : ∀a₁ a₂, r a₁ a₂ ↔ r' a₁ a₂) :
quot r ≃ quot r' :=
quot.congr (equiv.refl α) eq
/-- An equivalence `e : α ≃ β` generates an equivalence between the quotient space of `α`
by a relation `ra` and the quotient space of `β` by the image of this relation under `e`. -/
protected def congr_left {r : α → α → Prop} (e : α ≃ β) :
quot r ≃ quot (λ b b', r (e.symm b) (e.symm b')) :=
@quot.congr α β r (λ b b', r (e.symm b) (e.symm b')) e (λ a₁ a₂, by simp only [e.symm_apply_apply])
end quot
namespace quotient
/-- An equivalence `e : α ≃ β` generates an equivalence between quotient spaces,
if `ra a₁ a₂ ↔ rb (e a₁) (e a₂)`. -/
protected def congr {ra : setoid α} {rb : setoid β} (e : α ≃ β)
(eq : ∀a₁ a₂, @setoid.r α ra a₁ a₂ ↔ @setoid.r β rb (e a₁) (e a₂)) :
quotient ra ≃ quotient rb :=
quot.congr e eq
/-- Quotients by two relations that are pointwise equivalent are themselves equivalent.
An alternative is just to use rewriting with `eq`, but then computational proofs get stuck. -/
protected def congr_right {r r' : setoid α}
(eq : ∀a₁ a₂, @setoid.r α r a₁ a₂ ↔ @setoid.r α r' a₁ a₂) : quotient r ≃ quotient r' :=
quot.congr_right eq
end quotient
/-- If a function is a bijection between `univ` and a set `s` in the target type, it induces an
equivalence between the original type and the type `↑s`. -/
noncomputable def set.bij_on.equiv {α : Type*} {β : Type*} {s : set β} (f : α → β)
(h : set.bij_on f set.univ s) : α ≃ s :=
begin
have : function.bijective (λ (x : α), (⟨f x, begin exact h.maps_to (set.mem_univ x) end⟩ : s)),
{ split,
{ assume x y hxy,
apply h.inj_on (set.mem_univ x) (set.mem_univ y) (subtype.mk.inj hxy) },
{ assume x,
rcases h.surj_on x.2 with ⟨y, hy⟩,
exact ⟨y, subtype.eq hy.2⟩ } },
exact equiv.of_bijective _ this
end
/-- The composition of an updated function with an equiv on a subset can be expressed as an
updated function. -/
lemma dite_comp_equiv_update {α : Type*} {β : Type*} {γ : Type*} {s : set α} (e : β ≃ s)
(v : β → γ) (w : α → γ) (j : β) (x : γ) [decidable_eq β] [decidable_eq α]
[∀ j, decidable (j ∈ s)] :
(λ (i : α), if h : i ∈ s then (function.update v j x) (e.symm ⟨i, h⟩) else w i) =
function.update (λ (i : α), if h : i ∈ s then v (e.symm ⟨i, h⟩) else w i) (e j) x :=
begin
ext i,
by_cases h : i ∈ s,
{ simp only [h, dif_pos],
have A : e.symm ⟨i, h⟩ = j ↔ i = e j,
by { rw equiv.symm_apply_eq, exact subtype.ext_iff_val },
by_cases h' : i = e j,
{ rw [A.2 h', h'], simp },
{ have : ¬ e.symm ⟨i, h⟩ = j, by simpa [← A] using h',
simp [h, h', this] } },
{ have : i ≠ e j,
by { contrapose! h, have : (e j : α) ∈ s := (e j).2, rwa ← h at this },
simp [h, this] }
end
-- Source file: /src/data/complex/module.lean (repo: abentkamp/mathlib, license: Apache-2.0)
/-
Copyright (c) 2020 Alexander Bentkamp, Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Alexander Bentkamp, Sébastien Gouëzel
-/
import data.complex.basic
import algebra.algebra.ordered
import data.matrix.notation
import field_theory.tower
import linear_algebra.finite_dimensional
/-!
# Complex number as a vector space over `ℝ`
This file contains the following instances:
* Any `•`-structure (`has_scalar`, `mul_action`, `distrib_mul_action`, `module`, `algebra`) on
`ℝ` imbues a corresponding structure on `ℂ`. This includes the statement that `ℂ` is an `ℝ`
algebra.
* any complex vector space is a real vector space;
* any finite dimensional complex vector space is a finite dimensional real vector space;
* the space of `ℝ`-linear maps from a real vector space to a complex vector space is a complex
vector space.
It also defines four linear maps:
* `complex.re_lm`;
* `complex.im_lm`;
* `complex.of_real_lm`;
* `complex.conj_lm`.
They are bundled versions of the real part, the imaginary part, the embedding of `ℝ` in `ℂ`, and
the complex conjugate as `ℝ`-linear maps.
-/
noncomputable theory
namespace complex
variables {R : Type*} {S : Type*}
section
variables [has_scalar R ℝ]
/- The useless `0` multiplication in `smul` is to make sure that
`restrict_scalars.module ℝ ℂ ℂ = complex.module` definitionally. -/
instance : has_scalar R ℂ :=
{ smul := λ r x, ⟨r • x.re - 0 * x.im, r • x.im + 0 * x.re⟩ }
lemma smul_re (r : R) (z : ℂ) : (r • z).re = r • z.re := by simp [(•)]
lemma smul_im (r : R) (z : ℂ) : (r • z).im = r • z.im := by simp [(•)]
@[simp] lemma smul_coe {x : ℝ} {z : ℂ} : x • z = x * z :=
by ext; simp [smul_re, smul_im]
end
instance [has_scalar R ℝ] [has_scalar S ℝ] [smul_comm_class R S ℝ] : smul_comm_class R S ℂ :=
{ smul_comm := λ r s x, by ext; simp [smul_re, smul_im, smul_comm] }
instance [has_scalar R S] [has_scalar R ℝ] [has_scalar S ℝ] [is_scalar_tower R S ℝ] :
is_scalar_tower R S ℂ :=
{ smul_assoc := λ r s x, by ext; simp [smul_re, smul_im, smul_assoc] }
instance [monoid R] [mul_action R ℝ] : mul_action R ℂ :=
{ one_smul := λ x, by ext; simp [smul_re, smul_im, one_smul],
mul_smul := λ r s x, by ext; simp [smul_re, smul_im, mul_smul] }
instance [semiring R] [distrib_mul_action R ℝ] : distrib_mul_action R ℂ :=
{ smul_add := λ r x y, by ext; simp [smul_re, smul_im, smul_add],
smul_zero := λ r, by ext; simp [smul_re, smul_im, smul_zero] }
instance [semiring R] [module R ℝ] : module R ℂ :=
{ add_smul := λ r s x, by ext; simp [smul_re, smul_im, add_smul],
zero_smul := λ r, by ext; simp [smul_re, smul_im, zero_smul] }
instance [comm_semiring R] [algebra R ℝ] : algebra R ℂ :=
{ smul := (•),
smul_def' := λ r x, by ext; simp [smul_re, smul_im, algebra.smul_def],
commutes' := λ r ⟨xr, xi⟩, by ext; simp [smul_re, smul_im, algebra.commutes],
..complex.of_real.comp (algebra_map R ℝ) }
/-- Complex conjugation as an `ℝ`-algebra isomorphism -/
def conj_alg_equiv : ℂ ≃ₐ[ℝ] ℂ :=
{ inv_fun := complex.conj,
left_inv := complex.conj_conj,
right_inv := complex.conj_conj,
commutes' := complex.conj_of_real,
.. complex.conj }
section
open_locale complex_order
lemma complex_ordered_module : ordered_module ℝ ℂ :=
{ smul_lt_smul_of_pos := λ z w x h₁ h₂,
begin
obtain ⟨y, l, rfl⟩ := lt_def.mp h₁,
refine lt_def.mpr ⟨x * y, _, _⟩,
exact mul_pos h₂ l,
ext; simp [mul_add],
end,
lt_of_smul_lt_smul_of_pos := λ z w x h₁ h₂,
begin
obtain ⟨y, l, e⟩ := lt_def.mp h₁,
by_cases h : x = 0,
{ subst h, exfalso, apply lt_irrefl 0 h₂, },
{ refine lt_def.mpr ⟨y / x, div_pos l h₂, _⟩,
replace e := congr_arg (λ z, (x⁻¹ : ℂ) * z) e,
simp only [mul_add, ←mul_assoc, h, one_mul, of_real_eq_zero, smul_coe, ne.def,
not_false_iff, inv_mul_cancel] at e,
convert e,
simp only [div_eq_iff_mul_eq, h, of_real_eq_zero, of_real_div, ne.def, not_false_iff],
norm_cast,
simp [mul_comm _ y, mul_assoc, h],
},
end }
localized "attribute [instance] complex_ordered_module" in complex_order
end
@[simp] lemma coe_algebra_map : ⇑(algebra_map ℝ ℂ) = complex.of_real := rfl
open submodule finite_dimensional
/-- `ℂ` has a basis over `ℝ` given by `1` and `I`. -/
def basis_one_I : basis (fin 2) ℝ ℂ :=
basis.of_equiv_fun
{ to_fun := λ z, ![z.re, z.im],
inv_fun := λ c, c 0 + c 1 • I,
left_inv := λ z, by simp,
right_inv := λ c, by { ext i, fin_cases i; simp },
map_add' := λ z z', by simp,
map_smul' := λ c z, by simp }
@[simp] lemma coe_basis_one_I_repr (z : ℂ) : ⇑(basis_one_I.repr z) = ![z.re, z.im] := rfl
@[simp] lemma coe_basis_one_I : ⇑basis_one_I = ![1, I] :=
funext $ λ i, basis.apply_eq_iff.mpr $ finsupp.ext $ λ j,
by fin_cases i; fin_cases j;
simp only [coe_basis_one_I_repr, finsupp.single_eq_same, finsupp.single_eq_of_ne,
matrix.cons_val_zero, matrix.cons_val_one, matrix.head_cons,
nat.one_ne_zero, fin.one_eq_zero_iff, fin.zero_eq_one_iff, ne.def, not_false_iff,
one_re, one_im, I_re, I_im]
instance : finite_dimensional ℝ ℂ := of_fintype_basis basis_one_I
@[simp] lemma finrank_real_complex : finite_dimensional.finrank ℝ ℂ = 2 :=
by rw [finrank_eq_card_basis basis_one_I, fintype.card_fin]
@[simp] lemma dim_real_complex : module.rank ℝ ℂ = 2 :=
by simp [← finrank_eq_dim, finrank_real_complex]
lemma {u} dim_real_complex' : cardinal.lift.{0 u} (module.rank ℝ ℂ) = 2 :=
by simp [← finrank_eq_dim, finrank_real_complex, bit0]
/-- `fact` version of the dimension of `ℂ` over `ℝ`, locally useful in the definition of the
circle. -/
lemma finrank_real_complex_fact : fact (finrank ℝ ℂ = 2) := ⟨finrank_real_complex⟩
end complex
/- Register as an instance (with low priority) the fact that a complex vector space is also a real
vector space. -/
@[priority 900]
instance module.complex_to_real (E : Type*) [add_comm_group E] [module ℂ E] : module ℝ E :=
restrict_scalars.module ℝ ℂ E
instance module.real_complex_tower (E : Type*) [add_comm_group E] [module ℂ E] :
is_scalar_tower ℝ ℂ E :=
restrict_scalars.is_scalar_tower ℝ ℂ E
@[priority 100]
instance finite_dimensional.complex_to_real (E : Type*) [add_comm_group E] [module ℂ E]
[finite_dimensional ℂ E] : finite_dimensional ℝ E :=
finite_dimensional.trans ℝ ℂ E
lemma dim_real_of_complex (E : Type*) [add_comm_group E] [module ℂ E] :
module.rank ℝ E = 2 * module.rank ℂ E :=
cardinal.lift_inj.1 $
by { rw [← dim_mul_dim' ℝ ℂ E, complex.dim_real_complex], simp [bit0] }
lemma finrank_real_of_complex (E : Type*) [add_comm_group E] [module ℂ E] :
finite_dimensional.finrank ℝ E = 2 * finite_dimensional.finrank ℂ E :=
by rw [← finite_dimensional.finrank_mul_finrank ℝ ℂ E, complex.finrank_real_complex]
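-- A hedged sanity check: specialising `finrank_real_of_complex` to `E = ℂ` is consistent with
-- `finrank_real_complex`, since `finrank ℂ ℂ = 1`.
example : finite_dimensional.finrank ℝ ℂ = 2 * finite_dimensional.finrank ℂ ℂ :=
finrank_real_of_complex ℂ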
namespace complex
/-- Linear map version of the real part function, from `ℂ` to `ℝ`. -/
def re_lm : ℂ →ₗ[ℝ] ℝ :=
{ to_fun := λx, x.re,
map_add' := add_re,
map_smul' := by simp, }
@[simp] lemma re_lm_coe : ⇑re_lm = re := rfl
/-- Linear map version of the imaginary part function, from `ℂ` to `ℝ`. -/
def im_lm : ℂ →ₗ[ℝ] ℝ :=
{ to_fun := λx, x.im,
map_add' := add_im,
map_smul' := by simp, }
@[simp] lemma im_lm_coe : ⇑im_lm = im := rfl
/-- Linear map version of the canonical embedding of `ℝ` in `ℂ`. -/
def of_real_lm : ℝ →ₗ[ℝ] ℂ :=
{ to_fun := coe,
map_add' := of_real_add,
map_smul' := λc x, by simp [algebra.smul_def] }
@[simp] lemma of_real_lm_coe : ⇑of_real_lm = coe := rfl
/-- `ℝ`-linear map version of the complex conjugation function from `ℂ` to `ℂ`. -/
def conj_lm : ℂ →ₗ[ℝ] ℂ :=
{ map_smul' := by simp [restrict_scalars_smul_def],
..conj }
@[simp] lemma conj_lm_coe : ⇑conj_lm = conj := rfl
end complex
-- Source file: /src/linarith.lean (repo: robertylewis/mathematica_examples, no license specified)
/-
Copyright (c) 2020 Robert Y. Lewis. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Robert Y. Lewis
-/
import tactic.core
import mathematica
import tactic.linarith
/-!
This demo shows how we can use Mathematica as a certificate oracle for mathlib's `linarith` tactic.
`linarith` implements a certificate checker, and it doesn't matter where this certificate comes from.
If the checker succeeds then we have a complete proof (with no new axioms).
-/
section
open native tactic tactic.mathematica mathematica linarith
/-- Auxiliary function that produces a string to pass to Mathematica. -/
meta def sum_for_var_i (l : list (ℕ × comp)) (i : ℕ) : string :=
let ls : list string := l.map $ λ ⟨hs, c⟩, to_string (c.coeffs.zfind i) ++ "* x" ++ to_string hs in
" + ".intercalate ls ++ " == 0"
/--
A `linarith.certificate_oracle` is a function `list comp → ℕ → tactic (rb_map ℕ ℕ)`.
Given a list of comparisons and the index of the largest variable, it tries to find a
coefficient for each comparison such that the corresponding linear combination of the
comparisons gives a proof of `0 < 0`.
-/
meta def mm_oracle : linarith.certificate_oracle :=
λ hs max_var,
let cstrs := (list.range max_var.succ).map (λ i, (sum_for_var_i hs.enum i)),
cstrs := " && ".intercalate cstrs,
nngs := (list.range hs.length).map $ λ i, "x" ++ to_string i ++ " >= 0",
nngs := " && ".intercalate nngs,
poss := hs.enum.filter_map $ λ ⟨i, c⟩,
if c.str = ineq.lt then some ("x" ++ to_string i) else none,
poss := " + ".intercalate poss ++ " > 0",
vars := ", ".intercalate $ (list.range hs.length).map (λ i, "x" ++ to_string i),
cmd := to_string $ format!"L = Part[#, 2] & /@ FindInstance[{cstrs} && {nngs} && {poss}, " ++ "{" ++ vars ++ "}, Rationals][[1]]; (LCM @@ Denominator@L)*L" in
do v ← execute_and_eval cmd,
e ← pexpr_of_mmexpr trans_env.empty v,
e ← to_expr e,
e ← eval_expr (list ℕ) e,
return $ rb_map.of_list e.enum
end
/- A simple example of `linarith` using our new oracle. -/
example (x y z : ℚ) (h1 : 2*x < 3*y) (h2 : -4*x + 2*z < 0)
(h3 : 12*y - 4* z < 0) : false :=
by linarith {oracle := mm_oracle}
/- The `nlinarith` variant creates bigger problems,
where more efficient algorithms in Mathematica can shine. -/
example (u v x y A B : ℚ) : (0 < A) → (A ≤ 1) → (1 ≤ B)
→ (x ≤ B) → ( y ≤ B)
→ (0 ≤ u ) → (0 ≤ v )
→ (u < A) → ( v < A)
→ (u * y + v * x + u * v < 3 * A * B) :=
begin
intros,
nlinarith {oracle := mm_oracle}
end
/- `linarith` works over arbitrary types with the right algebraic and order structure. -/
example {α : Type}
[linear_ordered_field α]
{a b c : α}
(ha : a < 0)
(hb : ¬b = 0)
(hc' : c = 0)
(h : (1 - a) * (b * b) ≤ 0)
(hc : 0 ≤ 0)
(this : -(a * -b * -b + b * -b + 0) = (1 - a) * (b * b))
(h : (1 - a) * (b * b) ≤ 0) :
0 < 1 - a :=
begin
linarith {oracle := mm_oracle}
end
-- Source file: /lean4/app/SMT.lean (repo: pnwamk/reopt-vcg, no license specified)
import Galois.Data.SExp
import Galois.Init.Io
import ReoptVCG.SMTParser
import ReoptVCG.MCStdLib
import ReoptVCG.Types
import ReoptVCG.VCGBackend
import ReoptVCG.WordSize
import SMTLIB.Syntax
import X86Semantics.Common
namespace ReoptVCG
open SMT
universe u
def defaultCVC4Args : List String :=
-- N.B., as of CVC4 46591b1c92fc9ecd4a0997242030a1a48166301b the `--arrays-exp` flag
-- enables `--fmf-bound` by default to help with some `sat` queries; it shouldn't affect
-- `unsat` queries allegedly but appears to currently (i.e., some which should produce
-- `unsat` instead produce `unknown`) so we manually pass the `--no-fmf-bound` flag to
-- avoid the `unknown`s.
["--lang=smt2", "--arrays-exp", "--no-fmf-bound", "--incremental"]
/-- Abstracts out the _specifics_ of how certain BlockExpr terms
should be emitted as SMT terms, so that the underlying SMT
architecture can generate these ahead of time in whatever way is
appropriate and then simply parameterize SMT generation of
precondition expressions accordingly. --/
class BlockExprEnv (α : Type u) :=
(initGPReg64 : α → x86.reg64 → term sort.bv64)
(fnStartRegState : α → x86.reg64 → term sort.bv64)
(evalVar : α → LLVM.Ident → Option (Sigma term))
(readMem : α → ∀(w : WordSize), x86.vcg.memaddr → term w.sort)
structure BlockVCGExprEnv :=
(evalVar : LLVM.Ident → Option (Sigma term)) -- FIXME, this may just be state.llvmIdentMap.find? =\
(context : BlockVCGContext)
(state : BlockVCGState)
namespace BlockVCGExprEnv
variable (e : BlockVCGExprEnv)
def initGPReg64 (r : x86.reg64) : term sort.bv64 :=
e.state.mcCurRegs.get_reg64 r
def fnStartRegState (r : x86.reg64) : term sort.bv64 :=
e.context.mcStdLib.funStartRegs.get_reg64 r
def readMem (w:WordSize) (addr : x86.vcg.memaddr) : term w.sort :=
(e.context.mcStdLib.memOps w).readMem e.state.mcCurMem addr
end BlockVCGExprEnv
instance BlockVCGExprEnv.isBlockExprEnv : BlockExprEnv BlockVCGExprEnv :=
{initGPReg64 := BlockVCGExprEnv.initGPReg64,
fnStartRegState := BlockVCGExprEnv.fnStartRegState,
evalVar := BlockVCGExprEnv.evalVar,
readMem := BlockVCGExprEnv.readMem
}
namespace BlockExpr
open WellFormedSExp
def ppLLVMIdent : LLVM.Ident → String
| LLVM.Ident.named nm => nm
| LLVM.Ident.anon n => "%"++(repr n)
def toSExp : ∀ {tp : sort}, BlockExpr tp → SExp String
| _, stackHigh => SExp.atom "stack_high"
| _, initGPReg64 r => SExp.atom r.name
| _, fnStartGPReg64 r => SExp.list [SExp.atom "fnstart", SExp.atom r.name]
| _, mcStack a w =>
SExp.list [SExp.atom "mcstack",
toSExp a,
SExp.list [SExp.atom "_", SExp.atom "BitVec", SExp.atom (repr w.bits)]
]
| _, llvmVar nm tp => SExp.list [SExp.atom "llvm", SExp.atom (ppLLVMIdent nm)]
| _, eq e1 e2 => SExp.list [SExp.atom "=", toSExp e1, toSExp e2]
| _, bvAdd e1 e2 => SExp.list [SExp.atom "bvadd", toSExp e1, toSExp e2]
| _, bvSub e1 e2 => SExp.list [SExp.atom "bvsub", toSExp e1, toSExp e2]
| _, bvDecimal n width => SExp.list [SExp.atom "_", SExp.atom ("bv"++(repr n)), SExp.atom (repr width)]
def toString : ∀ {tp : sort}, BlockExpr tp → String
| _, e => (BlockExpr.toSExp e).toString
-- Converts a BlockExpr into an SMT term given an environment. The translation is partial:
-- an `llvmVar` with no entry in the environment (or whose sort does not match the expected
-- one) yields a `BlockVCGError` instead of a term, which is why the result lives in `Except`.
def toSMT {α : Type u} [BlockExprEnv α] (env: α) : ∀ {tp : sort}, BlockExpr tp → Except BlockVCGError (term tp)
| _, stackHigh => pure $ BlockExprEnv.fnStartRegState env x86.reg64.rsp
| _, initGPReg64 r => pure $ BlockExprEnv.initGPReg64 env r
| _, fnStartGPReg64 r => pure $ BlockExprEnv.fnStartRegState env r
| _, mcStack a w => do
t ← toSMT a;
pure $ BlockExprEnv.readMem env w t
| _, llvmVar nm tp =>
match BlockExprEnv.evalVar env nm with
| some ⟨tp', t⟩ =>
if h : tp = tp'
then
let hEq : term tp' = term tp := h ▸ rfl;
pure $ cast hEq t
else
Except.error $ BlockVCGError.globalErr $
"Error while translating precondition to SMT! LLVM variable `"
++ nm.asString ++ "` did not have the expected SMT sort!"
| none => Except.error $ BlockVCGError.globalErr $
"Error while translating precondition to SMT! LLVM variable `"
++ nm.asString ++ "` had no entry in the phi variable map!"
| _, eq e1 e2 => do
t1 ← toSMT e1;
t2 ← toSMT e2;
pure $ SMT.eq t1 t2
| _, bvAdd e1 e2 => do
t1 ← toSMT e1;
t2 ← toSMT e2;
pure $ SMT.bvadd t1 t2
| _, bvSub e1 e2 => do
t1 ← toSMT e1;
t2 ← toSMT e2;
pure $ SMT.bvsub t1 t2
| _, bvDecimal n width => pure $ SMT.bvimm width n
end BlockExpr
/-- Converts a BlockExpr into an SMT term in the BlockVCG context. --/
def evalPrecondition {tp : sort} (evalVar : LLVM.Ident → Option (Sigma term)) (expr : BlockExpr tp) : BlockVCG (term tp) := do
ctx ← read;
state ← get;
let env := BlockVCGExprEnv.mk evalVar ctx state;
match BlockExpr.toSMT env expr with
| Except.error err => throw err
| Except.ok res => pure res
def ppBlockLabel (lbl:LLVM.BlockLabel) : String :=
match lbl.label with
| LLVM.Ident.named str => str
| LLVM.Ident.anon n => "anon" ++ n.repr
-- | Pretty print an error that occurs at the start of an instruction.
def renderMCInstError (fnm : String) (blockLbl : LLVM.BlockLabel) (llvmInstrIdx : Nat) (addr : Nat) (msg : String) : String :=
fnm++"."++(ppBlockLabel blockLbl)++"."++llvmInstrIdx.repr++" @ "++addr.ppHex++": "++msg
def standaloneGoalFilename (fnName : String) (lbl : LLVM.BlockLabel) (goalIndex : Nat) : String :=
fnName ++ "_" ++ (ppBlockLabel lbl) ++ "_" ++ goalIndex.repr ++ ".smt2"
/-- Return the absolute path to the directory where we can stash
temporary files during IO computations. --/
def getTemporaryDirectory : IO String := do
if !System.Platform.isWindows
then pure "/tmp"
else do
let validateDir : String → String → IO String := (λ envVar dir => do
isValid ← IO.isDir dir;
if isValid
then pure dir
else throw $ IO.userError $ "Temporary directory specified by `"++envVar
++"` environment variable (i.e., `"++dir++"`) does not exist.");
tempMaybeStr ← IO.getEnv "TEMP";
tmpMaybeStr ← IO.getEnv "TMP";
match tempMaybeStr, tmpMaybeStr with
| some tmpDir, _ => validateDir "TEMP" tmpDir
| _, some tmpDir => validateDir "TMP" tmpDir
| _, _ => throw $ IO.userError $ "FATAL ERROR! On Windows, a temporary directory "
++ "must be specified in the environment variable `TEMP` (or `TMP`)."
/-- Like `standaloneGoalFilename`, but gives an absolute path to a filename in the OS's temporary directory.--/
def temporaryStandaloneGoalFilepath (fnName : String) (lbl : LLVM.BlockLabel) (goalIndex : Nat) : IO String := do
tempDir ← getTemporaryDirectory;
pure $ System.mkFilePath [tempDir, standaloneGoalFilename fnName lbl goalIndex]
/-- Emit the setup commands common to every goal script, then check satisfiability of the negated goal in the given context. --/
def checkNegatedGoalInContext (goalName : String) (negatedGoal : term sort.smt_bool) (ctx : smtM Unit) : smtM Unit := do
comment goalName;
setLogic Raw.logic.all;
setProduceModels true;
ctx;
checkSatAssuming [negatedGoal];
exit
/-- Assert the negated goal and write the resulting script
of commands to a file. -/
def exportCheckSatProblem
(outputDir fnName : String)
(blockLabel : LLVM.BlockLabel)
(goalCounter : IO.Ref Nat)
(cmdRef : IO.Ref (smtM Unit))
(negatedGoal : term sort.smt_bool)
(goalName : String)
: IO Unit := do
cnt ← goalCounter.get;
smtCtx ← cmdRef.get;
goalCounter.modify Nat.succ;
let (_, _, cmds) := runsmtM IdGen.empty (checkNegatedGoalInContext goalName negatedGoal smtCtx);
let filePath := System.mkFilePath [outputDir, standaloneGoalFilename fnName blockLabel cnt];
file ← IO.FS.Handle.mk filePath IO.FS.Mode.write;
cmds.forM (λ c => file.putStr c.toLine)
def defaultAddSMTCallback (cmdRef : IO.Ref (smtM Unit)) : SMT.smtM Unit → IO Unit :=
λ action => cmdRef.modify (λ cmds => cmds *> action)
def defaultAddCommandCallback (cmdRef : IO.Ref (smtM Unit)) : command → IO Unit :=
λ cmd => cmdRef.modify (λ cmds => cmds *> liftCommand cmd)
def exportCallbacks
{α}
(outputDir fnName : String)
(blockLabel : LLVM.BlockLabel)
(action : ProverInterface → IO α)
: IO α
:= do
goalCounter <- IO.mkRef 0;
let initSmtM : smtM Unit := pure ();
cmdRef <- IO.mkRef initSmtM;
action
{addSMTCallback := defaultAddSMTCallback cmdRef,
addCommandCallback := defaultAddCommandCallback cmdRef,
proveFalseCallback := λ p msg =>
exportCheckSatProblem outputDir fnName blockLabel goalCounter cmdRef p msg,
proveTrueCallback := λ p msg =>
exportCheckSatProblem outputDir fnName blockLabel goalCounter cmdRef (SMT.not p) msg,
blockErrorCallback := λ i a msg =>
-- FIXME stderr and other handles are in Lean4, fix when we bump next
IO.println $ "Error: " ++ renderMCInstError fnName blockLabel i a msg
}
------------------------------------------------------------------------
-- Interactive session
-- | Information needed for interactively verifying a goal.
structure InteractiveContext :=
(annFile : String)
-- ^ Annotation file (for error-reporting purposes)
(fnName : FnName)
-- ^ Name of function to verify
(blockLabel : LLVM.BlockLabel)
-- ^ Label of block
(allGoalCounter : IO.Ref Nat)
-- ^ Counter for all goals (i.e., the total number)
(verifiedGoalCounter : IO.Ref Nat)
-- ^ Counter for all successfully verified goals.
(blockGoalCounter : IO.Ref Nat)
-- ^ Index of goal to discharge within block
(solverCommand : String)
-- ^ Command line contents which, when followed by a file name, can be executed
-- to see if the resulting script is satisfiable or not.
-- | Function to verify an SMT proposition is provable in the given
-- | context and print the result to the user.
def interactiveVerifyGoal
(ictx : InteractiveContext)
-- ^ Context for verifying goals
(cmdRef : IO.Ref (smtM Unit))
-- ^ The SMT context the goal should be proven in
(negGoal : SMT.term SMT.sort.smt_bool)
-- ^ Negation of goal to verify
(propName : String)
-- ^ Name of proposition for reporting purposes.
: IO Unit := do
cnt ← ictx.blockGoalCounter.get;
ictx.allGoalCounter.modify Nat.succ;
ictx.blockGoalCounter.modify Nat.succ;
let fname := standaloneGoalFilename ictx.fnName ictx.blockLabel cnt;
smtFilePath ← temporaryStandaloneGoalFilepath ictx.fnName ictx.blockLabel cnt;
let resultFilePath := smtFilePath ++ ".result";
-- FIXME was stderr, fix with next Lean4 bump
IO.print $ " Verifying " ++ propName ++ "... ";
-- FIXME, uncomment this line after next lean4 bump
-- IO.stdout.flush;
smtCtx ← cmdRef.get;
let (_, _, cmds) := runsmtM IdGen.empty (checkNegatedGoalInContext propName negGoal smtCtx);
IO.FS.withFile smtFilePath IO.FS.Mode.write (λ file => do
cmds.forM (λ c => file.putStr c.toLine);
file.flush);
Galois.IO.system $ ictx.solverCommand++" "++smtFilePath++" > " ++resultFilePath;
smtResult ← IO.FS.lines resultFilePath;
-- FIXME, this assumes the file has a single word in it essentially... might want to
-- make it slightly more complicated if the solver's output ever spans multiple lines.
let printExportInstructions : IO Unit := (do
let filePath := "<dir>" ++ [System.FilePath.pathSeparator].asString++(standaloneGoalFilename ictx.fnName ictx.blockLabel cnt);
-- FIXME print to stderr after next lean4 bump
IO.println $ " To see the output, run `reopt-vcg "++ictx.annFile++" --export <dir>`";
IO.println $ " The SMT query will be stored in "++filePath;
IO.println $ " The default invocation of CVC4 for these queries is as follows:";
IO.println $ " `" ++ (String.intercalate " " ("cvc4"::defaultCVC4Args ++ [filePath]))++"`");
match smtResult.get? 0 with
| none => do
-- FIXME print to stderr on next lean4 bump (these and all below printlns in this function)
IO.println "ERROR";
IO.println "";
IO.println " Verification failed: no response from the SMT solver was detected.";
printExportInstructions
| some checkSatRes =>
match parseCheckSatResult checkSatRes with
| CheckSatResult.unsat => do
ictx.verifiedGoalCounter.modify Nat.succ;
IO.println $ "OK"
| CheckSatResult.sat => do
IO.println $ "FAIL";
IO.println $ " Verification failed: the SMT query returned `sat`";
IO.println $ "";
printExportInstructions
| CheckSatResult.unknown => do
IO.println $ "FAIL";
IO.println $ " Verification failed: the SMT query returned `unknown`";
IO.println $ "";
printExportInstructions
| CheckSatResult.unsupported => do
IO.println $ "ERROR";
IO.println $ " Verification failed: the SMT query returned `unsupported`";
IO.println $ "";
printExportInstructions
| CheckSatResult.unrecognized msg => do
IO.println $ "ERROR";
IO.println $ " Verification failed: the SMT solver did not return a recognized response to `check-sat-assuming`.";
IO.println $ "";
IO.println $ " Response: ";
smtResult.forM IO.println;
IO.println $ "";
IO.println $ "";
IO.println $ " This failure likely reflects a bug in reopt-vcg.";
IO.println $ "";
printExportInstructions
def newInteractiveSession
(annFile solverPath : String)
(solverArgs : List String)
(allGoalCounter verifiedGoalCounter errorCounter : IO.Ref Nat)
(fnName : FnName)
(lbl : LLVM.BlockLabel)
(action : ProverInterface → IO Unit)
: IO Unit := do
-- Create Goal counter for just this block.
blockGoalCounter ← IO.mkRef 0;
let solverCmd := String.intercalate " " (solverPath::solverArgs);
let initSmtM : smtM Unit := pure ();
cmdRef <- IO.mkRef initSmtM;
let ictx : InteractiveContext:=
{ annFile := annFile,
fnName := fnName,
blockLabel := lbl,
allGoalCounter := allGoalCounter,
verifiedGoalCounter := verifiedGoalCounter,
blockGoalCounter := blockGoalCounter,
solverCommand := solverCmd
};
let fns : ProverInterface :=
{addSMTCallback := defaultAddSMTCallback cmdRef,
addCommandCallback := defaultAddCommandCallback cmdRef,
proveFalseCallback := λ p => interactiveVerifyGoal ictx cmdRef p,
proveTrueCallback := λ p => interactiveVerifyGoal ictx cmdRef (SMT.not p),
blockErrorCallback := λ i a msg => do
-- FIXME stderr and other handles are in Lean4, fix when we bump next
IO.println $ "Error: " ++ (renderMCInstError fnName lbl i a msg);
errorCounter.modify Nat.succ
};
action fns
def interactiveSMTGenerator (annFile solverPath : String) (solverArgs : List String) : IO ProverSessionGenerator := do
-- Counter for all goals
allGoalCounter ← IO.mkRef 0;
-- Counter for goals successfully verified.
verifiedGoalCounter <- IO.mkRef 0;
-- Counter for errors
errorCounter <- IO.mkRef 0;
let whenDone : IO UInt32 := (do
allCnt ← allGoalCounter.get;
verCnt ← verifiedGoalCounter.get;
errorCnt ← errorCounter.get;
let verSuccess := errorCnt == 0 && verCnt == allCnt;
if verSuccess then
IO.println "Verification of all goals succeeded."
else
IO.println "Verification of all goals failed.";
IO.println $ "Verified "++(repr verCnt)++"/"++(repr allCnt)++" goal(s).";
when (errorCnt > 0) $
IO.println $ "Encountered"++(repr errorCnt)++"error(s).";
pure $ if verSuccess then 0 else 1);
pure { blockCallback :=
newInteractiveSession annFile solverPath solverArgs
allGoalCounter verifiedGoalCounter errorCounter,
sessionComplete := whenDone
}
end ReoptVCG
-- Source file: /src/tactic/localized.lean (repo: Lix0120/mathlib, license: Apache-2.0)
/-
Copyright (c) 2019 Floris van Doorn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Floris van Doorn
-/
import tactic.core
/-!
# Localized notation
This consists of two user-commands which allow you to declare notation and commands localized to a
namespace.
* Declare notation which is localized to a namespace using:
```lean
localized "infix ` ⊹ `:60 := my_add" in my.add
```
* After this command it will be available in the same section/namespace/file, just as if you wrote
`local infix ` ⊹ `:60 := my_add`
* You can open it in other places. The following command will declare the notation again as local
notation in that section/namespace/file:
```lean
open_locale my.add
```
* More generally, the following will declare all localized notation in the specified namespaces.
```lean
open_locale namespace1 namespace2 ...
```
* You can also declare other localized commands, like local attributes
```lean
localized "attribute [simp] le_refl" in le
```
The code is inspired by code from Gabriel Ebner from the
[hott3 repository](https://github.com/gebner/hott3).
-/
open lean lean.parser interactive tactic native
reserve notation `localized`
@[user_attribute]
meta def localized_attr : user_attribute (rb_lmap name string) unit := {
name := "_localized",
descr := "(internal) attribute that flags localized commands",
cache_cfg := ⟨λ ns, (do dcls ← ns.mmap (λ n, mk_const n >>= eval_expr (name × string)),
return $ rb_lmap.of_list dcls), []⟩
}
/-- Get all commands in the given notation namespace and return them as a list of strings -/
meta def get_localized (ns : list name) : tactic (list string) :=
do m ← localized_attr.get_cache,
ns.mfoldl (λ l nm, match m.find nm with
| [] := fail format!"locale {nm} does not exist"
| new_l := return $ l.append new_l
end) []
/-- Execute all commands in the given notation namespace -/
@[user_command] meta def open_locale_cmd (_ : parse $ tk "open_locale") : parser unit :=
do ns ← many ident,
cmds ← get_localized ns,
cmds.mmap' emit_code_here
/-- Add a new command to a notation namespace and execute it right now.
The new command is added as a declaration to the environment with name `_localized_decl.<number>`.
This declaration has attribute `_localized` and as value a name-string pair. -/
@[user_command] meta def localized_cmd (_ : parse $ tk "localized") : parser unit :=
do cmd ← parser.pexpr, cmd ← i_to_expr cmd, cmd ← eval_expr string cmd,
let cmd := "local " ++ cmd,
emit_code_here cmd,
tk "in",
nm ← ident,
env ← get_env,
let dummy_decl_name := mk_num_name `_localized_decl
((string.hash (cmd ++ nm.to_string) + env.fingerprint) % unsigned_sz),
add_decl (declaration.defn dummy_decl_name [] `(name × string)
(reflect (⟨nm, cmd⟩ : name × string)) (reducibility_hints.regular 1 tt) ff),
localized_attr.set dummy_decl_name unit.star tt
/--
This consists of two user-commands which allow you to declare notation and commands localized to a
namespace.
* Declare notation which is localized to a namespace using:
```lean
localized "infix ` ⊹ `:60 := my_add" in my.add
```
* After this command it will be available in the same section/namespace/file, just as if you wrote
`local infix ` ⊹ `:60 := my_add`
* You can open it in other places. The following command will declare the notation again as local
notation in that section/namespace/file:
```lean
open_locale my.add
```
* More generally, the following will declare all localized notation in the specified namespaces.
```lean
open_locale namespace1 namespace2 ...
```
* You can also declare other localized commands, like local attributes
```lean
localized "attribute [simp] le_refl" in le
```
* To see all localized commands in a given namespace, run:
```lean
run_cmd print_localized_commands [`my.add].
```
* To see a list of all namespaces with localized commands, run:
```lean
run_cmd do
m ← localized_attr.get_cache,
tactic.trace m.keys -- change to `tactic.trace m.to_list`
-- to list all the commands in each namespace
```
* Warning 1: as a limitation on user commands, you cannot put `open_locale` directly after your
imports. You have to write another command first (e.g. `open`, `namespace`, `universe variables`,
`noncomputable theory`, `run_cmd tactic.skip`, ...).
* Warning 2: You have to fully specify the names used in localized notation, so that the localized
notation also works when the appropriate namespaces are not opened.
-/
add_tactic_doc
{ name := "localized notation",
category := doc_category.cmd,
decl_names := [`localized_cmd, `open_locale_cmd],
tags := ["notation", "type classes"] }
/-- Print all commands in a given notation namespace -/
meta def print_localized_commands (ns : list name) : tactic unit :=
do cmds ← get_localized ns, cmds.mmap' trace
-- you can run `open_locale classical` to get the decidability of all propositions, and downgrade
-- the priority of decidability instances that make Lean run through all the algebraic hierarchy
-- whenever it wants to solve a decidability question
localized "attribute [instance, priority 9] classical.prop_decidable" in classical
localized "attribute [instance, priority 8] eq.decidable decidable_eq_of_decidable_le" in classical
localized "postfix `?`:9001 := optional" in parser
localized "postfix *:9001 := lean.parser.many" in parser
-- Source file: /src/tactic/interactive.lean (repo: digama0/mathlib-ITP2019, license: Apache-2.0)
/-
Copyright (c) 2017 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro, Simon Hudon, Sebastien Gouezel, Scott Morrison
-/
import data.dlist data.dlist.basic data.prod category.basic
tactic.basic tactic.rcases tactic.generalize_proofs
tactic.split_ifs logic.basic tactic.ext tactic.tauto
tactic.replacer tactic.simpa tactic.squeeze tactic.library_search
open lean
open lean.parser
local postfix `?`:9001 := optional
local postfix *:9001 := many
namespace tactic
namespace interactive
open interactive interactive.types expr
/--
The `rcases` tactic is the same as `cases`, but with more flexibility in the
`with` pattern syntax to allow for recursive case splitting. The pattern syntax
uses the following recursive grammar:
```
patt ::= (patt_list "|")* patt_list
patt_list ::= id | "_" | "⟨" (patt ",")* patt "⟩"
```
A pattern like `⟨a, b, c⟩ | ⟨d, e⟩` will do a split over the inductive datatype,
naming the first three parameters of the first constructor as `a,b,c` and the
first two of the second constructor `d,e`. If the list is not as long as the
number of arguments to the constructor or the number of constructors, the
remaining variables will be automatically named. If there are nested brackets
such as `⟨⟨a⟩, b | c⟩ | d` then these will cause more case splits as necessary.
If there are too many arguments, such as `⟨a, b, c⟩` for splitting on
`∃ x, ∃ y, p x`, then it will be treated as `⟨a, ⟨b, c⟩⟩`, splitting the last
parameter as necessary.
`rcases` also has special support for quotient types: quotient induction into Prop works like
matching on the constructor `quot.mk`.
`rcases? e` will perform case splits on `e` in the same way as `rcases e`,
but rather than accepting a pattern, it does a maximal cases and prints the
pattern that would produce this case splitting. The default maximum depth is 5,
but this can be modified with `rcases? e : n`.
-/
meta def rcases : parse rcases_parse → tactic unit
| (p, sum.inl ids) := tactic.rcases p ids
| (p, sum.inr depth) := do
patt ← tactic.rcases_hint p depth,
pe ← pp p,
trace $ ↑"snippet: rcases " ++ pe ++ " with " ++ to_fmt patt
/--
The `rintro` tactic is a combination of the `intros` tactic with `rcases` to
allow for destructuring patterns while introducing variables. See `rcases` for
a description of supported patterns. For example, `rintros (a | ⟨b, c⟩) ⟨d, e⟩`
will introduce two variables, and then do case splits on both of them producing
two subgoals, one with variables `a d e` and the other with `b c d e`.
`rintro?` will introduce and case split on variables in the same way as
`rintro`, but will also print the `rintro` invocation that would have the same
result. Like `rcases?`, `rintro? : n` allows for modifying the
depth of splitting; the default is 5.
-/
meta def rintro : parse rintro_parse → tactic unit
| (sum.inl []) := intros []
| (sum.inl l) := tactic.rintro l
| (sum.inr depth) := do
ps ← tactic.rintro_hint depth,
trace $ ↑"snippet: rintro" ++ format.join (ps.map $ λ p,
format.space ++ format.group (p.format tt))
/-- Alias for `rintro`. -/
meta def rintros := rintro
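-- A hedged usage sketch: `rintro` introduces the propositional variables and destructures
-- the conjunction in a single step.
example : ∀ p q : Prop, p ∧ q → q :=
by { rintro p q ⟨hp, hq⟩, exact hq }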
/-- `try_for n { tac }` executes `tac` for `n` ticks, otherwise uses `sorry` to close the goal.
Never fails. Useful for debugging. -/
meta def try_for (max : parse parser.pexpr) (tac : itactic) : tactic unit :=
do max ← i_to_expr_strict max >>= tactic.eval_expr nat,
λ s, match _root_.try_for max (tac s) with
| some r := r
| none := (tactic.trace "try_for timeout, using sorry" >> admit) s
end
/-- Multiple subst. `substs x y z` is the same as `subst x, subst y, subst z`. -/
meta def substs (l : parse ident*) : tactic unit :=
l.mmap' (λ h, get_local h >>= tactic.subst) >> try (tactic.reflexivity reducible)
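-- A hedged usage sketch: `substs` substitutes the listed equations one after another, and its
-- trailing `reflexivity` closes the resulting reflexive goal.
example (a b c : ℕ) (h₁ : a = b) (h₂ : b = c) : a = c :=
by substs h₁ h₂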
/-- Unfold coercion-related definitions -/
meta def unfold_coes (loc : parse location) : tactic unit :=
unfold [
``coe, ``coe_t, ``has_coe_t.coe, ``coe_b,``has_coe.coe,
``lift, ``has_lift.lift, ``lift_t, ``has_lift_t.lift,
``coe_fn, ``has_coe_to_fun.coe, ``coe_sort, ``has_coe_to_sort.coe] loc
/-- Unfold auxiliary definitions associated with the current declaration. -/
meta def unfold_aux : tactic unit :=
do tgt ← target,
name ← decl_name,
let to_unfold := (tgt.list_names_with_prefix name),
guard (¬ to_unfold.empty),
-- should we be using simp_lemmas.mk_default?
simp_lemmas.mk.dsimplify to_unfold.to_list tgt >>= tactic.change
/-- For debugging only. This tactic checks the current state for any
missing dropped goals and restores them. Useful when there are no
goals to solve but "result contains meta-variables". -/
meta def recover : tactic unit :=
metavariables >>= tactic.set_goals
/-- Like `try { tac }`, but in the case of failure it continues
from the failure state instead of reverting to the original state. -/
meta def continue (tac : itactic) : tactic unit :=
λ s, result.cases_on (tac s)
(λ a, result.success ())
(λ e ref, result.success ())
/-- Move goal `n` to the front. -/
meta def swap (n := 2) : tactic unit :=
do gs ← get_goals,
match gs.nth (n-1) with
| (some g) := set_goals (g :: gs.remove_nth (n-1))
| _ := skip
end
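-- A hedged usage sketch: after `split` there are two goals, and `swap` brings the second one
-- to the front so it is solved first.
example : 1 = 1 ∧ 2 = 2 :=
by { split, swap, refl, refl }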
/-- Generalize proofs in the goal, naming them with the provided list. -/
meta def generalize_proofs : parse ident_* → tactic unit :=
tactic.generalize_proofs
/-- Clear all hypotheses starting with `_`, like `_match` and `_let_match`. -/
meta def clear_ : tactic unit := tactic.repeat $ do
l ← local_context,
l.reverse.mfirst $ λ h, do
name.mk_string s p ← return $ local_pp_name h,
guard (s.front = '_'),
cl ← infer_type h >>= is_class, guard (¬ cl),
tactic.clear h
meta def apply_iff_congr_core (tgt : expr) : tactic unit :=
do applyc ``iff_of_eq,
(lhs, rhs) ← target >>= match_eq,
guard lhs.is_app,
clemma ← mk_specialized_congr_lemma lhs,
apply_congr_core clemma
meta def congr_core' : tactic unit :=
do tgt ← target,
apply_eq_congr_core tgt
<|> apply_heq_congr_core
<|> apply_iff_congr_core tgt
<|> fail "congr tactic failed"
/--
Same as the `congr` tactic, but takes an optional argument which gives
the depth of recursive applications. This is useful when `congr`
is too aggressive in breaking down the goal. For example, given
`⊢ f (g (x + y)) = f (g (y + x))`, `congr'` produces the goals `⊢ x = y`
and `⊢ y = x`, while `congr' 2` produces the intended `⊢ x + y = y + x`. -/
meta def congr' : parse (with_desc "n" small_nat)? → tactic unit
| (some 0) := failed
| o := focus1 (assumption <|> (congr_core' >>
all_goals (reflexivity <|> `[apply proof_irrel_heq] <|>
`[apply proof_irrel] <|> try (congr' (nat.pred <$> o)))))
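/- A minimal usage sketch (hypothetical goal, not part of the original file): `congr' 1` strips
one application of `f` and leaves the arguments to be proved equal.
  example (f : ℕ → ℕ) (x y : ℕ) (h : x + y = y + x) : f (x + y) = f (y + x) :=
  by { congr' 1, exact h }
-/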
/--
Acts like `have`, but removes a hypothesis with the same name as
this one. For example if the state is `h : p ⊢ goal` and `f : p → q`,
then after `replace h := f h` the goal will be `h : q ⊢ goal`,
where `have h := f h` would result in the state `h : p, h : q ⊢ goal`.
This can be used to simulate the `specialize` and `apply at` tactics
of Coq. -/
meta def replace (h : parse ident?) (q₁ : parse (tk ":" *> texpr)?) (q₂ : parse $ (tk ":=" *> texpr)?) : tactic unit :=
do let h := h.get_or_else `this,
old ← try_core (get_local h),
«have» h q₁ q₂,
match old, q₂ with
| none, _ := skip
| some o, some _ := tactic.clear o
| some o, none := swap >> tactic.clear o >> swap
end
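/- A minimal usage sketch (hypothetical hypotheses, not part of the original file): `replace`
overwrites `h` instead of adding a second hypothesis named `h`.
  example (p q : Prop) (f : p → q) (h : p) : q :=
  by { replace h := f h, exact h }
-/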
/--
`apply_assumption` looks for an assumption of the form `... → ∀ _, ... → head`
where `head` matches the current goal.
alternatively, when encountering an assumption of the form `sg₀ → ¬ sg₁`,
after the main approach failed, the goal is dismissed and `sg₀` and `sg₁`
are made into the new goal.
optional arguments:
- asms: list of rules to consider instead of the local constants
- tac: a tactic to run on each subgoals after applying an assumption; if
this tactic fails, the corresponding assumption will be rejected and
the next one will be attempted.
-/
meta def apply_assumption
(asms : tactic (list expr) := local_context)
(tac : tactic unit := return ()) : tactic unit :=
tactic.apply_assumption asms tac
open nat
meta def mk_assumption_set (no_dflt : bool) (hs : list simp_arg_type) (attr : list name): tactic (list expr) :=
do (hs, gex, hex, all_hyps) ← decode_simp_arg_list hs,
hs ← hs.mmap i_to_expr_for_apply,
l ← attr.mmap $ λ a, attribute.get_instances a,
let l := l.join,
m ← list.mmap mk_const l,
let hs := (hs ++ m).filter $ λ h, expr.const_name h ∉ gex,
hs ← if no_dflt then
return hs
else
do { congr_fun ← mk_const `congr_fun,
congr_arg ← mk_const `congr_arg,
return (congr_fun :: congr_arg :: hs) },
if ¬ no_dflt ∨ all_hyps then do
ctx ← local_context,
return $ hs.append (ctx.filter (λ h, h.local_uniq_name ∉ hex)) -- remove local exceptions
else return hs
/--
`solve_by_elim` calls `apply_assumption` on the main goal to find an assumption whose head matches
and then repeatedly calls `apply_assumption` on the generated subgoals until no subgoals remain,
performing at most `max_rep` recursive steps.
`solve_by_elim` discharges the current goal or fails.
`solve_by_elim` performs back-tracking if `apply_assumption` chooses an unproductive assumption.
By default, the assumptions passed to apply_assumption are the local context, `congr_fun` and
`congr_arg`.
`solve_by_elim [h₁, h₂, ..., hᵣ]` also applies the named lemmas.
`solve_by_elim with attr₁ ... attrᵣ` also applies all lemmas tagged with the specified attributes.
`solve_by_elim only [h₁, h₂, ..., hᵣ]` does not include the local context, `congr_fun`, or `congr_arg`
unless they are explicitly included.
`solve_by_elim [-id]` removes a specified assumption.
`solve_by_elim*` tries to solve all goals together, using backtracking if a solution for one goal
makes other goals impossible.
optional arguments:
- discharger: a subsidiary tactic to try at each step (e.g. `cc` may be helpful)
- max_rep: number of attempts at discharging generated sub-goals
-/
meta def solve_by_elim (all_goals : parse $ (tk "*")?) (no_dflt : parse only_flag) (hs : parse simp_arg_list) (attr_names : parse with_ident_list) (opt : by_elim_opt := { }) : tactic unit :=
do asms ← mk_assumption_set no_dflt hs attr_names,
tactic.solve_by_elim { all_goals := all_goals.is_some, assumptions := return asms, ..opt }
/--
`tautology` breaks down assumptions of the form `_ ∧ _`, `_ ∨ _`, `_ ↔ _` and `∃ _, _`
and splits a goal of the form `_ ∧ _`, `_ ↔ _` or `∃ _, _` until it can be discharged
using `reflexivity` or `solve_by_elim`
-/
meta def tautology (c : parse $ (tk "!")?) := tactic.tautology c.is_some
/-- Shorter name for the tactic `tautology`. -/
meta def tauto (c : parse $ (tk "!")?) := tautology c
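/- A minimal usage sketch (hypothetical goal, not part of the original file): `tauto` splits the
conjunction on both sides and closes the pieces with assumptions.
  example (p q : Prop) (h : p ∧ q) : q ∧ p :=
  by tauto
-/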
/-- Make every propositions in the context decidable -/
meta def classical := tactic.classical
private meta def generalize_arg_p_aux : pexpr → parser (pexpr × name)
| (app (app (macro _ [const `eq _ ]) h) (local_const x _ _ _)) := pure (h, x)
| _ := fail "parse error"
private meta def generalize_arg_p : parser (pexpr × name) :=
with_desc "expr = id" $ parser.pexpr 0 >>= generalize_arg_p_aux
lemma {u} generalize_a_aux {α : Sort u}
(h : ∀ x : Sort u, (α → x) → x) : α := h α id
/--
Like `generalize` but also considers assumptions
specified by the user. The user can also specify to
omit the goal.
-/
meta def generalize_hyp (h : parse ident?) (_ : parse $ tk ":")
(p : parse generalize_arg_p)
(l : parse location) :
tactic unit :=
do h' ← get_unused_name `h,
x' ← get_unused_name `x,
g ← if ¬ l.include_goal then
do refine ``(generalize_a_aux _),
some <$> (prod.mk <$> tactic.intro x' <*> tactic.intro h')
else pure none,
n ← l.get_locals >>= tactic.revert_lst,
generalize h () p,
intron n,
match g with
| some (x',h') :=
do tactic.apply h',
tactic.clear h',
tactic.clear x'
| none := return ()
end
/--
Similar to `refine` but generates equality proof obligations
for every discrepancy between the goal and the type of the rule.
-/
meta def convert (sym : parse (with_desc "←" (tk "<-")?)) (r : parse texpr) (n : parse (tk "using" *> small_nat)?) : tactic unit :=
do v ← mk_mvar,
if sym.is_some
then refine ``(eq.mp %%v %%r)
else refine ``(eq.mpr %%v %%r),
gs ← get_goals,
set_goals [v],
congr' n,
gs' ← get_goals,
set_goals $ gs' ++ gs
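/- A minimal usage sketch (hypothetical hypothesis, not part of the original file): `exact h`
would fail because the goal and `h` differ up to `zero_add`, but `convert h` leaves exactly
that discrepancy as a goal.
  example (a b : ℕ) (h : 0 + a = b) : a = b :=
  by { convert h, rw zero_add }
-/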
meta def clean_ids : list name :=
[``id, ``id_rhs, ``id_delta, ``hidden]
/--
Remove identity functions from a term. These are normally
automatically generated with terms like `show t, from p` or
`(p : t)` which translate to some variant on `@id t p` in
order to retain the type. -/
meta def clean (q : parse texpr) : tactic unit :=
do tgt : expr ← target,
e ← i_to_expr_strict ``(%%q : %%tgt),
tactic.exact $ e.replace (λ e n,
match e with
| (app (app (const n _) _) e') :=
if n ∈ clean_ids then some e' else none
| (app (lam _ _ _ (var 0)) e') := some e'
| _ := none
end)
meta def source_fields (missing : list name) (e : pexpr) : tactic (list (name × pexpr)) :=
do e ← to_expr e,
t ← infer_type e,
let struct_n : name := t.get_app_fn.const_name,
fields ← expanded_field_list struct_n,
let exp_fields := fields.filter (λ x, x.2 ∈ missing),
exp_fields.mmap $ λ ⟨p,n⟩,
(prod.mk n ∘ to_pexpr) <$> mk_mapp (n.update_prefix p) [none,some e]
meta def collect_struct' : pexpr → state_t (list $ expr×structure_instance_info) tactic pexpr | e :=
do some str ← pure (e.get_structure_instance_info)
| e.traverse collect_struct',
v ← monad_lift mk_mvar,
modify (list.cons (v,str)),
pure $ to_pexpr v
meta def collect_struct (e : pexpr) : tactic $ pexpr × list (expr×structure_instance_info) :=
prod.map id list.reverse <$> (collect_struct' e).run []
meta def refine_one (str : structure_instance_info) :
tactic $ list (expr×structure_instance_info) :=
do tgt ← target,
let struct_n : name := tgt.get_app_fn.const_name,
exp_fields ← expanded_field_list struct_n,
let missing_f := exp_fields.filter (λ f, (f.2 : name) ∉ str.field_names),
(src_field_names,src_field_vals) ← (@list.unzip name _ ∘ list.join) <$> str.sources.mmap (source_fields $ missing_f.map prod.snd),
let provided := exp_fields.filter (λ f, (f.2 : name) ∈ str.field_names),
let missing_f' := missing_f.filter (λ x, x.2 ∉ src_field_names),
vs ← mk_mvar_list missing_f'.length,
(field_values,new_goals) ← list.unzip <$> (str.field_values.mmap collect_struct : tactic _),
e' ← to_expr $ pexpr.mk_structure_instance
{ struct := some struct_n
, field_names := str.field_names ++ missing_f'.map prod.snd ++ src_field_names
, field_values := field_values ++ vs.map to_pexpr ++ src_field_vals },
tactic.exact e',
gs ← with_enable_tags (
mzip_with (λ (n : name × name) v, do
set_goals [v],
try (interactive.unfold (provided.map $ λ ⟨s,f⟩, f.update_prefix s) (loc.ns [none])),
apply_auto_param
<|> apply_opt_param
<|> (set_main_tag [`_field,n.2,n.1]),
get_goals)
missing_f' vs),
set_goals gs.join,
return new_goals.join
meta def refine_recursively : expr × structure_instance_info → tactic (list expr) | (e,str) :=
do set_goals [e],
rs ← refine_one str,
gs ← get_goals,
gs' ← rs.mmap refine_recursively,
return $ gs'.join ++ gs
/--
`refine_struct { .. }` acts like `refine` but works only with structure instance
literals. It creates a goal for each missing field and tags it with the name of the
field so that `have_field` can be used to generically refer to the field currently
being refined.
As an example, we can use `refine_struct` to automate the construction of semigroup
instances:
```
refine_struct ( { .. } : semigroup α ),
-- case semigroup, mul
-- α : Type u,
-- ⊢ α → α → α
-- case semigroup, mul_assoc
-- α : Type u,
-- ⊢ ∀ (a b c : α), a * b * c = a * (b * c)
```
-/
meta def refine_struct : parse texpr → tactic unit | e :=
do (x,xs) ← collect_struct e,
refine x,
gs ← get_goals,
xs' ← xs.mmap refine_recursively,
set_goals (xs'.join ++ gs)
/--
`guard_hyp h := t` fails if the hypothesis `h` does not have type `t`.
We use this tactic for writing tests.
Unlike `guard_hyp`, it instantiates metavariables in the hypothesis type before comparing.
-/
meta def guard_hyp' (n : parse ident) (p : parse $ tk ":=" *> texpr) : tactic unit :=
do h ← get_local n >>= infer_type >>= instantiate_mvars, guard_expr_eq h p
meta def guard_hyp_nums (n : ℕ) : tactic unit :=
do k ← local_context,
guard (n = k.length) <|> fail format!"{k.length} hypotheses found"
meta def guard_tags (tags : parse ident*) : tactic unit :=
do (t : list name) ← get_main_tag,
guard (t = tags)
meta def get_current_field : tactic name :=
do [_,field,str] ← get_main_tag,
expr.const_name <$> resolve_name (field.update_prefix str)
meta def field (n : parse ident) (tac : itactic) : tactic unit :=
do gs ← get_goals,
ts ← gs.mmap get_tag,
([g],gs') ← pure $ (list.zip gs ts).partition (λ x, x.snd.nth 1 = some n),
set_goals [g.1],
tac, done,
set_goals $ gs'.map prod.fst
/--
`have_field`, used after `refine_struct _` poses `field` as a local constant
with the type of the field of the current goal:
```
refine_struct ({ .. } : semigroup α),
{ have_field, ... },
{ have_field, ... },
```
behaves like
```
refine_struct ({ .. } : semigroup α),
{ have field := @semigroup.mul, ... },
{ have field := @semigroup.mul_assoc, ... },
```
-/
meta def have_field : tactic unit :=
propagate_tags $
get_current_field
>>= mk_const
>>= note `field none
>> return ()
/-- `apply_field` functions as `have_field, apply field, clear field` -/
meta def apply_field : tactic unit :=
propagate_tags $
get_current_field >>= applyc
/-- `apply_rules hs n`: apply the list of rules `hs` (given as pexpr) and `assumption` on the
first goal and the resulting subgoals, iteratively, at most `n` times.
`n` is 50 by default. `hs` can contain user attributes: in this case all theorems with this
attribute are added to the list of rules.
For example, with or without the user attribute:
```
@[user_attribute]
meta def mono_rules : user_attribute :=
{ name := `mono_rules,
descr := "lemmas usable to prove monotonicity" }
attribute [mono_rules] add_le_add mul_le_mul_of_nonneg_right
lemma my_test {a b c d e : real} (h1 : a ≤ b) (h2 : c ≤ d) (h3 : 0 ≤ e) :
a + c * e + a + c + 0 ≤ b + d * e + b + d + e :=
by apply_rules mono_rules
-- any of the following lines would also work:
-- add_le_add (add_le_add (add_le_add (add_le_add h1 (mul_le_mul_of_nonneg_right h2 h3)) h1 ) h2) h3
-- by apply_rules [add_le_add, mul_le_mul_of_nonneg_right]
-- by apply_rules [mono_rules]
```
-/
meta def apply_rules (hs : parse pexpr_list_or_texpr) (n : nat := 50) : tactic unit :=
tactic.apply_rules hs n
meta def return_cast (f : option expr) (t : option (expr × expr))
(es : list (expr × expr × expr))
(e x x' eq_h : expr) :
tactic (option (expr × expr) × list (expr × expr × expr)) :=
(do guard (¬ e.has_var),
unify x x',
u ← mk_meta_univ,
f ← f <|> mk_mapp ``_root_.id [(expr.sort u : expr)],
t' ← infer_type e,
some (f',t) ← pure t | return (some (f,t'), (e,x',eq_h) :: es),
infer_type e >>= is_def_eq t,
unify f f',
return (some (f,t), (e,x',eq_h) :: es)) <|>
return (t, es)
meta def list_cast_of_aux (x : expr) (t : option (expr × expr))
(es : list (expr × expr × expr)) :
expr → tactic (option (expr × expr) × list (expr × expr × expr))
| e@`(cast %%eq_h %%x') := return_cast none t es e x x' eq_h
| e@`(eq.mp %%eq_h %%x') := return_cast none t es e x x' eq_h
| e@`(eq.mpr %%eq_h %%x') := mk_eq_symm eq_h >>= return_cast none t es e x x'
| e@`(@eq.subst %%α %%p %%a %%b %%eq_h %%x') := return_cast p t es e x x' eq_h
| e@`(@eq.substr %%α %%p %%a %%b %%eq_h %%x') := mk_eq_symm eq_h >>= return_cast p t es e x x'
| e@`(@eq.rec %%α %%a %%f %%x' _ %%eq_h) := return_cast f t es e x x' eq_h
| e@`(@eq.rec_on %%α %%a %%f %%b %%eq_h %%x') := return_cast f t es e x x' eq_h
| e := return (t,es)
meta def list_cast_of (x tgt : expr) : tactic (list (expr × expr × expr)) :=
(list.reverse ∘ prod.snd) <$> tgt.mfold (none, []) (λ e i es, list_cast_of_aux x es.1 es.2 e)
private meta def h_generalize_arg_p_aux : pexpr → parser (pexpr × name)
| (app (app (macro _ [const `heq _ ]) h) (local_const x _ _ _)) := pure (h, x)
| _ := fail "parse error"
private meta def h_generalize_arg_p : parser (pexpr × name) :=
with_desc "expr == id" $ parser.pexpr 0 >>= h_generalize_arg_p_aux
/--
`h_generalize Hx : e == x` matches on `cast _ e` in the goal and replaces it with
`x`. It also adds `Hx : e == x` as an assumption. If `cast _ e` appears multiple
times (not necessarily with the same proof), they are all replaced by `x`. `cast`,
`eq.mp`, `eq.mpr`, `eq.subst`, `eq.substr`, `eq.rec` and `eq.rec_on` are all treated
as casts.
`h_generalize Hx : e == x with h` adds hypothesis `α = β` with `e : α, x : β`.
`h_generalize Hx : e == x with _` automatically chooses the name of the
assumption `α = β`.
`h_generalize! Hx : e == x` reverts `Hx`.
when `Hx` is omitted, assumption `Hx : e == x` is not added.
-/
meta def h_generalize (rev : parse (tk "!")?)
(h : parse ident_?)
(_ : parse (tk ":"))
(arg : parse h_generalize_arg_p)
(eqs_h : parse ( (tk "with" >> pure <$> ident_) <|> pure [])) :
tactic unit :=
do let (e,n) := arg,
let h' := if h = `_ then none else h,
h' ← (h' : tactic name) <|> get_unused_name ("h" ++ n.to_string : string),
e ← to_expr e,
tgt ← target,
((e,x,eq_h)::es) ← list_cast_of e tgt | fail "no cast found",
interactive.generalize h' () (to_pexpr e, n),
asm ← get_local h',
v ← get_local n,
hs ← es.mmap (λ ⟨e,_⟩, mk_app `eq [e,v]),
(eqs_h.zip [e]).mmap' (λ ⟨h,e⟩, do
h ← if h ≠ `_ then pure h else get_unused_name `h,
() <$ note h none eq_h ),
hs.mmap' (λ h,
do h' ← assert `h h,
tactic.exact asm,
try (rewrite_target h'),
tactic.clear h' ),
when h.is_some (do
(to_expr ``(heq_of_eq_rec_left %%eq_h %%asm)
<|> to_expr ``(heq_of_eq_mp %%eq_h %%asm))
>>= note h' none >> pure ()),
tactic.clear asm,
when rev.is_some (interactive.revert [n])
/-- `choose a b h using hyp` takes an hypothesis `hyp` of the form
`∀ (x : X) (y : Y), ∃ (a : A) (b : B), P x y a b` for some `P : X → Y → A → B → Prop` and outputs
into context a function `a : X → Y → A`, `b : X → Y → B` and a proposition `h` stating
`∀ (x : X) (y : Y), P x y (a x y) (b x y)`. It presumably also works with dependent versions.
Example:
```lean
example (h : ∀n m : ℕ, ∃i j, m = n + i ∨ m + j = n) : true :=
begin
choose i j h using h,
guard_hyp i := ℕ → ℕ → ℕ,
guard_hyp j := ℕ → ℕ → ℕ,
guard_hyp h := ∀ (n m : ℕ), m = n + i n m ∨ m + j n m = n,
trivial
end
```
-/
meta def choose (first : parse ident) (names : parse ident*) (tgt : parse (tk "using" *> texpr)?) :
tactic unit := do
tgt ← match tgt with
| none := get_local `this
| some e := tactic.i_to_expr_strict e
end,
tactic.choose tgt (first :: names),
try (tactic.clear tgt)
meta def guard_expr_eq' (t : expr) (p : parse $ tk ":=" *> texpr) : tactic unit :=
do e ← to_expr p, is_def_eq t e
/--
`guard_target t` fails if the target of the main goal is not `t`.
We use this tactic for writing tests.
-/
meta def guard_target' (p : parse texpr) : tactic unit :=
do t ← target, guard_expr_eq' t p
/--
a weaker version of `trivial` that tries to solve the goal by reflexivity or by reducing it to true,
unfolding only `reducible` constants. -/
meta def triv : tactic unit :=
tactic.triv' <|> tactic.reflexivity reducible <|> tactic.contradiction <|> fail "triv tactic failed"
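/- A minimal usage sketch (hypothetical goal, not part of the original file): unlike `trivial`,
`triv` only unfolds `reducible` definitions, so it stays cheap on large goals.
  example (n : ℕ) : n = n := by triv
-/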
/--
Similar to `existsi`. `use x` will instantiate the first term of an `∃` or `Σ` goal with `x`.
Unlike `existsi`, `x` is elaborated with respect to the expected type.
`use` will alternatively take a list of terms `[x0, ..., xn]`.
`use` will work with constructors of arbitrary inductive types.
Examples:
example (α : Type) : ∃ S : set α, S = S :=
by use ∅
example : ∃ x : ℤ, x = x :=
by use 42
example : ∃ a b c : ℤ, a + b + c = 6 :=
by use [1, 2, 3]
example : ∃ p : ℤ × ℤ, p.1 = 1 :=
by use ⟨1, 42⟩
example : Σ x y : ℤ, (ℤ × ℤ) × ℤ :=
by use [1, 2, 3, 4, 5]
inductive foo
| mk : ℕ → bool × ℕ → ℕ → foo
example : foo :=
by use [100, tt, 4, 3]
-/
meta def use (l : parse pexpr_list_or_texpr) : tactic unit :=
tactic.use l >> try triv
/--
`clear_aux_decl` clears every `aux_decl` in the local context for the current goal.
This includes the induction hypothesis when using the equation compiler and
`_let_match` and `_fun_match`.
It is useful when using a tactic such as `finish`, `simp *` or `subst` that may use these
auxiliary declarations, and produce an error saying the recursion is not well founded.
-/
meta def clear_aux_decl : tactic unit := tactic.clear_aux_decl
meta def loc.get_local_pp_names : loc → tactic (list name)
| loc.wildcard := list.map expr.local_pp_name <$> local_context
| (loc.ns l) := return l.reduce_option
meta def loc.get_local_uniq_names (l : loc) : tactic (list name) :=
list.map expr.local_uniq_name <$> l.get_locals
/--
The logic of `change x with y at l` fails when there are dependencies.
`change'` mimics the behavior of `change`, except in the case of `change x with y at l`.
In this case, it will correctly replace occurrences of `x` with `y` at all possible hypotheses in `l`.
As long as `x` and `y` are defeq, it should never fail.
-/
meta def change' (q : parse texpr) : parse (tk "with" *> texpr)? → parse location → tactic unit
| none (loc.ns [none]) := do e ← i_to_expr q, change_core e none
| none (loc.ns [some h]) := do eq ← i_to_expr q, eh ← get_local h, change_core eq (some eh)
| none _ := fail "change-at does not support multiple locations"
| (some w) l :=
do l' ← loc.get_local_pp_names l,
l'.mmap' (λ e, try (change_with_at q w e)),
when l.include_goal $ change q w (loc.ns [none])
private meta def opt_dir_with : parser (option (bool × name)) :=
(do tk "with",
arrow ← (tk "<-")?,
h ← ident,
return (arrow.is_some, h)) <|> return none
/--
`set a := t with h` is a variant of `let a := t`.
It adds the hypothesis `h : a = t` to the local context and replaces `t` with `a` everywhere it can.
`set a := t with ←h` will add `h : t = a` instead.
`set! a := t with h` does not do any replacing.
-/
meta def set (h_simp : parse (tk "!")?) (a : parse ident) (tp : parse ((tk ":") >> texpr)?) (_ : parse (tk ":=")) (pv : parse texpr)
(rev_name : parse opt_dir_with) :=
do let vt := match tp with | some t := t | none := pexpr.mk_placeholder end,
let pv := ``(%%pv : %%vt),
v ← to_expr pv,
tp ← infer_type v,
definev a tp v,
when h_simp.is_none $ change' pv (some (expr.const a [])) loc.wildcard,
match rev_name with
| some (flip, id) :=
do nv ← get_local a,
pf ← to_expr (cond flip ``(%%pv = %%nv) ``(%%nv = %%pv)) >>= assert id,
reflexivity
| none := skip
end
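/- A minimal usage sketch (hypothetical hypothesis, not part of the original file): `set`
abbreviates `x + x` as `y` everywhere and records the defining equation in `hy`.
  example (x : ℕ) (h : x + x = 4) : x + x = 4 :=
  by { set y := x + x with hy, exact h }
-/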
end interactive
end tactic
| b935efc7b6fd9bc256a980dfae8c15626a825ea9 | d7189ea2ef694124821b033e533f18905b5e87ef | /galois/bitvec/default.lean | dda4efcf119315253c4f0902bfa75da41b4020da | ["Apache-2.0"] | permissive | digama0/lean-protocol-support | eaa7e6f8b8e0d5bbfff1f7f52bfb79a3b11b0f59 | cabfa3abedbdd6fdca6e2da6fbbf91a13ed48dda | refs/heads/master | 1,625,421,450,627 | 1,506,035,462,000 | 1,506,035,462,000 | null | 0 | 0 | null | null | null | null | UTF-8 | Lean | false | false | 2,256 | lean |
import .simplify_eq
import galois.list
import galois.nat
namespace bitvec
-- Unsigned extension
def uext {m :ℕ } (x:bitvec m) (n:ℕ) (pr : m ≤ n) : bitvec n :=
let q : n - m + m = n := nat.sub_add_cancel pr in
bitvec.cong q (bitvec.append (bitvec.zero (n-m)) x)
-- Return all bitvectors with a given size.
def all : Π(k:ℕ), list (bitvec k)
| 0 := [bitvec.zero 0]
| (nat.succ k)
:= list.map (vector.cons ff) (all k)
++ list.map (vector.cons tt) (all k)
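-- For instance (a hypothetical check, not part of the original file), `all 2` has `2 ^ 2 = 4`
-- elements: `#eval (all 2).length` would print `4`.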
protected def to_string {n : nat} (x : bitvec n) : string := has_to_string.to_string x.to_nat
instance (n : nat) : has_to_string (bitvec n) := ⟨bitvec.to_string⟩
-- Return the maximum unsigned number.
def max_unsigned : ℕ → ℕ
| 0 := 0
| (nat.succ x) := 2 * max_unsigned x + 1
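-- Unfolding the recursion gives `max_unsigned n = 2 ^ n - 1`; for example
-- `max_unsigned 3 = 2 * (2 * (2 * 0 + 1) + 1) + 1 = 7`.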
-- Lemma about upper bounds of bits_to_nat
lemma bits_to_nat_upper_bound (v : list bool) : bits_to_nat v ≤ max_unsigned (list.length v) :=
-- Show 'cond e 1 0' is at most 1.
let cond_le : Π (e : bool), cond e 1 0 ≤ 1 := λe,
match e with
| bool.tt := nat.le_refl _
| bool.ff := nat.zero_le _
end in
begin
unfold bits_to_nat,
-- Induct over the structure of foldl
apply (list.foldl_rec (λl r, r ≤ max_unsigned (list.length l))),
{ intros l t e p,
unfold add_lsb,
-- Replace cond e 1 0 with 1
transitivity,
{ apply add_le_add_left,
apply (cond_le e)
},
-- Simplify to recursive case
simp [nat.add_succ, max_unsigned, add_lsb, nat.mul_succ],
apply nat.succ_le_succ,
apply nat.add_le_add_both,
all_goals { exact p },
},
{ exact dec_trivial,
}
end
-- Upper bounds of bitvec.to_nat
theorem to_nat_upper_bound {n : ℕ} : ∀(x : bitvec n), bitvec.to_nat x ≤ max_unsigned n
| ⟨ x, p ⟩ :=
begin
simp [eq.symm p],
apply bitvec.bits_to_nat_upper_bound
end
-- Show that prepending zero to a number does not change it.
theorem to_nat_append_zero (m : ℕ) {n : ℕ} (k : bitvec n)
: bitvec.to_nat (bitvec.append (0 : bitvec m) k) = bitvec.to_nat k :=
begin
cases k,
induction m with m ind,
refl,
{
revert ind,
simp [ bitvec.zero, bitvec.append],
simp [vector.repeat, vector.append, bitvec.to_nat],
simp [ bits_to_nat, list.foldl, add_lsb],
exact id,
}
end
end bitvec
| cc15e20effa41669f32a6bae4fba0d98a57ec6d9 | 6dc0c8ce7a76229dd81e73ed4474f15f88a9e294 | /stage0/src/Lean/Util/PPExt.lean | 8564b4ceb868bec02d59fbd0386cbbf836cf2c42 | ["Apache-2.0"] | permissive | williamdemeo/lean4 | 72161c58fe65c3ad955d6a3050bb7d37c04c0d54 | 6d00fcf1d6d873e195f9220c668ef9c58e9c4a35 | refs/heads/master | 1,678,305,356,877 | 1,614,708,995,000 | 1,614,708,995,000 | null | 0 | 0 | null | null | null | null | UTF-8 | Lean | false | false | 2,576 | lean |
/-
Copyright (c) 2020 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Leonardo de Moura
-/
import Lean.Environment
import Lean.Syntax
import Lean.MetavarContext
import Lean.Data.OpenDecl
namespace Lean
builtin_initialize
registerOption `pp.raw { defValue := false, group := "pp", descr := "(pretty printer) print raw expression/syntax tree" }
registerOption `pp.raw.showInfo { defValue := false, group := "pp", descr := "(pretty printer) print `SourceInfo` metadata with raw printer" }
registerOption `pp.raw.maxDepth { defValue := (32 : Nat), group := "pp", descr := "(pretty printer) maximum `Syntax` depth for raw printer" };
def getSyntaxMaxDepth (opts : Options) : Nat :=
opts.getNat `pp.raw.maxDepth 32
def getPPRaw (opts : Options) : Bool :=
opts.getBool `pp.raw false
def getPPRawShowInfo (opts : Options) : Bool :=
opts.getBool `pp.raw.showInfo false
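/-
A usage sketch (hypothetical user file importing this module, not part of the original source):
the options registered above are toggled with `set_option`, e.g.
  set_option pp.raw true
  set_option pp.raw.maxDepth 64
-/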
structure PPContext where
env : Environment
mctx : MetavarContext := {}
lctx : LocalContext := {}
opts : Options := {}
currNamespace : Name := Name.anonymous
openDecls : List OpenDecl := []
structure PPFns where
ppExpr : PPContext → Expr → IO Format
ppTerm : PPContext → Syntax → IO Format
ppGoal : PPContext → MVarId → IO Format
instance : Inhabited PPFns := ⟨⟨arbitrary, arbitrary, arbitrary⟩⟩
builtin_initialize ppFnsRef : IO.Ref PPFns ←
IO.mkRef {
ppExpr := fun ctx e => return format (toString e),
ppTerm := fun ctx stx => return stx.formatStx (getSyntaxMaxDepth ctx.opts)
ppGoal := fun ctx mvarId => return "goal"
}
builtin_initialize ppExt : EnvExtension PPFns ←
registerEnvExtension ppFnsRef.get
def ppExpr (ctx : PPContext) (e : Expr) : IO Format := do
let e := ctx.mctx.instantiateMVars e |>.1
if getPPRaw ctx.opts then
return format (toString e)
else
try
ppExt.getState ctx.env |>.ppExpr ctx e
catch ex =>
pure f!"[Error pretty printing expression: {ex}. Falling back to raw printer.]{Format.line}{e}"
def ppTerm (ctx : PPContext) (stx : Syntax) : IO Format :=
if getPPRaw ctx.opts then
return stx.formatStx (getSyntaxMaxDepth ctx.opts) (getPPRawShowInfo ctx.opts)
else
try
ppExt.getState ctx.env |>.ppTerm ctx stx
catch ex =>
pure f!"[Error pretty printing syntax: {ex}. Falling back to raw printer.]{Format.line}{stx}"
def ppGoal (ctx : PPContext) (mvarId : MVarId) : IO Format :=
ppExt.getState ctx.env |>.ppGoal ctx mvarId
end Lean
| b336be72f40ac74a8b9a97b911fa9d84a7327c9c | e5c11e5a7d990ce404047c2bd848eeafac3c0a85 | /src/unique_factorization_domain.lean | e7c49119b14228c61d1d37d954a82778aab9f33b | ["LPPL-1.3c"] | permissive | lean-forward/class-number | 9ec63c24845e46efc8fa8b15324d0815918292c7 | 4fccf36d5e0e16accae84c16df77a3839ad964e4 | refs/heads/main | 1,686,927,014,542 | 1,624,886,724,000 | 1,624,886,724,000 | 327,319,245 | 2 | 0 | null | null | null | null | UTF-8 | Lean | false | false | 52,535 | lean |
/-
Copyright (c) 2018 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Jens Wagemaker, Aaron Anderson
-/
import algebra.gcd_monoid
import ring_theory.integral_domain
import ring_theory.noetherian
/-!
# Unique factorization
## Main Definitions
* `wf_dvd_monoid` holds for `monoid`s for which a strict divisibility relation is
well-founded.
* `unique_factorization_monoid` holds for `wf_dvd_monoid`s where
`irreducible` is equivalent to `prime`
## To do
* set up the complete lattice structure on `factor_set`.
-/
variables {α : Type*}
local infix ` ~ᵤ ` : 50 := associated
/-- Well-foundedness of the strict version of ∣, which is equivalent to the descending chain
condition on divisibility and to the ascending chain condition on
principal ideals in an integral domain.
-/
class wf_dvd_monoid (α : Type*) [comm_monoid_with_zero α] : Prop :=
(well_founded_dvd_not_unit : well_founded (@dvd_not_unit α _))
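/- For instance (a hypothetical check, assuming the relevant instances are imported): Noetherian
integral domains such as `ℤ` satisfy this via `is_noetherian_ring.wf_dvd_monoid` below, e.g.
`example : wf_dvd_monoid ℤ := infer_instance`. -/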
export wf_dvd_monoid (well_founded_dvd_not_unit)
@[priority 100] -- see Note [lower instance priority]
instance is_noetherian_ring.wf_dvd_monoid [integral_domain α] [is_noetherian_ring α] :
wf_dvd_monoid α :=
⟨by { convert inv_image.wf (λ a, ideal.span ({a} : set α)) (well_founded_submodule_gt _ _),
ext,
exact ideal.span_singleton_lt_span_singleton.symm }⟩
namespace wf_dvd_monoid
variables [comm_monoid_with_zero α]
open associates nat
theorem of_wf_dvd_monoid_associates (h : wf_dvd_monoid (associates α)): wf_dvd_monoid α :=
⟨begin
haveI := h,
refine (surjective.well_founded_iff mk_surjective _).2 wf_dvd_monoid.well_founded_dvd_not_unit,
intros, rw mk_dvd_not_unit_mk_iff
end⟩
variables [wf_dvd_monoid α]
instance wf_dvd_monoid_associates : wf_dvd_monoid (associates α) :=
⟨begin
refine (surjective.well_founded_iff mk_surjective _).1 wf_dvd_monoid.well_founded_dvd_not_unit,
intros, rw mk_dvd_not_unit_mk_iff
end⟩
theorem well_founded_associates : well_founded ((<) : associates α → associates α → Prop) :=
subrelation.wf (λ x y, dvd_not_unit_of_lt) wf_dvd_monoid.well_founded_dvd_not_unit
local attribute [elab_as_eliminator] well_founded.fix
lemma exists_irreducible_factor {a : α} (ha : ¬ is_unit a) (ha0 : a ≠ 0) :
∃ i, irreducible i ∧ i ∣ a :=
(irreducible_or_factor a ha).elim (λ hai, ⟨a, hai, dvd_refl _⟩)
(well_founded.fix
wf_dvd_monoid.well_founded_dvd_not_unit
(λ a ih ha ha0 ⟨x, y, hx, hy, hxy⟩,
have hx0 : x ≠ 0, from λ hx0, ha0 (by rw [← hxy, hx0, zero_mul]),
(irreducible_or_factor x hx).elim
(λ hxi, ⟨x, hxi, hxy ▸ by simp⟩)
(λ hxf, let ⟨i, hi⟩ := ih x ⟨hx0, y, hy, hxy.symm⟩ hx hx0 hxf in
⟨i, hi.1, dvd.trans hi.2 (hxy ▸ by simp)⟩)) a ha ha0)
@[elab_as_eliminator] lemma induction_on_irreducible {P : α → Prop} (a : α)
(h0 : P 0) (hu : ∀ u : α, is_unit u → P u)
(hi : ∀ a i : α, a ≠ 0 → irreducible i → P a → P (i * a)) :
P a :=
by haveI := classical.dec; exact
well_founded.fix wf_dvd_monoid.well_founded_dvd_not_unit
(λ a ih, if ha0 : a = 0 then ha0.symm ▸ h0
else if hau : is_unit a then hu a hau
else let ⟨i, hii, ⟨b, hb⟩⟩ := exists_irreducible_factor hau ha0 in
have hb0 : b ≠ 0, from λ hb0, by simp * at *,
hb.symm ▸ hi _ _ hb0 hii (ih _ ⟨hb0, i,
hii.1, by rw [hb, mul_comm]⟩))
a
lemma exists_factors (a : α) : a ≠ 0 →
∃f : multiset α, (∀b ∈ f, irreducible b) ∧ associated f.prod a :=
wf_dvd_monoid.induction_on_irreducible a
(λ h, (h rfl).elim)
(λ u hu _, ⟨0, ⟨by simp [hu], associated.symm (by simp [hu, associated_one_iff_is_unit])⟩⟩)
(λ a i ha0 hii ih hia0,
let ⟨s, hs⟩ := ih ha0 in
⟨i ::ₘ s, ⟨by clear _let_match; finish,
by { rw multiset.prod_cons,
exact associated_mul_mul (by refl) hs.2 }⟩⟩)
end wf_dvd_monoid
theorem wf_dvd_monoid.of_well_founded_associates [comm_cancel_monoid_with_zero α]
(h : well_founded ((<) : associates α → associates α → Prop)) : wf_dvd_monoid α :=
wf_dvd_monoid.of_wf_dvd_monoid_associates ⟨by { convert h, ext, exact associates.dvd_not_unit_iff_lt }⟩
theorem wf_dvd_monoid.iff_well_founded_associates [comm_cancel_monoid_with_zero α] :
wf_dvd_monoid α ↔ well_founded ((<) : associates α → associates α → Prop) :=
⟨by apply wf_dvd_monoid.well_founded_associates, wf_dvd_monoid.of_well_founded_associates⟩
section prio
set_option default_priority 100 -- see Note [default priority]
/-- unique factorization monoids.
These are defined as `comm_cancel_monoid_with_zero`s with well-founded strict divisibility
relations, but this is equivalent to more familiar definitions:
Each element (except zero) is uniquely represented as a multiset of irreducible factors.
Uniqueness is only up to associated elements.
Each element (except zero) is non-uniquely represented as a multiset
of prime factors.
To define a UFD using the definition in terms of multisets
of irreducible factors, use the definition `of_exists_unique_irreducible_factors`.
To define a UFD using the definition in terms of multisets
of prime factors, use the definition `of_exists_prime_factors`.
-/
class unique_factorization_monoid (α : Type*) [comm_cancel_monoid_with_zero α] extends wf_dvd_monoid α :
Prop :=
(irreducible_iff_prime : ∀ {a : α}, irreducible a ↔ prime a)
instance ufm_of_gcd_of_wf_dvd_monoid [nontrivial α] [comm_cancel_monoid_with_zero α]
[wf_dvd_monoid α] [gcd_monoid α] : unique_factorization_monoid α :=
{ irreducible_iff_prime := λ _, gcd_monoid.irreducible_iff_prime
.. ‹wf_dvd_monoid α› }
instance associates.ufm [comm_cancel_monoid_with_zero α]
[unique_factorization_monoid α] : unique_factorization_monoid (associates α) :=
{ irreducible_iff_prime := by { rw ← associates.irreducible_iff_prime_iff,
apply unique_factorization_monoid.irreducible_iff_prime, }
.. (wf_dvd_monoid.wf_dvd_monoid_associates : wf_dvd_monoid (associates α)) }
end prio
namespace unique_factorization_monoid
variables [comm_cancel_monoid_with_zero α] [unique_factorization_monoid α]
theorem exists_prime_factors (a : α) : a ≠ 0 →
∃ f : multiset α, (∀b ∈ f, prime b) ∧ f.prod ~ᵤ a :=
by { simp_rw ← unique_factorization_monoid.irreducible_iff_prime,
apply wf_dvd_monoid.exists_factors a }
@[elab_as_eliminator] lemma induction_on_prime {P : α → Prop}
(a : α) (h₁ : P 0) (h₂ : ∀ x : α, is_unit x → P x)
(h₃ : ∀ a p : α, a ≠ 0 → prime p → P a → P (p * a)) : P a :=
begin
simp_rw ← unique_factorization_monoid.irreducible_iff_prime at h₃,
exact wf_dvd_monoid.induction_on_irreducible a h₁ h₂ h₃,
end
lemma factors_unique : ∀{f g : multiset α},
(∀x∈f, irreducible x) → (∀x∈g, irreducible x) → f.prod ~ᵤ g.prod →
multiset.rel associated f g :=
by haveI := classical.dec_eq α; exact
λ f, multiset.induction_on f
(λ g _ hg h,
multiset.rel_zero_left.2 $
multiset.eq_zero_of_forall_not_mem (λ x hx,
have is_unit g.prod, by simpa [associated_one_iff_is_unit] using h.symm,
(hg x hx).not_unit (is_unit_iff_dvd_one.2 (dvd.trans (multiset.dvd_prod hx)
(is_unit_iff_dvd_one.1 this)))))
(λ p f ih g hf hg hfg,
let ⟨b, hbg, hb⟩ := exists_associated_mem_of_dvd_prod
(irreducible_iff_prime.1 (hf p (by simp)))
(λ q hq, irreducible_iff_prime.1 (hg _ hq)) $
(dvd_iff_dvd_of_rel_right hfg).1
(show p ∣ (p ::ₘ f).prod, by simp) in
begin
rw ← multiset.cons_erase hbg,
exact multiset.rel.cons hb (ih (λ q hq, hf _ (by simp [hq]))
(λ q (hq : q ∈ g.erase b), hg q (multiset.mem_of_mem_erase hq))
(associated_mul_left_cancel
(by rwa [← multiset.prod_cons, ← multiset.prod_cons, multiset.cons_erase hbg]) hb
(hf p (by simp)).ne_zero))
end)
end unique_factorization_monoid
lemma prime_factors_unique [comm_cancel_monoid_with_zero α] : ∀ {f g : multiset α},
(∀ x ∈ f, prime x) → (∀ x ∈ g, prime x) → f.prod ~ᵤ g.prod →
multiset.rel associated f g :=
by haveI := classical.dec_eq α; exact
λ f, multiset.induction_on f
(λ g _ hg h,
multiset.rel_zero_left.2 $
multiset.eq_zero_of_forall_not_mem $ λ x hx,
have is_unit g.prod, by simpa [associated_one_iff_is_unit] using h.symm,
(irreducible_of_prime $ hg x hx).not_unit $ is_unit_iff_dvd_one.2 $
dvd.trans (multiset.dvd_prod hx) (is_unit_iff_dvd_one.1 this))
(λ p f ih g hf hg hfg,
let ⟨b, hbg, hb⟩ := exists_associated_mem_of_dvd_prod
(hf p (by simp)) (λ q hq, hg _ hq) $
(dvd_iff_dvd_of_rel_right hfg).1
(show p ∣ (p ::ₘ f).prod, by simp) in
begin
rw ← multiset.cons_erase hbg,
exact multiset.rel.cons hb (ih (λ q hq, hf _ (by simp [hq]))
(λ q (hq : q ∈ g.erase b), hg q (multiset.mem_of_mem_erase hq))
(associated_mul_left_cancel
(by rwa [← multiset.prod_cons, ← multiset.prod_cons, multiset.cons_erase hbg]) hb
(hf p (by simp)).ne_zero))
end)
/-- If an irreducible has a prime factorization,
then it is an associate of one of its prime factors. -/
lemma prime_factors_irreducible [comm_cancel_monoid_with_zero α] {a : α} {f : multiset α}
(ha : irreducible a) (pfa : (∀b ∈ f, prime b) ∧ f.prod ~ᵤ a) :
∃ p, a ~ᵤ p ∧ f = p ::ₘ 0 :=
begin
haveI := classical.dec_eq α,
refine multiset.induction_on f (λ h, (ha.not_unit
(associated_one_iff_is_unit.1 (associated.symm h))).elim) _ pfa.2 pfa.1,
rintros p s _ ⟨u, hu⟩ hs,
use p,
have hs0 : s = 0,
{ by_contra hs0,
obtain ⟨q, hq⟩ := multiset.exists_mem_of_ne_zero hs0,
apply (hs q (by simp [hq])).2.1,
refine (ha.is_unit_or_is_unit (_ : _ = ((p * ↑u) * (s.erase q).prod) * _)).resolve_left _,
{ rw [mul_right_comm _ _ q, mul_assoc, ← multiset.prod_cons, multiset.cons_erase hq, ← hu,
mul_comm, mul_comm p _, mul_assoc],
simp, },
apply mt is_unit_of_mul_is_unit_left (mt is_unit_of_mul_is_unit_left _),
apply (hs p (multiset.mem_cons_self _ _)).2.1 },
simp only [mul_one, multiset.prod_cons, multiset.prod_zero, hs0] at *,
exact ⟨associated.symm ⟨u, hu⟩, rfl⟩,
end
section exists_prime_factors
variables [comm_cancel_monoid_with_zero α]
variables (pf : ∀ (a : α), a ≠ 0 → ∃ f : multiset α, (∀b ∈ f, prime b) ∧ f.prod ~ᵤ a)
include pf
lemma wf_dvd_monoid.of_exists_prime_factors : wf_dvd_monoid α :=
⟨begin
classical,
apply rel_hom.well_founded (rel_hom.mk _ _) (with_top.well_founded_lt nat.lt_wf),
{ intro a,
by_cases h : a = 0, { exact ⊤ },
exact (classical.some (pf a h)).card },
rintros a b ⟨ane0, ⟨c, hc, b_eq⟩⟩,
rw dif_neg ane0,
by_cases h : b = 0, { simp [h, lt_top_iff_ne_top] },
rw [dif_neg h, with_top.coe_lt_coe],
have cne0 : c ≠ 0, { refine mt (λ con, _) h, rw [b_eq, con, mul_zero] },
calc multiset.card (classical.some (pf a ane0))
< _ + multiset.card (classical.some (pf c cne0)) :
lt_add_of_pos_right _ (multiset.card_pos.mpr (λ con, hc (associated_one_iff_is_unit.mp _)))
... = multiset.card (classical.some (pf a ane0) + classical.some (pf c cne0)) :
(multiset.card_add _ _).symm
... = multiset.card (classical.some (pf b h)) :
multiset.card_eq_card_of_rel (prime_factors_unique _ (classical.some_spec (pf _ h)).1 _),
{ convert (classical.some_spec (pf c cne0)).2.symm,
rw [con, multiset.prod_zero] },
{ intros x hadd,
rw multiset.mem_add at hadd,
cases hadd; apply (classical.some_spec (pf _ _)).1 _ hadd },
{ rw multiset.prod_add,
transitivity a * c,
{ apply associated_mul_mul; apply (classical.some_spec (pf _ _)).2 },
{ rw ← b_eq,
apply (classical.some_spec (pf _ _)).2.symm, } }
end⟩
lemma irreducible_iff_prime_of_exists_prime_factors {p : α} : irreducible p ↔ prime p :=
begin
by_cases hp0 : p = 0,
{ simp [hp0] },
refine ⟨λ h, _, irreducible_of_prime⟩,
obtain ⟨f, hf⟩ := pf p hp0,
obtain ⟨q, hq, rfl⟩ := prime_factors_irreducible h hf,
rw prime_iff_of_associated hq,
exact hf.1 q (multiset.mem_cons_self _ _)
end
theorem unique_factorization_monoid.of_exists_prime_factors :
unique_factorization_monoid α :=
{ irreducible_iff_prime := λ _, irreducible_iff_prime_of_exists_prime_factors pf,
.. wf_dvd_monoid.of_exists_prime_factors pf }
end exists_prime_factors
theorem unique_factorization_monoid.iff_exists_prime_factors [comm_cancel_monoid_with_zero α] :
unique_factorization_monoid α ↔
(∀ (a : α), a ≠ 0 → ∃ f : multiset α, (∀b ∈ f, prime b) ∧ f.prod ~ᵤ a) :=
⟨λ h, @unique_factorization_monoid.exists_prime_factors _ _ h,
unique_factorization_monoid.of_exists_prime_factors⟩
theorem irreducible_iff_prime_of_exists_unique_irreducible_factors [comm_cancel_monoid_with_zero α]
(eif : ∀ (a : α), a ≠ 0 → ∃ f : multiset α, (∀b ∈ f, irreducible b) ∧ f.prod ~ᵤ a)
(uif : ∀ (f g : multiset α),
(∀ x ∈ f, irreducible x) → (∀ x ∈ g, irreducible x) → f.prod ~ᵤ g.prod → multiset.rel associated f g)
(p : α) : irreducible p ↔ prime p :=
⟨by letI := classical.dec_eq α; exact λ hpi,
⟨hpi.ne_zero, hpi.1,
λ a b ⟨x, hx⟩,
if hab0 : a * b = 0
then (eq_zero_or_eq_zero_of_mul_eq_zero hab0).elim
(λ ha0, by simp [ha0])
(λ hb0, by simp [hb0])
else
have hx0 : x ≠ 0, from λ hx0, by simp * at *,
have ha0 : a ≠ 0, from left_ne_zero_of_mul hab0,
have hb0 : b ≠ 0, from right_ne_zero_of_mul hab0,
begin
cases eif x hx0 with fx hfx,
cases eif a ha0 with fa hfa,
cases eif b hb0 with fb hfb,
have h : multiset.rel associated (p ::ₘ fx) (fa + fb),
{ apply uif,
{ exact λ i hi, (multiset.mem_cons.1 hi).elim (λ hip, hip.symm ▸ hpi) (hfx.1 _), },
{ exact λ i hi, (multiset.mem_add.1 hi).elim (hfa.1 _) (hfb.1 _), },
calc multiset.prod (p ::ₘ fx)
~ᵤ a * b : by rw [hx, multiset.prod_cons];
exact associated_mul_mul (by refl) hfx.2
... ~ᵤ (fa).prod * (fb).prod :
associated_mul_mul hfa.2.symm hfb.2.symm
... = _ : by rw multiset.prod_add, },
exact let ⟨q, hqf, hq⟩ := multiset.exists_mem_of_rel_of_mem h
(multiset.mem_cons_self p _) in
(multiset.mem_add.1 hqf).elim
(λ hqa, or.inl $ (dvd_iff_dvd_of_rel_left hq).2 $
(dvd_iff_dvd_of_rel_right hfa.2).1
(multiset.dvd_prod hqa))
(λ hqb, or.inr $ (dvd_iff_dvd_of_rel_left hq).2 $
(dvd_iff_dvd_of_rel_right hfb.2).1
(multiset.dvd_prod hqb))
end⟩, irreducible_of_prime⟩
theorem unique_factorization_monoid.of_exists_unique_irreducible_factors
[comm_cancel_monoid_with_zero α]
(eif : ∀ (a : α), a ≠ 0 → ∃ f : multiset α, (∀b ∈ f, irreducible b) ∧ f.prod ~ᵤ a)
(uif : ∀ (f g : multiset α),
(∀ x ∈ f, irreducible x) → (∀ x ∈ g, irreducible x) → f.prod ~ᵤ g.prod → multiset.rel associated f g) :
unique_factorization_monoid α :=
unique_factorization_monoid.of_exists_prime_factors (by
{ convert eif,
simp_rw irreducible_iff_prime_of_exists_unique_irreducible_factors eif uif })
namespace unique_factorization_monoid
variables [comm_cancel_monoid_with_zero α] [decidable_eq α] [nontrivial α] [normalization_monoid α]
variables [unique_factorization_monoid α]
/-- Noncomputably determines the multiset of prime factors. -/
noncomputable def factors (a : α) : multiset α := if h : a = 0 then 0 else
multiset.map normalize $ classical.some (unique_factorization_monoid.exists_prime_factors a h)
theorem factors_prod {a : α} (ane0 : a ≠ 0) : associated (factors a).prod a :=
begin
rw [factors, dif_neg ane0],
refine associated.trans _ (classical.some_spec (exists_prime_factors a ane0)).2,
rw [← associates.mk_eq_mk_iff_associated, ← associates.prod_mk, ← associates.prod_mk,
multiset.map_map],
congr' 2,
ext,
rw [function.comp_apply, associates.mk_normalize],
end
theorem prime_of_factor {a : α} : ∀ (x : α), x ∈ factors a → prime x :=
begin
rw [factors],
split_ifs with ane0, { simp },
intros x hx, rcases multiset.mem_map.1 hx with ⟨y, ⟨hy, rfl⟩⟩,
rw prime_iff_of_associated (normalize_associated),
exact (classical.some_spec (unique_factorization_monoid.exists_prime_factors a ane0)).1 y hy,
end
theorem irreducible_of_factor {a : α} : ∀ (x : α), x ∈ factors a → irreducible x :=
λ x h, irreducible_of_prime (prime_of_factor x h)
theorem normalize_factor {a : α} : ∀ (x : α), x ∈ factors a → normalize x = x :=
begin
rw factors,
split_ifs with h, { simp },
intros x hx,
obtain ⟨y, hy, rfl⟩ := multiset.mem_map.1 hx,
apply normalize_idem
end
lemma factors_irreducible {a : α} (ha : irreducible a) :
factors a = normalize a ::ₘ 0 :=
begin
obtain ⟨p, a_assoc, hp⟩ := prime_factors_irreducible ha
⟨prime_of_factor, factors_prod ha.ne_zero⟩,
have p_mem : p ∈ factors a,
{ rw hp, apply multiset.mem_cons_self },
convert hp,
rwa [← normalize_factor p p_mem, normalize_eq_normalize_iff, dvd_dvd_iff_associated]
end
lemma exists_mem_factors_of_dvd {a p : α} (ha0 : a ≠ 0) (hp : irreducible p) : p ∣ a →
∃ q ∈ factors a, p ~ᵤ q :=
λ ⟨b, hb⟩,
have hb0 : b ≠ 0, from λ hb0, by simp * at *,
have multiset.rel associated (p ::ₘ factors b) (factors a),
from factors_unique
(λ x hx, (multiset.mem_cons.1 hx).elim (λ h, h.symm ▸ hp)
(irreducible_of_factor _))
irreducible_of_factor
(associated.symm $ calc multiset.prod (factors a) ~ᵤ a : factors_prod ha0
... = p * b : hb
... ~ᵤ multiset.prod (p ::ₘ factors b) :
by rw multiset.prod_cons; exact associated_mul_mul
(associated.refl _)
(associated.symm (factors_prod hb0))),
multiset.exists_mem_of_rel_of_mem this (by simp)
@[simp] lemma factors_zero : factors (0 : α) = 0 := dif_pos rfl
@[simp] lemma factors_one : factors (1 : α) = 0 :=
begin
rw ← multiset.rel_zero_right,
apply factors_unique irreducible_of_factor,
{ intros x hx,
exfalso,
apply multiset.not_mem_zero x hx },
{ simp [factors_prod one_ne_zero] },
apply_instance
end
@[simp] lemma factors_mul {x y : α} (hx : x ≠ 0) (hy : y ≠ 0) :
factors (x * y) = factors x + factors y :=
begin
have h : (normalize : α → α) = associates.out ∘ associates.mk,
{ ext, rw [function.comp_apply, associates.out_mk], },
rw [← multiset.map_id' (factors (x * y)), ← multiset.map_id' (factors x),
← multiset.map_id' (factors y), ← multiset.map_congr normalize_factor,
← multiset.map_congr normalize_factor, ← multiset.map_congr normalize_factor,
← multiset.map_add, h, ← multiset.map_map associates.out, eq_comm,
← multiset.map_map associates.out],
refine congr rfl _,
apply multiset.map_mk_eq_map_mk_of_rel,
apply factors_unique,
{ intros x hx,
rcases multiset.mem_add.1 hx with hx | hx;
exact irreducible_of_factor x hx },
{ exact irreducible_of_factor },
{ rw multiset.prod_add,
exact associated.trans (associated_mul_mul (factors_prod hx) (factors_prod hy))
(factors_prod (mul_ne_zero hx hy)).symm, }
end
@[simp] lemma factors_pow {x : α} (n : ℕ) :
factors (x ^ n) = n •ℕ factors x :=
begin
induction n with n ih,
{ simp },
by_cases h0 : x = 0,
{ simp [h0, zero_pow n.succ_pos, smul_zero] },
rw [pow_succ, succ_nsmul, factors_mul h0 (pow_ne_zero _ h0), ih],
end
lemma dvd_iff_factors_le_factors {x y : α} (hx : x ≠ 0) (hy : y ≠ 0) :
x ∣ y ↔ factors x ≤ factors y :=
begin
split,
{ rintro ⟨c, rfl⟩,
simp [hx, right_ne_zero_of_mul hy] },
{ rw [← dvd_iff_dvd_of_rel_left (factors_prod hx), ← dvd_iff_dvd_of_rel_right (factors_prod hy)],
apply multiset.prod_dvd_prod }
end
@[simp] lemma factors_eq_factors_iff {x y : α} (hx : x ≠ 0) (hy : y ≠ 0) :
factors x = factors y ↔ x ~ᵤ y :=
by rw [le_antisymm_iff, ← dvd_dvd_iff_associated,
dvd_iff_factors_le_factors hx hy, dvd_iff_factors_le_factors hy hx]
end unique_factorization_monoid
namespace unique_factorization_monoid
open_locale classical
open multiset associates
noncomputable theory
variables [comm_cancel_monoid_with_zero α] [nontrivial α] [unique_factorization_monoid α]
/-- Noncomputably defines a `normalization_monoid` structure on a `unique_factorization_monoid`. -/
protected def normalization_monoid : normalization_monoid α :=
normalization_monoid_of_monoid_hom_right_inverse {
to_fun := λ a : associates α, if a = 0 then 0 else ((factors a).map
(classical.some mk_surjective.has_right_inverse : associates α → α)).prod,
map_one' := by simp,
map_mul' := λ x y, by {
by_cases hx : x = 0, { simp [hx] },
by_cases hy : y = 0, { simp [hy] },
simp [hx, hy] } } begin
intro x,
dsimp,
by_cases hx : x = 0, { simp [hx] },
have h : associates.mk_monoid_hom ∘ (classical.some mk_surjective.has_right_inverse) =
(id : associates α → associates α),
{ ext x,
rw [function.comp_apply, mk_monoid_hom_apply,
classical.some_spec mk_surjective.has_right_inverse x],
refl },
rw [if_neg hx, ← mk_monoid_hom_apply, monoid_hom.map_multiset_prod, map_map, h, map_id,
← associated_iff_eq],
apply factors_prod hx
end
instance : inhabited (normalization_monoid α) := ⟨unique_factorization_monoid.normalization_monoid⟩
end unique_factorization_monoid
namespace unique_factorization_monoid
variables {R : Type*} [comm_cancel_monoid_with_zero R] [unique_factorization_monoid R]
lemma no_factors_of_no_prime_factors {a b : R} (ha : a ≠ 0)
(h : (∀ {d}, d ∣ a → d ∣ b → ¬ prime d)) : ∀ {d}, d ∣ a → d ∣ b → is_unit d :=
λ d, induction_on_prime d
(by { simp only [zero_dvd_iff], intros, contradiction })
(λ x hx _ _, hx)
(λ d q hp hq ih dvd_a dvd_b,
absurd hq (h (dvd_of_mul_right_dvd dvd_a) (dvd_of_mul_right_dvd dvd_b)))
/-- Euclid's lemma: if `a ∣ b * c` and `a` and `c` have no common prime factors, `a ∣ b`.
Compare `is_coprime.dvd_of_dvd_mul_left`. -/
lemma dvd_of_dvd_mul_left_of_no_prime_factors {a b c : R} (ha : a ≠ 0) :
(∀ {d}, d ∣ a → d ∣ c → ¬ prime d) → a ∣ b * c → a ∣ b :=
begin
refine induction_on_prime c _ _ _,
{ intro no_factors,
simp only [dvd_zero, mul_zero, forall_prop_of_true],
haveI := classical.prop_decidable,
exact is_unit_iff_forall_dvd.mp
(no_factors_of_no_prime_factors ha @no_factors (dvd_refl a) (dvd_zero a)) _ },
{ rintros _ ⟨x, rfl⟩ _ a_dvd_bx,
apply units.dvd_mul_right.mp a_dvd_bx },
{ intros c p hc hp ih no_factors a_dvd_bpc,
apply ih (λ q dvd_a dvd_c hq, no_factors dvd_a (dvd_mul_of_dvd_right dvd_c _) hq),
rw mul_left_comm at a_dvd_bpc,
refine or.resolve_left (left_dvd_or_dvd_right_of_dvd_prime_mul hp a_dvd_bpc) (λ h, _),
exact no_factors h (dvd_mul_right p c) hp }
end
/-- Euclid's lemma: if `a ∣ b * c` and `a` and `b` have no common prime factors, `a ∣ c`.
Compare `is_coprime.dvd_of_dvd_mul_right`. -/
lemma dvd_of_dvd_mul_right_of_no_prime_factors {a b c : R} (ha : a ≠ 0)
(no_factors : ∀ {d}, d ∣ a → d ∣ b → ¬ prime d) : a ∣ b * c → a ∣ c :=
by simpa [mul_comm b c] using dvd_of_dvd_mul_left_of_no_prime_factors ha @no_factors
/-- If `a ≠ 0` and `b` are elements of a unique factorization domain, then dividing
out their common factor `c'` gives `a'` and `b'` with no factors in common. -/
lemma exists_reduced_factors : ∀ (a ≠ (0 : R)) b,
∃ a' b' c', (∀ {d}, d ∣ a' → d ∣ b' → is_unit d) ∧ c' * a' = a ∧ c' * b' = b :=
begin
haveI := classical.prop_decidable,
intros a,
refine induction_on_prime a _ _ _,
{ intros, contradiction },
{ intros a a_unit a_ne_zero b,
use [a, b, 1],
split,
{ intros p p_dvd_a _,
exact is_unit_of_dvd_unit p_dvd_a a_unit },
{ simp } },
{ intros a p a_ne_zero p_prime ih_a pa_ne_zero b,
by_cases p ∣ b,
{ rcases h with ⟨b, rfl⟩,
obtain ⟨a', b', c', no_factor, ha', hb'⟩ := ih_a a_ne_zero b,
refine ⟨a', b', p * c', @no_factor, _, _⟩,
{ rw [mul_assoc, ha'] },
{ rw [mul_assoc, hb'] } },
{ obtain ⟨a', b', c', coprime, rfl, rfl⟩ := ih_a a_ne_zero b,
refine ⟨p * a', b', c', _, mul_left_comm _ _ _, rfl⟩,
intros q q_dvd_pa' q_dvd_b',
cases left_dvd_or_dvd_right_of_dvd_prime_mul p_prime q_dvd_pa' with p_dvd_q q_dvd_a',
{ have : p ∣ c' * b' := dvd_mul_of_dvd_right (dvd_trans p_dvd_q q_dvd_b') _,
contradiction },
exact coprime q_dvd_a' q_dvd_b' } }
end
lemma exists_reduced_factors' (a b : R) (hb : b ≠ 0) :
∃ a' b' c', (∀ {d}, d ∣ a' → d ∣ b' → is_unit d) ∧ c' * a' = a ∧ c' * b' = b :=
let ⟨b', a', c', no_factor, hb, ha⟩ := exists_reduced_factors b hb a
in ⟨a', b', c', λ _ hpb hpa, no_factor hpa hpb, ha, hb⟩
section multiplicity
variables [nontrivial R] [normalization_monoid R] [decidable_eq R]
variables [decidable_rel (has_dvd.dvd : R → R → Prop)]
open multiplicity multiset
lemma le_multiplicity_iff_repeat_le_factors {a b : R} {n : ℕ} (ha : irreducible a) (hb : b ≠ 0) :
↑n ≤ multiplicity a b ↔ repeat (normalize a) n ≤ factors b :=
begin
rw ← pow_dvd_iff_le_multiplicity,
revert b,
induction n with n ih, { simp },
intros b hb,
split,
{ rintro ⟨c, rfl⟩,
rw [ne.def, pow_succ, mul_assoc, mul_eq_zero, decidable.not_or_iff_and_not] at hb,
rw [pow_succ, mul_assoc, factors_mul hb.1 hb.2, repeat_succ, factors_irreducible ha,
cons_add, cons_le_cons_iff, zero_add, ← ih hb.2],
apply dvd.intro _ rfl },
{ rw [multiset.le_iff_exists_add],
rintro ⟨u, hu⟩,
rw [← dvd_iff_dvd_of_rel_right (factors_prod hb), hu, prod_add, prod_repeat],
apply dvd.trans (dvd_of_associated (associated_pow_pow _)) (dvd.intro u.prod rfl),
apply associated_normalize }
end
lemma multiplicity_eq_count_factors {a b : R} (ha : irreducible a) (hb : b ≠ 0) :
multiplicity a b = (factors b).count (normalize a) :=
begin
apply le_antisymm,
{ apply enat.le_of_lt_add_one,
rw [← enat.coe_one, ← enat.coe_add, lt_iff_not_ge, ge_iff_le,
le_multiplicity_iff_repeat_le_factors ha hb, ← le_count_iff_repeat_le],
simp },
rw [le_multiplicity_iff_repeat_le_factors ha hb, ← le_count_iff_repeat_le],
end
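-- For instance, in `ℤ` (with the usual normalization) the factor multiset of `12` is `{2, 2, 3}`,
-- so `multiplicity 2 12 = 2 = count 2 (factors 12)`, matching the lemma above.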
end multiplicity
end unique_factorization_monoid
namespace associates
open unique_factorization_monoid associated multiset
variables [comm_cancel_monoid_with_zero α]
/-- `factor_set α` represents elements of a unique factorization domain as multisets.
The `multiset α` produced by `factors` is only unique up to associated elements, while the
multisets in `factor_set α` are unique by equality and restricted to irreducible elements. This
gives us a representation of each element as a unique multiset (or the added `⊤` for `0`), which
has a complete lattice structure. Infimum is the greatest common divisor and supremum is the least
common multiple.
-/
@[reducible] def {u} factor_set (α : Type u) [comm_cancel_monoid_with_zero α] :
Type u :=
with_top (multiset { a : associates α // irreducible a })
local attribute [instance] associated.setoid
theorem factor_set.coe_add {a b : multiset { a : associates α // irreducible a }} :
(↑(a + b) : factor_set α) = a + b :=
by norm_cast
lemma factor_set.sup_add_inf_eq_add [decidable_eq (associates α)] :
∀(a b : factor_set α), a ⊔ b + a ⊓ b = a + b
| none b := show ⊤ ⊔ b + ⊤ ⊓ b = ⊤ + b, by simp
| a none := show a ⊔ ⊤ + a ⊓ ⊤ = a + ⊤, by simp
| (some a) (some b) := show (a : factor_set α) ⊔ b + a ⊓ b = a + b, from
begin
rw [← with_top.coe_sup, ← with_top.coe_inf, ← with_top.coe_add, ← with_top.coe_add,
with_top.coe_eq_coe],
exact multiset.union_add_inter _ _
end
/-- Evaluates the product of a `factor_set` to be the product of the corresponding multiset,
or `0` if there is none. -/
def factor_set.prod : factor_set α → associates α
| none := 0
| (some s) := (s.map coe).prod
@[simp] theorem prod_top : (⊤ : factor_set α).prod = 0 := rfl
@[simp] theorem prod_coe {s : multiset { a : associates α // irreducible a }} :
(s : factor_set α).prod = (s.map coe).prod :=
rfl
@[simp] theorem prod_add : ∀(a b : factor_set α), (a + b).prod = a.prod * b.prod
| none b := show (⊤ + b).prod = (⊤:factor_set α).prod * b.prod, by simp
| a none := show (a + ⊤).prod = a.prod * (⊤:factor_set α).prod, by simp
| (some a) (some b) :=
show (↑a + ↑b:factor_set α).prod = (↑a:factor_set α).prod * (↑b:factor_set α).prod,
by rw [← factor_set.coe_add, prod_coe, prod_coe, prod_coe, multiset.map_add, multiset.prod_add]
theorem prod_mono : ∀{a b : factor_set α}, a ≤ b → a.prod ≤ b.prod
| none b h := have b = ⊤, from top_unique h, by rw [this, prod_top]; exact le_refl _
| a none h := show a.prod ≤ (⊤ : factor_set α).prod, by simp; exact le_top
| (some a) (some b) h := prod_le_prod $ multiset.map_le_map $ with_top.coe_le_coe.1 $ h
/-- `bcount p s` is the multiplicity of `p` in the factor_set `s` (with bundled `p`)-/
def bcount [decidable_eq (associates α)] (p : {a : associates α // irreducible a}) :
factor_set α → ℕ
| none := 0
| (some s) := s.count p
variables [dec_irr : Π (p : associates α), decidable (irreducible p)]
include dec_irr
/-- `count p s` is the multiplicity of the irreducible `p` in the factor_set `s`.
If `p` is not irreducible, `count p s` is defined to be `0`. -/
def count [decidable_eq (associates α)] (p : associates α) :
factor_set α → ℕ :=
if hp : irreducible p then bcount ⟨p, hp⟩ else 0
@[simp] lemma count_some [decidable_eq (associates α)] {p : associates α} (hp : irreducible p)
(s : multiset _) : count p (some s) = s.count ⟨p, hp⟩:=
by { dunfold count, split_ifs, refl }
@[simp] lemma count_zero [decidable_eq (associates α)] {p : associates α} (hp : irreducible p) :
count p (0 : factor_set α) = 0 :=
by { dunfold count, split_ifs, refl }
lemma count_reducible [decidable_eq (associates α)] {p : associates α} (hp : ¬ irreducible p) :
count p = 0 := dif_neg hp
omit dec_irr
/-- membership in a factor_set (bundled version) -/
def bfactor_set_mem : {a : associates α // irreducible a} → (factor_set α) → Prop
| _ ⊤ := true
| p (some l) := p ∈ l
include dec_irr
/-- `factor_set_mem p s` is the predicate that the irreducible `p` is a member of `s : factor_set α`.
If `p` is not irreducible, `p` is not a member of any `factor_set`. -/
def factor_set_mem (p : associates α) (s : factor_set α) : Prop :=
if hp : irreducible p then bfactor_set_mem ⟨p, hp⟩ s else false
instance : has_mem (associates α) (factor_set α) := ⟨factor_set_mem⟩
@[simp] lemma factor_set_mem_eq_mem (p : associates α) (s : factor_set α) :
factor_set_mem p s = (p ∈ s) := rfl
lemma mem_factor_set_top {p : associates α} {hp : irreducible p} :
p ∈ (⊤ : factor_set α) :=
begin
dunfold has_mem.mem, dunfold factor_set_mem, split_ifs, exact trivial
end
lemma mem_factor_set_some {p : associates α} {hp : irreducible p}
{l : multiset {a : associates α // irreducible a }} :
p ∈ (l : factor_set α) ↔ subtype.mk p hp ∈ l :=
begin
dunfold has_mem.mem, dunfold factor_set_mem, split_ifs, refl
end
lemma reducible_not_mem_factor_set {p : associates α} (hp : ¬ irreducible p)
(s : factor_set α) : ¬ p ∈ s :=
λ (h : if hp : irreducible p then bfactor_set_mem ⟨p, hp⟩ s else false),
by rwa [dif_neg hp] at h
omit dec_irr
variable [unique_factorization_monoid α]
theorem unique' {p q : multiset (associates α)} :
(∀a∈p, irreducible a) → (∀a∈q, irreducible a) → p.prod = q.prod → p = q :=
begin
apply multiset.induction_on_multiset_quot p,
apply multiset.induction_on_multiset_quot q,
assume s t hs ht eq,
refine multiset.map_mk_eq_map_mk_of_rel (unique_factorization_monoid.factors_unique _ _ _),
{ exact assume a ha, ((irreducible_mk _).1 $ hs _ $ multiset.mem_map_of_mem _ ha) },
{ exact assume a ha, ((irreducible_mk _).1 $ ht _ $ multiset.mem_map_of_mem _ ha) },
simpa [quot_mk_eq_mk, prod_mk, mk_eq_mk_iff_associated] using eq
end
variables [nontrivial α] [normalization_monoid α]
private theorem forall_map_mk_factors_irreducible [decidable_eq α] (x : α) (hx : x ≠ 0) :
∀(a : associates α), a ∈ multiset.map associates.mk (factors x) → irreducible a :=
begin
assume a ha,
rcases multiset.mem_map.1 ha with ⟨c, hc, rfl⟩,
exact (irreducible_mk c).2 (irreducible_of_factor _ hc)
end
theorem prod_le_prod_iff_le {p q : multiset (associates α)}
(hp : ∀a∈p, irreducible a) (hq : ∀a∈q, irreducible a) :
p.prod ≤ q.prod ↔ p ≤ q :=
iff.intro
begin
classical,
rintros ⟨⟨c⟩, eqc⟩,
have : c ≠ 0, from (mt mk_eq_zero.2 $
assume (hc : quot.mk setoid.r c = 0),
have (0 : associates α) ∈ q, from prod_eq_zero_iff.1 $ eqc.symm ▸ hc.symm ▸ mul_zero _,
not_irreducible_zero ((irreducible_mk 0).1 $ hq _ this)),
have : associates.mk (factors c).prod = quot.mk setoid.r c,
from mk_eq_mk_iff_associated.2 (factors_prod this),
refine multiset.le_iff_exists_add.2 ⟨(factors c).map associates.mk, unique' hq _ _⟩,
{ assume x hx,
rcases multiset.mem_add.1 hx with h | h,
exact hp x h,
exact forall_map_mk_factors_irreducible c ‹c ≠ 0› _ h },
{ simp [multiset.prod_add, prod_mk, *] at * }
end
prod_le_prod
variables [dec : decidable_eq α] [dec' : decidable_eq (associates α)]
include dec
/-- This returns the multiset of irreducible factors of `a : α` as a multiset of bundled
irreducible associates (in contrast to `factors` below, it is not wrapped in `with_top`). -/
noncomputable def factors' (a : α) :
multiset { a : associates α // irreducible a } :=
(factors a).pmap (λa ha, ⟨associates.mk a, (irreducible_mk _).2 ha⟩)
(irreducible_of_factor)
@[simp] theorem map_subtype_coe_factors' {a : α} :
(factors' a).map coe = (factors a).map associates.mk :=
by simp [factors', multiset.map_pmap, multiset.pmap_eq_map]
theorem factors'_cong {a b : α} (ha : a ≠ 0) (hb : b ≠ 0) (h : a ~ᵤ b) :
factors' a = factors' b :=
have multiset.rel associated (factors a) (factors b), from
factors_unique irreducible_of_factor irreducible_of_factor
((factors_prod ha).trans $ h.trans $ (factors_prod hb).symm),
by simpa [(multiset.map_eq_map subtype.coe_injective).symm, rel_associated_iff_map_eq_map.symm]
include dec'
/-- This returns the multiset of irreducible factors of an associate as a `factor_set`,
that is, a multiset of bundled irreducible associates wrapped in `with_top` (with `factors 0 = ⊤`). -/
noncomputable def factors (a : associates α) :
factor_set α :=
begin
refine (if h : a = 0 then ⊤ else
quotient.hrec_on a (λx h, some $ factors' x) _ h),
assume a b hab,
apply function.hfunext,
{ have : a ~ᵤ 0 ↔ b ~ᵤ 0, from
iff.intro (assume ha0, hab.symm.trans ha0) (assume hb0, hab.trans hb0),
simp only [associated_zero_iff_eq_zero] at this,
simp only [quotient_mk_eq_mk, this, mk_eq_zero] },
exact (assume ha hb eq, heq_of_eq $ congr_arg some $ factors'_cong
(λ c, ha (mk_eq_zero.2 c)) (λ c, hb (mk_eq_zero.2 c)) hab)
end
@[simp] theorem factors_0 : (0 : associates α).factors = ⊤ :=
dif_pos rfl
@[simp] theorem factors_mk (a : α) (h : a ≠ 0) :
(associates.mk a).factors = factors' a :=
by { classical, apply dif_neg, apply (mt mk_eq_zero.1 h) }
theorem prod_factors : ∀(s : factor_set α), s.prod.factors = s
| none := by simp [factor_set.prod]; refl
| (some s) :=
begin
unfold factor_set.prod,
generalize eq_a : (s.map coe).prod = a,
rcases a with ⟨a⟩,
rw quot_mk_eq_mk at *,
have : (s.map (coe : _ → associates α)).prod ≠ 0, from assume ha,
let ⟨⟨a, ha⟩, h, eq⟩ := multiset.mem_map.1 (prod_eq_zero_iff.1 ha) in
have irreducible (0 : associates α), from eq ▸ ha,
not_irreducible_zero ((irreducible_mk _).1 this),
have ha : a ≠ 0, by simp [*] at *,
suffices : (unique_factorization_monoid.factors a).map associates.mk = s.map coe,
{ rw [factors_mk a ha],
apply congr_arg some _,
simpa [(multiset.map_eq_map subtype.coe_injective).symm] },
refine unique'
(forall_map_mk_factors_irreducible _ ha)
(assume a ha, let ⟨⟨x, hx⟩, ha, eq⟩ := multiset.mem_map.1 ha in eq ▸ hx)
_,
rw [prod_mk, eq_a, mk_eq_mk_iff_associated],
exact factors_prod ha
end
@[simp]
theorem factors_prod (a : associates α) : a.factors.prod = a :=
quotient.induction_on a $ assume a, decidable.by_cases
(assume : associates.mk a = 0, by simp [quotient_mk_eq_mk, this])
(assume : associates.mk a ≠ 0,
have a ≠ 0, by simp * at *,
by simp [this, quotient_mk_eq_mk, prod_mk, mk_eq_mk_iff_associated.2 (factors_prod this)])
theorem eq_of_factors_eq_factors {a b : associates α} (h : a.factors = b.factors) : a = b :=
have a.factors.prod = b.factors.prod, by rw h,
by rwa [factors_prod, factors_prod] at this
omit dec dec'
theorem eq_of_prod_eq_prod {a b : factor_set α} (h : a.prod = b.prod) : a = b :=
begin
classical,
have : a.prod.factors = b.prod.factors, by rw h,
rwa [prod_factors, prod_factors] at this
end
include dec dec'
@[simp] theorem factors_mul (a b : associates α) : (a * b).factors = a.factors + b.factors :=
eq_of_prod_eq_prod $ eq_of_factors_eq_factors $
by rw [prod_add, factors_prod, factors_prod, factors_prod]
theorem factors_mono : ∀{a b : associates α}, a ≤ b → a.factors ≤ b.factors
| s t ⟨d, rfl⟩ := by rw [factors_mul] ; exact le_add_of_nonneg_right bot_le
theorem factors_le {a b : associates α} : a.factors ≤ b.factors ↔ a ≤ b :=
iff.intro
(assume h, have a.factors.prod ≤ b.factors.prod, from prod_mono h,
by rwa [factors_prod, factors_prod] at this)
factors_mono
omit dec dec'
theorem prod_le {a b : factor_set α} : a.prod ≤ b.prod ↔ a ≤ b :=
begin
classical,
exact iff.intro
(assume h, have a.prod.factors ≤ b.prod.factors, from factors_mono h,
by rwa [prod_factors, prod_factors] at this)
prod_mono
end
include dec dec'
noncomputable instance : has_sup (associates α) := ⟨λa b, (a.factors ⊔ b.factors).prod⟩
noncomputable instance : has_inf (associates α) := ⟨λa b, (a.factors ⊓ b.factors).prod⟩
noncomputable instance : bounded_lattice (associates α) :=
{ sup := (⊔),
inf := (⊓),
sup_le :=
assume a b c hac hbc, factors_prod c ▸ prod_mono (sup_le (factors_mono hac) (factors_mono hbc)),
le_sup_left := assume a b,
le_trans (le_of_eq (factors_prod a).symm) $ prod_mono $ le_sup_left,
le_sup_right := assume a b,
le_trans (le_of_eq (factors_prod b).symm) $ prod_mono $ le_sup_right,
le_inf :=
assume a b c hac hbc, factors_prod a ▸ prod_mono (le_inf (factors_mono hac) (factors_mono hbc)),
inf_le_left := assume a b,
le_trans (prod_mono inf_le_left) (le_of_eq (factors_prod a)),
inf_le_right := assume a b,
le_trans (prod_mono inf_le_right) (le_of_eq (factors_prod b)),
.. associates.partial_order,
.. associates.order_top,
.. associates.order_bot }
lemma sup_mul_inf (a b : associates α) : (a ⊔ b) * (a ⊓ b) = a * b :=
show (a.factors ⊔ b.factors).prod * (a.factors ⊓ b.factors).prod = a * b,
begin
refine eq_of_factors_eq_factors _,
rw [← prod_add, prod_factors, factors_mul, factor_set.sup_add_inf_eq_add]
end
include dec_irr
lemma dvd_of_mem_factors {a p : associates α} {hp : irreducible p}
(hm : p ∈ factors a) : p ∣ a :=
begin
by_cases ha0 : a = 0, { rw ha0, exact dvd_zero p },
obtain ⟨a0, nza, ha'⟩ := exists_non_zero_rep ha0,
rw [← associates.factors_prod a],
rw [← ha', factors_mk a0 nza] at hm ⊢,
erw prod_coe,
apply multiset.dvd_prod, apply multiset.mem_map.mpr,
exact ⟨⟨p, hp⟩, mem_factor_set_some.mp hm, rfl⟩
end
omit dec'
lemma dvd_of_mem_factors' {a : α} {p : associates α} {hp : irreducible p} {hz : a ≠ 0}
(h_mem : subtype.mk p hp ∈ factors' a) : p ∣ associates.mk a :=
by { haveI := classical.dec_eq (associates α),
apply @dvd_of_mem_factors _ _ _ _ _ _ _ _ _ _ hp,
rw factors_mk _ hz,
apply mem_factor_set_some.2 h_mem }
omit dec_irr
lemma mem_factors'_of_dvd {a p : α} (ha0 : a ≠ 0) (hp : irreducible p) (hd : p ∣ a) :
subtype.mk (associates.mk p) ((irreducible_mk _).2 hp) ∈ factors' a :=
begin
obtain ⟨q, hq, hpq⟩ := exists_mem_factors_of_dvd ha0 hp hd,
apply multiset.mem_pmap.mpr, use q, use hq,
exact subtype.eq (eq.symm (mk_eq_mk_iff_associated.mpr hpq))
end
include dec_irr
lemma mem_factors'_iff_dvd {a p : α} (ha0 : a ≠ 0) (hp : irreducible p) :
subtype.mk (associates.mk p) ((irreducible_mk _).2 hp) ∈ factors' a ↔ p ∣ a :=
begin
split,
{ rw ← mk_dvd_mk, apply dvd_of_mem_factors', apply ha0 },
{ apply mem_factors'_of_dvd ha0 }
end
include dec'
lemma mem_factors_of_dvd {a p : α} (ha0 : a ≠ 0) (hp : irreducible p) (hd : p ∣ a) :
(associates.mk p) ∈ factors (associates.mk a) :=
begin
rw factors_mk _ ha0, exact mem_factor_set_some.mpr (mem_factors'_of_dvd ha0 hp hd)
end
lemma mem_factors_iff_dvd {a p : α} (ha0 : a ≠ 0) (hp : irreducible p) :
(associates.mk p) ∈ factors (associates.mk a) ↔ p ∣ a :=
begin
split,
{ rw ← mk_dvd_mk, apply dvd_of_mem_factors, exact (irreducible_mk p).mpr hp },
{ apply mem_factors_of_dvd ha0 hp }
end
lemma exists_prime_dvd_of_not_inf_one {a b : α}
(ha : a ≠ 0) (hb : b ≠ 0) (h : (associates.mk a) ⊓ (associates.mk b) ≠ 1) :
∃ (p : α), prime p ∧ p ∣ a ∧ p ∣ b :=
begin
have hz : (factors (associates.mk a)) ⊓ (factors (associates.mk b)) ≠ 0,
{ contrapose! h with hf,
change ((factors (associates.mk a)) ⊓ (factors (associates.mk b))).prod = 1,
rw hf,
exact multiset.prod_zero },
rw [factors_mk a ha, factors_mk b hb, ← with_top.coe_inf] at hz,
obtain ⟨⟨p0, p0_irr⟩, p0_mem⟩ := multiset.exists_mem_of_ne_zero ((mt with_top.coe_eq_coe.mpr) hz),
rw multiset.inf_eq_inter at p0_mem,
obtain ⟨p, rfl⟩ : ∃ p, associates.mk p = p0 := quot.exists_rep p0,
refine ⟨p, _, _, _⟩,
{ rw [← irreducible_iff_prime, ← irreducible_mk],
exact p0_irr },
{ apply dvd_of_mk_le_mk,
apply dvd_of_mem_factors' (multiset.mem_inter.mp p0_mem).left,
apply ha, },
{ apply dvd_of_mk_le_mk,
apply dvd_of_mem_factors' (multiset.mem_inter.mp p0_mem).right,
apply hb }
end
theorem coprime_iff_inf_one {a b : α} (ha0 : a ≠ 0) (hb0 : b ≠ 0) :
(associates.mk a) ⊓ (associates.mk b) = 1 ↔ ∀ {d : α}, d ∣ a → d ∣ b → ¬ prime d :=
begin
split,
{ intros hg p ha hb hp,
refine ((associates.prime_mk _).mpr hp).not_unit (is_unit_of_dvd_one _ _),
rw ← hg,
exact le_inf (mk_le_mk_of_dvd ha) (mk_le_mk_of_dvd hb) },
{ contrapose,
intros hg hc,
obtain ⟨p, hp, hpa, hpb⟩ := exists_prime_dvd_of_not_inf_one ha0 hb0 hg,
exact hc hpa hpb hp }
end
omit dec_irr
theorem factors_prime_pow {p : associates α} (hp : irreducible p)
(k : ℕ) : factors (p ^ k) = some (multiset.repeat ⟨p, hp⟩ k) :=
eq_of_prod_eq_prod (by rw [associates.factors_prod, factor_set.prod, multiset.map_repeat,
multiset.prod_repeat, subtype.coe_mk])
include dec_irr
theorem prime_pow_dvd_iff_le {m p : associates α} (h₁ : m ≠ 0)
(h₂ : irreducible p) {k : ℕ} : p ^ k ≤ m ↔ k ≤ count p m.factors :=
begin
obtain ⟨a, nz, rfl⟩ := associates.exists_non_zero_rep h₁,
rw [factors_mk _ nz, ← with_top.some_eq_coe, count_some, multiset.le_count_iff_repeat_le,
← factors_le, factors_prime_pow h₂, factors_mk _ nz],
exact with_top.coe_le_coe
end
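/- Illustration (a schematic example, not a statement proved here): for irreducible `p` and
`m ≠ 0` with `count p m.factors = 3`, `prime_pow_dvd_iff_le` yields `p ^ 2 ≤ m` (since `2 ≤ 3`)
and rules out `p ^ 4 ≤ m`, so the exact power of `p` dividing `m` can be read off from the
factor multiset. -/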
theorem le_of_count_ne_zero {m p : associates α} (h0 : m ≠ 0)
(hp : irreducible p) : count p m.factors ≠ 0 → p ≤ m :=
begin
rw [← pos_iff_ne_zero],
intro h,
rw [← pow_one p],
apply (prime_pow_dvd_iff_le h0 hp).2,
simpa only
end
theorem count_mul {a : associates α} (ha : a ≠ 0) {b : associates α} (hb : b ≠ 0)
{p : associates α} (hp : irreducible p) :
count p (factors (a * b)) = count p a.factors + count p b.factors :=
begin
obtain ⟨a0, nza, ha'⟩ := exists_non_zero_rep ha,
obtain ⟨b0, nzb, hb'⟩ := exists_non_zero_rep hb,
rw [factors_mul, ← ha', ← hb', factors_mk a0 nza, factors_mk b0 nzb, ← factor_set.coe_add,
← with_top.some_eq_coe, ← with_top.some_eq_coe, ← with_top.some_eq_coe, count_some hp,
multiset.count_add, count_some hp, count_some hp]
end
theorem count_of_coprime {a : associates α} (ha : a ≠ 0) {b : associates α} (hb : b ≠ 0)
(hab : ∀ d, d ∣ a → d ∣ b → ¬ prime d) {p : associates α} (hp : irreducible p) :
count p a.factors = 0 ∨ count p b.factors = 0 :=
begin
rw [or_iff_not_imp_left, ← ne.def],
intro hca,
contrapose! hab with hcb,
exact ⟨p, le_of_count_ne_zero ha hp hca, le_of_count_ne_zero hb hp hcb,
(irreducible_iff_prime.mp hp)⟩,
end
theorem count_mul_of_coprime {a : associates α} (ha : a ≠ 0) {b : associates α} (hb : b ≠ 0)
{p : associates α} (hp : irreducible p) (hab : ∀ d, d ∣ a → d ∣ b → ¬ prime d) :
count p a.factors = 0 ∨ count p a.factors = count p (a * b).factors :=
begin
cases count_of_coprime ha hb hab hp with hz hb0, { tauto },
apply or.intro_right,
rw [count_mul ha hb hp, hb0, add_zero]
end
theorem count_mul_of_coprime' {a : associates α} (ha : a ≠ 0) {b : associates α} (hb : b ≠ 0)
{p : associates α} (hp : irreducible p) (hab : ∀ d, d ∣ a → d ∣ b → ¬ prime d) :
count p (a * b).factors = count p a.factors
∨ count p (a * b).factors = count p b.factors :=
begin
rw [count_mul ha hb hp],
cases count_of_coprime ha hb hab hp with ha0 hb0,
{ apply or.intro_right, rw [ha0, zero_add] },
{ apply or.intro_left, rw [hb0, add_zero] }
end
theorem dvd_count_of_dvd_count_mul {a b : associates α} (ha : a ≠ 0) (hb : b ≠ 0)
{p : associates α} (hp : irreducible p) (hab : ∀ d, d ∣ a → d ∣ b → ¬ prime d)
{k : ℕ} (habk : k ∣ count p (a * b).factors) : k ∣ count p a.factors :=
begin
cases count_of_coprime ha hb hab hp with hz h,
{ rw hz, exact dvd_zero k },
{ rw [count_mul ha hb hp, h] at habk, exact habk }
end
omit dec_irr
@[simp] lemma factors_one : factors (1 : associates α) = 0 :=
begin
apply eq_of_prod_eq_prod,
rw associates.factors_prod,
exact multiset.prod_zero,
end
@[simp] theorem pow_factors {a : associates α} {k : ℕ} : (a ^ k).factors = k •ℕ a.factors :=
begin
induction k with n h,
{ rw [zero_nsmul, pow_zero], exact factors_one },
{ rw [pow_succ, succ_nsmul, factors_mul, h] }
end
include dec_irr
lemma count_pow {a : associates α} (ha : a ≠ 0) {p : associates α} (hp : irreducible p)
(k : ℕ) : count p (a ^ k).factors = k * count p a.factors :=
begin
induction k with n h,
{ rw [pow_zero, factors_one, zero_mul, count_zero hp] },
{ rw [pow_succ, count_mul ha (pow_ne_zero _ ha) hp, h, nat.succ_eq_add_one], ring }
end
theorem dvd_count_pow {a : associates α} (ha : a ≠ 0) {p : associates α} (hp : irreducible p)
(k : ℕ) : k ∣ count p (a ^ k).factors := by { rw count_pow ha hp, apply dvd_mul_right }
theorem is_pow_of_dvd_count {a : associates α} (ha : a ≠ 0) {k : ℕ}
(hk : ∀ (p : associates α) (hp : irreducible p), k ∣ count p a.factors) :
∃ (b : associates α), a = b ^ k :=
begin
obtain ⟨a0, hz, rfl⟩ := exists_non_zero_rep ha,
rw [factors_mk a0 hz] at hk,
have hk' : ∀ (p : {a : associates α // irreducible a}), k ∣ (factors' a0).count p,
{ intro p,
have pp : p = ⟨p.val, p.2⟩, { simp only [subtype.coe_eta, subtype.val_eq_coe] },
rw [pp, ← count_some p.2], exact hk p.val p.2 },
obtain ⟨u, hu⟩ := multiset.exists_smul_of_dvd_count _ hk',
use (u : factor_set α).prod,
apply eq_of_factors_eq_factors,
rw [pow_factors, prod_factors, factors_mk a0 hz, ← with_top.some_eq_coe, hu],
exact with_bot.coe_nsmul u k
end
omit dec
omit dec_irr
omit dec'
theorem eq_pow_of_mul_eq_pow {a b c : associates α} (ha : a ≠ 0) (hb : b ≠ 0)
(hab : ∀ d, d ∣ a → d ∣ b → ¬ prime d) {k : ℕ} (h : a * b = c ^ k) :
∃ (d : associates α), a = d ^ k :=
begin
classical,
by_cases hk0 : k = 0,
{ use 1,
rw [hk0, pow_zero] at h ⊢,
apply (mul_eq_one_iff.1 h).1 },
{ refine is_pow_of_dvd_count ha _,
intros p hp,
apply dvd_count_of_dvd_count_mul ha hb hp hab,
rw h,
apply dvd_count_pow _ hp,
rintros rfl,
rw zero_pow' _ hk0 at h,
cases mul_eq_zero.mp h; contradiction }
end
end associates
section
open associates unique_factorization_monoid
/-- `to_gcd_monoid` constructs a GCD monoid out of a normalization on a
unique factorization domain. -/
noncomputable def unique_factorization_monoid.to_gcd_monoid
(α : Type*) [comm_cancel_monoid_with_zero α] [nontrivial α] [unique_factorization_monoid α]
[normalization_monoid α] [decidable_eq (associates α)] [decidable_eq α] : gcd_monoid α :=
{ gcd := λa b, (associates.mk a ⊓ associates.mk b).out,
lcm := λa b, (associates.mk a ⊔ associates.mk b).out,
gcd_dvd_left := assume a b, (out_dvd_iff a (associates.mk a ⊓ associates.mk b)).2 $ inf_le_left,
gcd_dvd_right := assume a b, (out_dvd_iff b (associates.mk a ⊓ associates.mk b)).2 $ inf_le_right,
dvd_gcd := assume a b c hac hab, show a ∣ (associates.mk c ⊓ associates.mk b).out,
by rw [dvd_out_iff, le_inf_iff, mk_le_mk_iff_dvd_iff, mk_le_mk_iff_dvd_iff]; exact ⟨hac, hab⟩,
lcm_zero_left := assume a, show (⊤ ⊔ associates.mk a).out = 0, by simp,
lcm_zero_right := assume a, show (associates.mk a ⊔ ⊤).out = 0, by simp,
gcd_mul_lcm := assume a b,
show (associates.mk a ⊓ associates.mk b).out * (associates.mk a ⊔ associates.mk b).out =
normalize (a * b),
by rw [← out_mk, ← out_mul, mul_comm, sup_mul_inf]; refl,
normalize_gcd := assume a b, by convert normalize_out _,
.. ‹normalization_monoid α› }
end
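/- Usage sketch (illustrative only; whether and where to register the instance is a choice made
by downstream files, not here): over a UFD `R` with a `normalization_monoid R` instance and
decidable equality on `R` and `associates R`, writing
`local attribute [instance] unique_factorization_monoid.to_gcd_monoid`
makes the `gcd_monoid` API available for `R`, with `gcd a b` the normalized representative of
`associates.mk a ⊓ associates.mk b`. -/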
namespace unique_factorization_monoid
variables {R : Type*} [comm_cancel_monoid_with_zero R] [unique_factorization_monoid R]
lemma is_unit_of_no_prime_factor {a : R} :
a ≠ 0 → (∀ {p}, prime p → ¬ p ∣ a) → is_unit a :=
induction_on_prime a (absurd rfl) (λ a ha _ _, ha)
(λ a p _ hp ih _ no_factor, absurd (dvd_mul_right p a) (no_factor hp))
/-- In a unique factorization monoid, there are finitely many divisors of `x`, up to units. -/
noncomputable def finite_divisors
{x : R} (x_ne : x ≠ 0) : fintype {y : associates R // y ∣ associates.mk x} :=
begin
haveI : nontrivial R := ⟨⟨x, 0, x_ne⟩⟩,
haveI := @unique_factorization_monoid.normalization_monoid R _ _ _,
haveI := classical.dec_eq R,
haveI := classical.dec_eq (associates R),
set divisors := ((factors x).powerset.map (λ (s : multiset R), associates.mk s.prod)).to_finset,
apply fintype.of_finset divisors,
intro y,
obtain ⟨y, rfl⟩ := associates.mk_surjective y,
simp only [exists_prop, multiset.mem_to_finset, multiset.mem_powerset, exists_eq_right,
multiset.mem_map, associates.mk_eq_mk_iff_associated],
split,
{ rintros ⟨s, hs, hy⟩,
apply associates.mk_dvd_mk.mpr,
have prod_s_ne : s.prod ≠ 0,
{ intro hz,
apply x_ne (eq_zero_of_zero_dvd _),
rw multiset.prod_eq_zero_iff at hz,
rw ← dvd_iff_dvd_of_rel_right (factors_prod x_ne),
exact multiset.dvd_prod (multiset.mem_of_le hs hz) },
have y_ne : y ≠ 0,
{ intro y_eq,
rw [y_eq, associated_zero_iff_eq_zero] at hy,
exact prod_s_ne hy },
rw [← dvd_iff_dvd_of_rel_left hy, ← dvd_iff_dvd_of_rel_right (factors_prod x_ne)],
exact multiset.prod_dvd_prod hs },
{ rintro (h : associates.mk y ∣ associates.mk x),
rw associates.mk_dvd_mk at h,
have hy : y ≠ 0, { refine mt (λ hy, _) x_ne, rwa [hy, zero_dvd_iff] at h },
refine ⟨factors y, _, factors_prod hy⟩,
exact (dvd_iff_factors_le_factors hy x_ne).mp h },
end
end unique_factorization_monoid
|
5eff7a626f65534b1fd0ec4d3b3bacacfdeaa9c3
|
a0e23cfdd129a671bf3154ee1a8a3a72bf4c7940
|
/src/Init/System/ST.lean
|
2ddda42a140b6a9f606a6fe5130f3d0f6ad5abb4
|
[
"Apache-2.0"
] |
permissive
|
WojciechKarpiel/lean4
|
7f89706b8e3c1f942b83a2c91a3a00b05da0e65b
|
f6e1314fa08293dea66a329e05b6c196a0189163
|
refs/heads/master
| 1,686,633,402,214
| 1,625,821,189,000
| 1,625,821,258,000
| 384,640,886
| 0
| 0
|
Apache-2.0
| 1,625,903,617,000
| 1,625,903,026,000
| null |
UTF-8
|
Lean
| false
| false
| 4,411
|
lean
|
/-
Copyright (c) 2020 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
prelude
import Init.Classical
import Init.Control.EState
import Init.Control.Reader
def EST (ε : Type) (σ : Type) : Type → Type := EStateM ε σ
abbrev ST (σ : Type) := EST Empty σ
instance (ε σ : Type) : Monad (EST ε σ) := inferInstanceAs (Monad (EStateM _ _))
instance (ε σ : Type) : MonadExceptOf ε (EST ε σ) := inferInstanceAs (MonadExceptOf ε (EStateM _ _))
instance {ε σ : Type} {α : Type} [Inhabited ε] : Inhabited (EST ε σ α) := inferInstanceAs (Inhabited (EStateM _ _ _))
instance (σ : Type) : Monad (ST σ) := inferInstanceAs (Monad (EST _ _))
-- Auxiliary class for inferring the "state" of `EST` and `ST` monads
class STWorld (σ : outParam Type) (m : Type → Type)
instance {σ m n} [MonadLift m n] [STWorld σ m] : STWorld σ n := ⟨⟩
instance {ε σ} : STWorld σ (EST ε σ) := ⟨⟩
@[noinline, nospecialize]
def runEST {ε α : Type} (x : forall (σ : Type), EST ε σ α) : Except ε α :=
match x Unit () with
| EStateM.Result.ok a _ => Except.ok a
| EStateM.Result.error ex _ => Except.error ex
@[noinline, nospecialize]
def runST {α : Type} (x : forall (σ : Type), ST σ α) : α :=
match x Unit () with
| EStateM.Result.ok a _ => a
| EStateM.Result.error ex _ => nomatch ex
instance {ε σ} : MonadLift (ST σ) (EST ε σ) := ⟨fun x s =>
match x s with
| EStateM.Result.ok a s => EStateM.Result.ok a s
| EStateM.Result.error ex _ => nomatch ex⟩
namespace ST
/- References -/
constant RefPointed : PointedType.{0}
structure Ref (σ : Type) (α : Type) : Type where
ref : RefPointed.type
h : Nonempty α
instance {σ α} [Inhabited α] : Inhabited (Ref σ α) where
default := { ref := RefPointed.val, h := Nonempty.intro arbitrary }
namespace Prim
/- Auxiliary definition for showing that `ST σ α` is inhabited when we have a `Ref σ α` -/
private noncomputable def inhabitedFromRef {σ α} (r : Ref σ α) : ST σ α :=
let inh : Inhabited α := Classical.inhabitedOfNonempty r.h
pure arbitrary
@[extern "lean_st_mk_ref"]
constant mkRef {σ α} (a : α) : ST σ (Ref σ α) := pure { ref := RefPointed.val, h := Nonempty.intro a }
@[extern "lean_st_ref_get"]
constant Ref.get {σ α} (r : @& Ref σ α) : ST σ α := inhabitedFromRef r
@[extern "lean_st_ref_set"]
constant Ref.set {σ α} (r : @& Ref σ α) (a : α) : ST σ Unit
@[extern "lean_st_ref_swap"]
constant Ref.swap {σ α} (r : @& Ref σ α) (a : α) : ST σ α := inhabitedFromRef r
@[extern "lean_st_ref_take"]
unsafe constant Ref.take {σ α} (r : @& Ref σ α) : ST σ α := inhabitedFromRef r
@[extern "lean_st_ref_ptr_eq"]
constant Ref.ptrEq {σ α} (r1 r2 : @& Ref σ α) : ST σ Bool
@[inline] unsafe def Ref.modifyUnsafe {σ α : Type} (r : Ref σ α) (f : α → α) : ST σ Unit := do
let v ← Ref.take r
Ref.set r (f v)
@[inline] unsafe def Ref.modifyGetUnsafe {σ α β : Type} (r : Ref σ α) (f : α → β × α) : ST σ β := do
let v ← Ref.take r
let (b, a) := f v
Ref.set r a
pure b
@[implementedBy Ref.modifyUnsafe]
def Ref.modify {σ α : Type} (r : Ref σ α) (f : α → α) : ST σ Unit := do
let v ← Ref.get r
Ref.set r (f v)
@[implementedBy Ref.modifyGetUnsafe]
def Ref.modifyGet {σ α β : Type} (r : Ref σ α) (f : α → β × α) : ST σ β := do
let v ← Ref.get r
let (b, a) := f v
Ref.set r a
pure b
end Prim
section
variable {σ : Type} {m : Type → Type} [Monad m] [MonadLiftT (ST σ) m]
@[inline] def mkRef {α : Type} (a : α) : m (Ref σ α) := liftM <| Prim.mkRef a
@[inline] def Ref.get {α : Type} (r : Ref σ α) : m α := liftM <| Prim.Ref.get r
@[inline] def Ref.set {α : Type} (r : Ref σ α) (a : α) : m Unit := liftM <| Prim.Ref.set r a
@[inline] def Ref.swap {α : Type} (r : Ref σ α) (a : α) : m α := liftM <| Prim.Ref.swap r a
@[inline] unsafe def Ref.take {α : Type} (r : Ref σ α) : m α := liftM <| Prim.Ref.take r
@[inline] def Ref.ptrEq {α : Type} (r1 r2 : Ref σ α) : m Bool := liftM <| Prim.Ref.ptrEq r1 r2
@[inline] def Ref.modify {α : Type} (r : Ref σ α) (f : α → α) : m Unit := liftM <| Prim.Ref.modify r f
@[inline] def Ref.modifyGet {α : Type} {β : Type} (r : Ref σ α) (f : α → β × α) : m β := liftM <| Prim.Ref.modifyGet r f
end
end ST
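/-
Usage sketch (illustrative only; `addTwice` is a hypothetical definition, not part of this file,
and it assumes the surrounding library is available at the use site):

  def addTwice (x : Nat) : Nat :=
    runST fun _ => do
      let r ← ST.mkRef x
      r.modify (· + x)
      r.get

Here `runST` quantifies over the phantom state type `σ`, so the mutable `Ref` created inside the
computation cannot escape it and the result is a pure value.
-/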
|
b48659277a38dc1cabadfcc10c24e2419810648d
|
fa02ed5a3c9c0adee3c26887a16855e7841c668b
|
/src/topology/continuous_function/stone_weierstrass.lean
|
4cf2a4bfddc09827e6a7eadd836c54c942162062
|
[
"Apache-2.0"
] |
permissive
|
jjgarzella/mathlib
|
96a345378c4e0bf26cf604aed84f90329e4896a2
|
395d8716c3ad03747059d482090e2bb97db612c8
|
refs/heads/master
| 1,686,480,124,379
| 1,625,163,323,000
| 1,625,163,323,000
| 281,190,421
| 2
| 0
|
Apache-2.0
| 1,595,268,170,000
| 1,595,268,169,000
| null |
UTF-8
|
Lean
| false
| false
| 19,745
|
lean
|
/-
Copyright (c) 2021 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Heather Macbeth
-/
import topology.continuous_function.weierstrass
import analysis.complex.basic
/-!
# The Stone-Weierstrass theorem
If a subalgebra `A` of `C(X, ℝ)`, where `X` is a compact topological space,
separates points, then it is dense.
We argue as follows.
* In any subalgebra `A` of `C(X, ℝ)`, if `f ∈ A`, then `abs f ∈ A.topological_closure`.
This follows from the Weierstrass approximation theorem on `[-∥f∥, ∥f∥]` by
approximating `abs` uniformly thereon by polynomials.
* This ensures that `A.topological_closure` is actually a sublattice:
if it contains `f` and `g`, then it contains the pointwise supremum `f ⊔ g`
and the pointwise infimum `f ⊓ g`.
* Any nonempty sublattice `L` of `C(X, ℝ)` which separates points is dense,
by a nice argument approximating a given `f` above and below using separating functions.
For each `x y : X`, we pick a function `g x y ∈ L` so `g x y x = f x` and `g x y y = f y`.
By continuity these functions remain close to `f` on small patches around `x` and `y`.
We use compactness to identify a certain finitely indexed infimum of finitely indexed supremums
which is then close to `f` everywhere, obtaining the desired approximation.
* Finally we put these pieces together. `L = A.topological_closure` is a nonempty sublattice
which separates points since `A` does, and so is dense (in fact equal to `⊤`).
We then prove the complex version for self-adjoint subalgebras `A`, by separately approximating
the real and imaginary parts using the real subalgebra of real-valued functions in `A`
(which still separates points, by taking the norm-square of a separating function).
## Future work
Extend to cover the case of subalgebras of the continuous functions vanishing at infinity,
on non-compact spaces.
-/
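/-
Informally, the sandwich produced in `sublattice_closure_eq_top` below says: writing `k` for the
infimum over finitely many `x` of the suprema over finitely many `y` of the separating functions
`g x y`, every `z : X` satisfies `f z - ε < k z < f z + ε`, i.e. `dist (k z) (f z) < ε`
pointwise, and hence `dist k f < ε` in `C(X, ℝ)`; the finiteness comes from compactness of `X`.
-/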
noncomputable theory
namespace continuous_map
variables {X : Type*} [topological_space X] [compact_space X]
/--
Turn a function `f : C(X, ℝ)` into a continuous map into `set.Icc (-∥f∥) (∥f∥)`,
thereby explicitly attaching bounds.
-/
def attach_bound (f : C(X, ℝ)) : C(X, set.Icc (-∥f∥) (∥f∥)) :=
{ to_fun := λ x, ⟨f x, ⟨neg_norm_le_apply f x, apply_le_norm f x⟩⟩ }
@[simp] lemma attach_bound_apply_coe (f : C(X, ℝ)) (x : X) : ((attach_bound f) x : ℝ) = f x := rfl
lemma polynomial_comp_attach_bound (A : subalgebra ℝ C(X, ℝ)) (f : A) (g : polynomial ℝ) :
(g.to_continuous_map_on (set.Icc (-∥f∥) ∥f∥)).comp (f : C(X, ℝ)).attach_bound =
polynomial.aeval f g :=
begin
ext,
simp only [continuous_map.comp_coe, function.comp_app,
continuous_map.attach_bound_apply_coe,
polynomial.to_continuous_map_on_to_fun,
polynomial.aeval_subalgebra_coe,
polynomial.aeval_continuous_map_apply,
polynomial.to_continuous_map_to_fun],
end
/--
Given a continuous function `f` in a subalgebra of `C(X, ℝ)`, postcomposing by a polynomial
gives another function in `A`.
This lemma proves something slightly more subtle than this:
we take `f`, and think of it as a function into the restricted target `set.Icc (-∥f∥) (∥f∥)`,
and then postcompose with a polynomial function on that interval.
This is in fact the same situation as above, and so also gives a function in `A`.
-/
lemma polynomial_comp_attach_bound_mem (A : subalgebra ℝ C(X, ℝ)) (f : A) (g : polynomial ℝ) :
(g.to_continuous_map_on (set.Icc (-∥f∥) ∥f∥)).comp (f : C(X, ℝ)).attach_bound ∈ A :=
begin
rw polynomial_comp_attach_bound,
apply set_like.coe_mem,
end
theorem comp_attach_bound_mem_closure
(A : subalgebra ℝ C(X, ℝ)) (f : A) (p : C(set.Icc (-∥f∥) (∥f∥), ℝ)) :
p.comp (attach_bound f) ∈ A.topological_closure :=
begin
-- `p` itself is in the closure of polynomials, by the Weierstrass theorem,
have mem_closure : p ∈ (polynomial_functions (set.Icc (-∥f∥) (∥f∥))).topological_closure :=
continuous_map_mem_polynomial_functions_closure _ _ p,
-- and so there are polynomials arbitrarily close.
have frequently_mem_polynomials := mem_closure_iff_frequently.mp mem_closure,
-- To prove `p.comp (attach_bound f)` is in the closure of `A`,
-- we show there are elements of `A` arbitrarily close.
apply mem_closure_iff_frequently.mpr,
-- To show that, we pull back the polynomials close to `p`,
refine ((comp_right_continuous_map ℝ (attach_bound (f : C(X, ℝ)))).continuous_at p).tendsto
.frequently_map _ _ frequently_mem_polynomials,
-- but need to show that those pullbacks are actually in `A`.
rintros _ ⟨g, ⟨-,rfl⟩⟩,
simp only [set_like.mem_coe, alg_hom.coe_to_ring_hom, comp_right_continuous_map_apply,
polynomial.to_continuous_map_on_alg_hom_apply],
apply polynomial_comp_attach_bound_mem,
end
theorem abs_mem_subalgebra_closure (A : subalgebra ℝ C(X, ℝ)) (f : A) :
(f : C(X, ℝ)).abs ∈ A.topological_closure :=
begin
let M := ∥f∥,
let f' := attach_bound (f : C(X, ℝ)),
let abs : C(set.Icc (-∥f∥) (∥f∥), ℝ) :=
{ to_fun := λ x : set.Icc (-∥f∥) (∥f∥), _root_.abs (x : ℝ) },
change (abs.comp f') ∈ A.topological_closure,
apply comp_attach_bound_mem_closure,
end
theorem inf_mem_subalgebra_closure (A : subalgebra ℝ C(X, ℝ)) (f g : A) :
(f : C(X, ℝ)) ⊓ (g : C(X, ℝ)) ∈ A.topological_closure :=
begin
rw inf_eq,
refine A.topological_closure.smul_mem
(A.topological_closure.sub_mem
(A.topological_closure.add_mem (A.subalgebra_topological_closure f.property)
(A.subalgebra_topological_closure g.property)) _) _,
exact_mod_cast abs_mem_subalgebra_closure A _,
end
theorem inf_mem_closed_subalgebra (A : subalgebra ℝ C(X, ℝ)) (h : is_closed (A : set C(X, ℝ)))
(f g : A) : (f : C(X, ℝ)) ⊓ (g : C(X, ℝ)) ∈ A :=
begin
convert inf_mem_subalgebra_closure A f g,
apply set_like.ext',
symmetry,
erw closure_eq_iff_is_closed,
exact h,
end
theorem sup_mem_subalgebra_closure (A : subalgebra ℝ C(X, ℝ)) (f g : A) :
(f : C(X, ℝ)) ⊔ (g : C(X, ℝ)) ∈ A.topological_closure :=
begin
rw sup_eq,
refine A.topological_closure.smul_mem
(A.topological_closure.add_mem
(A.topological_closure.add_mem (A.subalgebra_topological_closure f.property)
(A.subalgebra_topological_closure g.property)) _) _,
exact_mod_cast abs_mem_subalgebra_closure A _,
end
theorem sup_mem_closed_subalgebra (A : subalgebra ℝ C(X, ℝ)) (h : is_closed (A : set C(X, ℝ)))
(f g : A) : (f : C(X, ℝ)) ⊔ (g : C(X, ℝ)) ∈ A :=
begin
convert sup_mem_subalgebra_closure A f g,
apply set_like.ext',
symmetry,
erw closure_eq_iff_is_closed,
exact h,
end
open_locale topological_space
-- Here's the fun part of Stone-Weierstrass!
theorem sublattice_closure_eq_top
(L : set C(X, ℝ)) (nA : L.nonempty)
(inf_mem : ∀ f g ∈ L, f ⊓ g ∈ L) (sup_mem : ∀ f g ∈ L, f ⊔ g ∈ L)
(sep : L.separates_points_strongly) :
closure L = ⊤ :=
begin
-- We start by boiling down to a statement about close approximation.
apply eq_top_iff.mpr,
rintros f -,
refine filter.frequently.mem_closure
((filter.has_basis.frequently_iff metric.nhds_basis_ball).mpr (λ ε pos, _)),
simp only [exists_prop, metric.mem_ball],
-- It will be helpful to assume `X` is nonempty later,
-- so we get that out of the way here.
by_cases nX : nonempty X,
swap,
exact ⟨nA.some, (dist_lt_iff _ _ pos).mpr (λ x, false.elim (nX ⟨x⟩)), nA.some_spec⟩,
/-
The strategy now is to pick a family of continuous functions `g x y` in `A`
with the property that `g x y x = f x` and `g x y y = f y`
(this is immediate from `sep : separates_points_strongly`)
then use continuity to see that `g x y` is close to `f` near both `x` and `y`,
and finally use compactness to produce the desired approximant
as a minimum over finitely many `x` of a maximum over finitely many `y` of the `g x y`.
-/
dsimp [set.separates_points_strongly] at sep,
let g : X → X → L := λ x y, (sep f x y).some,
have w₁ : ∀ x y, g x y x = f x := λ x y, (sep f x y).some_spec.1,
have w₂ : ∀ x y, g x y y = f y := λ x y, (sep f x y).some_spec.2,
-- For each `x y`, we define `U x y` to be `{z | f z - ε < g x y z}`,
-- and observe this is a neighbourhood of `y`.
let U : X → X → set X := λ x y, {z | f z - ε < g x y z},
have U_nhd_y : ∀ x y, U x y ∈ 𝓝 y,
{ intros x y,
refine is_open.mem_nhds _ _,
{ apply is_open_lt; continuity, },
{ rw [set.mem_set_of_eq, w₂],
exact sub_lt_self _ pos, }, },
-- Fixing `x` for a moment, we have a family of functions `λ y, g x y`
-- which on different patches (the `U x y`) are greater than `f z - ε`.
-- Taking the supremum of these functions
-- indexed by a finite collection of patches which cover `X`
-- will give us an element of `L` that is globally greater than `f z - ε`
-- and still equal to `f x` at `x`.
-- Since `X` is compact, for every `x` there is some finset `ys x`
-- so the union of the `U x y` for `y ∈ ys x` still covers everything.
let ys : Π x, finset X := λ x, (compact_space.elim_nhds_subcover (U x) (U_nhd_y x)).some,
let ys_w : ∀ x, (⋃ y ∈ ys x, U x y) = ⊤ :=
λ x, (compact_space.elim_nhds_subcover (U x) (U_nhd_y x)).some_spec,
have ys_nonempty : ∀ x, (ys x).nonempty :=
λ x, set.nonempty_of_union_eq_top_of_nonempty _ _ nX (ys_w x),
-- Thus for each `x` we have the desired `h x : L` so `f z - ε < h x z` everywhere
-- and `h x x = f x`.
let h : Π x, L := λ x,
⟨(ys x).sup' (ys_nonempty x) (λ y, (g x y : C(X, ℝ))),
finset.sup'_mem _ sup_mem _ _ _ (λ y _, (g x y).2)⟩,
have lt_h : ∀ x z, f z - ε < h x z,
{ intros x z,
obtain ⟨y, ym, zm⟩ := set.exists_set_mem_of_union_eq_top _ _ (ys_w x) z,
dsimp [h],
simp only [finset.lt_sup'_iff, continuous_map.sup'_apply],
exact ⟨y, ym, zm⟩, },
have h_eq : ∀ x, h x x = f x, by { intro x, simp only [coe_fn_coe_base] at w₁, simp [w₁], },
-- For each `x`, we define `W x` to be `{z | h x z < f z + ε}`,
let W : Π x, set X := λ x, {z | h x z < f z + ε},
-- This is still a neighbourhood of `x`.
have W_nhd : ∀ x, W x ∈ 𝓝 x,
{ intros x,
refine is_open.mem_nhds _ _,
{ apply is_open_lt; continuity, },
{ dsimp only [W, set.mem_set_of_eq],
rw h_eq,
exact lt_add_of_pos_right _ pos}, },
-- Since `X` is compact, there is some finset `xs`
-- so the union of the `W x` for `x ∈ xs` still covers everything.
let xs : finset X := (compact_space.elim_nhds_subcover W W_nhd).some,
let xs_w : (⋃ x ∈ xs, W x) = ⊤ :=
(compact_space.elim_nhds_subcover W W_nhd).some_spec,
have xs_nonempty : xs.nonempty := set.nonempty_of_union_eq_top_of_nonempty _ _ nX xs_w,
-- Finally our candidate function is the infimum over `x ∈ xs` of the `h x`.
-- This function is then globally less than `f z + ε`.
let k : (L : Type*) :=
⟨xs.inf' xs_nonempty (λ x, (h x : C(X, ℝ))),
finset.inf'_mem _ inf_mem _ _ _ (λ x _, (h x).2)⟩,
refine ⟨k.1, _, k.2⟩,
-- We just need to verify the bound, which we do pointwise.
rw dist_lt_iff _ _ pos,
intro z,
-- We rewrite into this particular form,
-- so that simp lemmas about inequalities involving `finset.inf'` can fire.
rw [(show ∀ a b ε : ℝ, dist a b < ε ↔ a < b + ε ∧ b - ε < a,
by { intros, simp only [← metric.mem_ball, real.ball_eq_Ioo, set.mem_Ioo, and_comm], })],
fsplit,
{ dsimp [k],
simp only [finset.inf'_lt_iff, continuous_map.inf'_apply],
exact set.exists_set_mem_of_union_eq_top _ _ xs_w z, },
{ dsimp [k],
simp only [finset.lt_inf'_iff, continuous_map.inf'_apply],
intros x xm,
apply lt_h, },
end
/--
The Stone-Weierstrass approximation theorem,
that a subalgebra `A` of `C(X, ℝ)`, where `X` is a compact topological space,
is dense if it separates points.
-/
theorem subalgebra_topological_closure_eq_top_of_separates_points
(A : subalgebra ℝ C(X, ℝ)) (w : A.separates_points) :
A.topological_closure = ⊤ :=
begin
-- The closure of `A` is closed under taking `sup` and `inf`,
-- and separates points strongly (since `A` does),
-- so we can apply `sublattice_closure_eq_top`.
apply set_like.ext',
let L := A.topological_closure,
have n : set.nonempty (L : set C(X, ℝ)) :=
⟨(1 : C(X, ℝ)), A.subalgebra_topological_closure A.one_mem⟩,
convert sublattice_closure_eq_top
(L : set C(X, ℝ)) n
(λ f g fm gm, inf_mem_closed_subalgebra L A.is_closed_topological_closure ⟨f, fm⟩ ⟨g, gm⟩)
(λ f g fm gm, sup_mem_closed_subalgebra L A.is_closed_topological_closure ⟨f, fm⟩ ⟨g, gm⟩)
(subalgebra.separates_points.strongly
(subalgebra.separates_points_monotone (A.subalgebra_topological_closure) w)),
{ simp, },
end
/--
An alternative statement of the Stone-Weierstrass theorem.
If `A` is a subalgebra of `C(X, ℝ)` which separates points (and `X` is compact),
every real-valued continuous function on `X` is a uniform limit of elements of `A`.
-/
theorem continuous_map_mem_subalgebra_closure_of_separates_points
(A : subalgebra ℝ C(X, ℝ)) (w : A.separates_points)
(f : C(X, ℝ)) :
f ∈ A.topological_closure :=
begin
rw subalgebra_topological_closure_eq_top_of_separates_points A w,
simp,
end
/--
An alternative statement of the Stone-Weierstrass theorem,
for those who like their epsilons.
If `A` is a subalgebra of `C(X, ℝ)` which separates points (and `X` is compact),
every real-valued continuous function on `X` is within any `ε > 0` of some element of `A`.
-/
theorem exists_mem_subalgebra_near_continuous_map_of_separates_points
(A : subalgebra ℝ C(X, ℝ)) (w : A.separates_points)
(f : C(X, ℝ)) (ε : ℝ) (pos : 0 < ε) :
∃ (g : A), ∥(g : C(X, ℝ)) - f∥ < ε :=
begin
have w := mem_closure_iff_frequently.mp
(continuous_map_mem_subalgebra_closure_of_separates_points A w f),
rw metric.nhds_basis_ball.frequently_iff at w,
obtain ⟨g, H, m⟩ := w ε pos,
rw [metric.mem_ball, dist_eq_norm] at H,
exact ⟨⟨g, m⟩, H⟩,
end
/--
An alternative statement of the Stone-Weierstrass theorem,
for those who like their epsilons and don't like bundled continuous functions.
If `A` is a subalgebra of `C(X, ℝ)` which separates points (and `X` is compact),
every real-valued continuous function on `X` is within any `ε > 0` of some element of `A`.
-/
theorem exists_mem_subalgebra_near_continuous_of_separates_points
(A : subalgebra ℝ C(X, ℝ)) (w : A.separates_points)
(f : X → ℝ) (c : continuous f) (ε : ℝ) (pos : 0 < ε) :
∃ (g : A), ∀ x, ∥g x - f x∥ < ε :=
begin
obtain ⟨g, b⟩ := exists_mem_subalgebra_near_continuous_map_of_separates_points A w ⟨f, c⟩ ε pos,
use g,
rwa norm_lt_iff _ pos at b,
end
end continuous_map
section complex
open complex
-- Redefine `X`, since for the next few lemmas it need not be compact
variables {X : Type*} [topological_space X]
namespace continuous_map
/-- A real subalgebra of `C(X, ℂ)` is `conj_invariant`, if it contains all its conjugates. -/
def conj_invariant_subalgebra (A : subalgebra ℝ C(X, ℂ)) : Prop :=
A.map (conj_ae.to_alg_hom.comp_left_continuous ℝ conj_cle.continuous) ≤ A
lemma mem_conj_invariant_subalgebra {A : subalgebra ℝ C(X, ℂ)} (hA : conj_invariant_subalgebra A)
{f : C(X, ℂ)} (hf : f ∈ A) :
(conj_ae.to_alg_hom.comp_left_continuous ℝ conj_cle.continuous) f ∈ A :=
hA ⟨f, hf, rfl⟩
end continuous_map
open continuous_map
/-- If a conjugation-invariant subalgebra of `C(X, ℂ)` separates points, then the real subalgebra
of its purely real-valued elements also separates points. -/
lemma subalgebra.separates_points.complex_to_real {A : subalgebra ℂ C(X, ℂ)}
(hA : A.separates_points) (hA' : conj_invariant_subalgebra (A.restrict_scalars ℝ)) :
((A.restrict_scalars ℝ).comap'
(of_real_am.comp_left_continuous ℝ continuous_of_real)).separates_points :=
begin
intros x₁ x₂ hx,
-- Let `f` in the subalgebra `A` separate the points `x₁`, `x₂`
obtain ⟨_, ⟨f, hfA, rfl⟩, hf⟩ := hA hx,
let F : C(X, ℂ) := f - const (f x₂),
-- Subtract the constant `f x₂` from `f`; this is still an element of the subalgebra
have hFA : F ∈ A,
{ refine A.sub_mem hfA _,
convert A.smul_mem A.one_mem (f x₂),
ext1,
simp },
-- Consider now the function `λ x, |f x - f x₂| ^ 2`
refine ⟨_, ⟨(⟨complex.norm_sq, continuous_norm_sq⟩ : C(ℂ, ℝ)).comp F, _, rfl⟩, _⟩,
{ -- This is also an element of the subalgebra, and takes only real values
rw [set_like.mem_coe, subalgebra.mem_comap],
convert (A.restrict_scalars ℝ).mul_mem (mem_conj_invariant_subalgebra hA' hFA) hFA,
ext1,
exact complex.norm_sq_eq_conj_mul_self },
{ -- And it also separates the points `x₁`, `x₂`
have : f x₁ - f x₂ ≠ 0 := sub_ne_zero.mpr hf,
simpa using this },
end
variables [compact_space X]
/--
The Stone-Weierstrass approximation theorem, complex version,
that a subalgebra `A` of `C(X, ℂ)`, where `X` is a compact topological space,
is dense if it is conjugation-invariant and separates points.
-/
theorem continuous_map.subalgebra_complex_topological_closure_eq_top_of_separates_points
(A : subalgebra ℂ C(X, ℂ)) (hA : A.separates_points)
(hA' : conj_invariant_subalgebra (A.restrict_scalars ℝ)) :
A.topological_closure = ⊤ :=
begin
rw algebra.eq_top_iff,
-- Let `I` be the natural inclusion of `C(X, ℝ)` into `C(X, ℂ)`
let I : C(X, ℝ) →ₗ[ℝ] C(X, ℂ) := of_real_clm.comp_left_continuous ℝ X,
-- The main point of the proof is that its range (i.e., every real-valued function) is contained
-- in the closure of `A`
have key : I.range ≤ (A.to_submodule.restrict_scalars ℝ).topological_closure,
{ -- Let `A₀` be the subalgebra of `C(X, ℝ)` consisting of `A`'s purely real elements; it is the
-- preimage of `A` under `I`. In this argument we only need its submodule structure.
let A₀ : submodule ℝ C(X, ℝ) := (A.to_submodule.restrict_scalars ℝ).comap I,
-- By `subalgebra.separates_points.complex_to_real`, this subalgebra also separates points, so
-- we may apply the real Stone-Weierstrass result to it.
have SW : A₀.topological_closure = ⊤,
{ have := subalgebra_topological_closure_eq_top_of_separates_points _ (hA.complex_to_real hA'),
exact congr_arg subalgebra.to_submodule this },
rw [← submodule.map_top, ← SW],
-- So it suffices to prove that the image under `I` of the closure of `A₀` is contained in the
-- closure of `A`, which follows by abstract nonsense
have h₁ := A₀.topological_closure_map (of_real_clm.comp_left_continuous_compact X),
have h₂ := (A.to_submodule.restrict_scalars ℝ).map_comap_le I,
exact h₁.trans (submodule.topological_closure_mono h₂) },
-- In particular, for a function `f` in `C(X, ℂ)`, the real and imaginary parts of `f` are in the
-- closure of `A`
intros f,
let f_re : C(X, ℝ) := (⟨complex.re, complex.re_clm.continuous⟩ : C(ℂ, ℝ)).comp f,
let f_im : C(X, ℝ) := (⟨complex.im, complex.im_clm.continuous⟩ : C(ℂ, ℝ)).comp f,
have h_f_re : I f_re ∈ A.topological_closure := key ⟨f_re, rfl⟩,
have h_f_im : I f_im ∈ A.topological_closure := key ⟨f_im, rfl⟩,
-- So `f_re + complex.I • f_im` is in the closure of `A`
convert A.topological_closure.add_mem h_f_re (A.topological_closure.smul_mem h_f_im complex.I),
-- And this, of course, is just `f`
ext; simp [I]
end
end complex
|
c47c2b8e1283ddbc2e86bd23abab25300b0ae73e
|
592ee40978ac7604005a4e0d35bbc4b467389241
|
/Library/generated/mathscheme-lean/LeftCancellativeOp.lean
|
79e850aa61fa42c7d066f27d0b4d0d960c4cd080
|
[] |
no_license
|
ysharoda/Deriving-Definitions
|
3e149e6641fae440badd35ac110a0bd705a49ad2
|
dfecb27572022de3d4aa702cae8db19957523a59
|
refs/heads/master
| 1,679,127,857,700
| 1,615,939,007,000
| 1,615,939,007,000
| 229,785,731
| 4
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 9,002
|
lean
|
import init.data.nat.basic
import init.data.fin.basic
import data.vector
import .Prelude
open Staged
open nat
open fin
open vector
section LeftCancellativeOp
structure LeftCancellativeOp (A : Type) : Type :=
(op : (A → (A → A)))
(linv : (A → (A → A)))
(lefCancelOp : (∀ {x y : A} , (linv x (op x y)) = y))
open LeftCancellativeOp
structure Sig (AS : Type) : Type :=
(opS : (AS → (AS → AS)))
(linvS : (AS → (AS → AS)))
structure Product (A : Type) : Type :=
(opP : ((Prod A A) → ((Prod A A) → (Prod A A))))
(linvP : ((Prod A A) → ((Prod A A) → (Prod A A))))
(lefCancelOpP : (∀ {xP yP : (Prod A A)} , (linvP xP (opP xP yP)) = yP))
structure Hom {A1 : Type} {A2 : Type} (Le1 : (LeftCancellativeOp A1)) (Le2 : (LeftCancellativeOp A2)) : Type :=
(hom : (A1 → A2))
(pres_op : (∀ {x1 x2 : A1} , (hom ((op Le1) x1 x2)) = ((op Le2) (hom x1) (hom x2))))
(pres_linv : (∀ {x1 x2 : A1} , (hom ((linv Le1) x1 x2)) = ((linv Le2) (hom x1) (hom x2))))
structure RelInterp {A1 : Type} {A2 : Type} (Le1 : (LeftCancellativeOp A1)) (Le2 : (LeftCancellativeOp A2)) : Type 1 :=
(interp : (A1 → (A2 → Type)))
(interp_op : (∀ {x1 x2 : A1} {y1 y2 : A2} , ((interp x1 y1) → ((interp x2 y2) → (interp ((op Le1) x1 x2) ((op Le2) y1 y2))))))
(interp_linv : (∀ {x1 x2 : A1} {y1 y2 : A2} , ((interp x1 y1) → ((interp x2 y2) → (interp ((linv Le1) x1 x2) ((linv Le2) y1 y2))))))
inductive LeftCancellativeOpTerm : Type
| opL : (LeftCancellativeOpTerm → (LeftCancellativeOpTerm → LeftCancellativeOpTerm))
| linvL : (LeftCancellativeOpTerm → (LeftCancellativeOpTerm → LeftCancellativeOpTerm))
open LeftCancellativeOpTerm
inductive ClLeftCancellativeOpTerm (A : Type) : Type
| sing : (A → ClLeftCancellativeOpTerm)
| opCl : (ClLeftCancellativeOpTerm → (ClLeftCancellativeOpTerm → ClLeftCancellativeOpTerm))
| linvCl : (ClLeftCancellativeOpTerm → (ClLeftCancellativeOpTerm → ClLeftCancellativeOpTerm))
open ClLeftCancellativeOpTerm
inductive OpLeftCancellativeOpTerm (n : ℕ) : Type
| v : ((fin n) → OpLeftCancellativeOpTerm)
| opOL : (OpLeftCancellativeOpTerm → (OpLeftCancellativeOpTerm → OpLeftCancellativeOpTerm))
| linvOL : (OpLeftCancellativeOpTerm → (OpLeftCancellativeOpTerm → OpLeftCancellativeOpTerm))
open OpLeftCancellativeOpTerm
inductive OpLeftCancellativeOpTerm2 (n : ℕ) (A : Type) : Type
| v2 : ((fin n) → OpLeftCancellativeOpTerm2)
| sing2 : (A → OpLeftCancellativeOpTerm2)
| opOL2 : (OpLeftCancellativeOpTerm2 → (OpLeftCancellativeOpTerm2 → OpLeftCancellativeOpTerm2))
| linvOL2 : (OpLeftCancellativeOpTerm2 → (OpLeftCancellativeOpTerm2 → OpLeftCancellativeOpTerm2))
open OpLeftCancellativeOpTerm2
def simplifyCl {A : Type} : ((ClLeftCancellativeOpTerm A) → (ClLeftCancellativeOpTerm A))
| (opCl x1 x2) := (opCl (simplifyCl x1) (simplifyCl x2))
| (linvCl x1 x2) := (linvCl (simplifyCl x1) (simplifyCl x2))
| (sing x1) := (sing x1)
def simplifyOpB {n : ℕ} : ((OpLeftCancellativeOpTerm n) → (OpLeftCancellativeOpTerm n))
| (opOL x1 x2) := (opOL (simplifyOpB x1) (simplifyOpB x2))
| (linvOL x1 x2) := (linvOL (simplifyOpB x1) (simplifyOpB x2))
| (v x1) := (v x1)
def simplifyOp {n : ℕ} {A : Type} : ((OpLeftCancellativeOpTerm2 n A) → (OpLeftCancellativeOpTerm2 n A))
| (opOL2 x1 x2) := (opOL2 (simplifyOp x1) (simplifyOp x2))
| (linvOL2 x1 x2) := (linvOL2 (simplifyOp x1) (simplifyOp x2))
| (v2 x1) := (v2 x1)
| (sing2 x1) := (sing2 x1)
def evalB {A : Type} : ((LeftCancellativeOp A) → (LeftCancellativeOpTerm → A))
| Le (opL x1 x2) := ((op Le) (evalB Le x1) (evalB Le x2))
| Le (linvL x1 x2) := ((linv Le) (evalB Le x1) (evalB Le x2))
def evalCl {A : Type} : ((LeftCancellativeOp A) → ((ClLeftCancellativeOpTerm A) → A))
| Le (sing x1) := x1
| Le (opCl x1 x2) := ((op Le) (evalCl Le x1) (evalCl Le x2))
| Le (linvCl x1 x2) := ((linv Le) (evalCl Le x1) (evalCl Le x2))
def evalOpB {A : Type} {n : ℕ} : ((LeftCancellativeOp A) → ((vector A n) → ((OpLeftCancellativeOpTerm n) → A)))
| Le vars (v x1) := (nth vars x1)
| Le vars (opOL x1 x2) := ((op Le) (evalOpB Le vars x1) (evalOpB Le vars x2))
| Le vars (linvOL x1 x2) := ((linv Le) (evalOpB Le vars x1) (evalOpB Le vars x2))
def evalOp {A : Type} {n : ℕ} : ((LeftCancellativeOp A) → ((vector A n) → ((OpLeftCancellativeOpTerm2 n A) → A)))
| Le vars (v2 x1) := (nth vars x1)
| Le vars (sing2 x1) := x1
| Le vars (opOL2 x1 x2) := ((op Le) (evalOp Le vars x1) (evalOp Le vars x2))
| Le vars (linvOL2 x1 x2) := ((linv Le) (evalOp Le vars x1) (evalOp Le vars x2))
def inductionB {P : (LeftCancellativeOpTerm → Type)} : ((∀ (x1 x2 : LeftCancellativeOpTerm) , ((P x1) → ((P x2) → (P (opL x1 x2))))) → ((∀ (x1 x2 : LeftCancellativeOpTerm) , ((P x1) → ((P x2) → (P (linvL x1 x2))))) → (∀ (x : LeftCancellativeOpTerm) , (P x))))
| popl plinvl (opL x1 x2) := (popl _ _ (inductionB popl plinvl x1) (inductionB popl plinvl x2))
| popl plinvl (linvL x1 x2) := (plinvl _ _ (inductionB popl plinvl x1) (inductionB popl plinvl x2))
def inductionCl {A : Type} {P : ((ClLeftCancellativeOpTerm A) → Type)} : ((∀ (x1 : A) , (P (sing x1))) → ((∀ (x1 x2 : (ClLeftCancellativeOpTerm A)) , ((P x1) → ((P x2) → (P (opCl x1 x2))))) → ((∀ (x1 x2 : (ClLeftCancellativeOpTerm A)) , ((P x1) → ((P x2) → (P (linvCl x1 x2))))) → (∀ (x : (ClLeftCancellativeOpTerm A)) , (P x)))))
| psing popcl plinvcl (sing x1) := (psing x1)
| psing popcl plinvcl (opCl x1 x2) := (popcl _ _ (inductionCl psing popcl plinvcl x1) (inductionCl psing popcl plinvcl x2))
| psing popcl plinvcl (linvCl x1 x2) := (plinvcl _ _ (inductionCl psing popcl plinvcl x1) (inductionCl psing popcl plinvcl x2))
def inductionOpB {n : ℕ} {P : ((OpLeftCancellativeOpTerm n) → Type)} : ((∀ (fin : (fin n)) , (P (v fin))) → ((∀ (x1 x2 : (OpLeftCancellativeOpTerm n)) , ((P x1) → ((P x2) → (P (opOL x1 x2))))) → ((∀ (x1 x2 : (OpLeftCancellativeOpTerm n)) , ((P x1) → ((P x2) → (P (linvOL x1 x2))))) → (∀ (x : (OpLeftCancellativeOpTerm n)) , (P x)))))
| pv popol plinvol (v x1) := (pv x1)
| pv popol plinvol (opOL x1 x2) := (popol _ _ (inductionOpB pv popol plinvol x1) (inductionOpB pv popol plinvol x2))
| pv popol plinvol (linvOL x1 x2) := (plinvol _ _ (inductionOpB pv popol plinvol x1) (inductionOpB pv popol plinvol x2))
def inductionOp {n : ℕ} {A : Type} {P : ((OpLeftCancellativeOpTerm2 n A) → Type)} : ((∀ (fin : (fin n)) , (P (v2 fin))) → ((∀ (x1 : A) , (P (sing2 x1))) → ((∀ (x1 x2 : (OpLeftCancellativeOpTerm2 n A)) , ((P x1) → ((P x2) → (P (opOL2 x1 x2))))) → ((∀ (x1 x2 : (OpLeftCancellativeOpTerm2 n A)) , ((P x1) → ((P x2) → (P (linvOL2 x1 x2))))) → (∀ (x : (OpLeftCancellativeOpTerm2 n A)) , (P x))))))
| pv2 psing2 popol2 plinvol2 (v2 x1) := (pv2 x1)
| pv2 psing2 popol2 plinvol2 (sing2 x1) := (psing2 x1)
| pv2 psing2 popol2 plinvol2 (opOL2 x1 x2) := (popol2 _ _ (inductionOp pv2 psing2 popol2 plinvol2 x1) (inductionOp pv2 psing2 popol2 plinvol2 x2))
| pv2 psing2 popol2 plinvol2 (linvOL2 x1 x2) := (plinvol2 _ _ (inductionOp pv2 psing2 popol2 plinvol2 x1) (inductionOp pv2 psing2 popol2 plinvol2 x2))
def stageB : (LeftCancellativeOpTerm → (Staged LeftCancellativeOpTerm))
| (opL x1 x2) := (stage2 opL (codeLift2 opL) (stageB x1) (stageB x2))
| (linvL x1 x2) := (stage2 linvL (codeLift2 linvL) (stageB x1) (stageB x2))
def stageCl {A : Type} : ((ClLeftCancellativeOpTerm A) → (Staged (ClLeftCancellativeOpTerm A)))
| (sing x1) := (Now (sing x1))
| (opCl x1 x2) := (stage2 opCl (codeLift2 opCl) (stageCl x1) (stageCl x2))
| (linvCl x1 x2) := (stage2 linvCl (codeLift2 linvCl) (stageCl x1) (stageCl x2))
def stageOpB {n : ℕ} : ((OpLeftCancellativeOpTerm n) → (Staged (OpLeftCancellativeOpTerm n)))
| (v x1) := (const (code (v x1)))
| (opOL x1 x2) := (stage2 opOL (codeLift2 opOL) (stageOpB x1) (stageOpB x2))
| (linvOL x1 x2) := (stage2 linvOL (codeLift2 linvOL) (stageOpB x1) (stageOpB x2))
def stageOp {n : ℕ} {A : Type} : ((OpLeftCancellativeOpTerm2 n A) → (Staged (OpLeftCancellativeOpTerm2 n A)))
| (sing2 x1) := (Now (sing2 x1))
| (v2 x1) := (const (code (v2 x1)))
| (opOL2 x1 x2) := (stage2 opOL2 (codeLift2 opOL2) (stageOp x1) (stageOp x2))
| (linvOL2 x1 x2) := (stage2 linvOL2 (codeLift2 linvOL2) (stageOp x1) (stageOp x2))
structure StagedRepr (A : Type) (Repr : (Type → Type)) : Type :=
(opT : ((Repr A) → ((Repr A) → (Repr A))))
(linvT : ((Repr A) → ((Repr A) → (Repr A))))
end LeftCancellativeOp
|
80b3f2348d647ba7a4bb9824ff94c515ccaf0ba4
|
7cef822f3b952965621309e88eadf618da0c8ae9
|
/src/tactic/explode.lean
|
5d193aadbf9983a6076e3b3988775644fc30c261
|
[
"Apache-2.0"
] |
permissive
|
rmitta/mathlib
|
8d90aee30b4db2b013e01f62c33f297d7e64a43d
|
883d974b608845bad30ae19e27e33c285200bf84
|
refs/heads/master
| 1,585,776,832,544
| 1,576,874,096,000
| 1,576,874,096,000
| 153,663,165
| 0
| 2
|
Apache-2.0
| 1,544,806,490,000
| 1,539,884,365,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 5,224
|
lean
|
/-
Copyright (c) 2018 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Mario Carneiro
Displays a proof term in a line-by-line format somewhat akin to a Fitch-style
proof or the Metamath proof style.
-/
import tactic.core
open expr tactic
namespace tactic
namespace explode
-- TODO(Mario): move back to list.basic
@[simp] def head' {α} : list α → option α
| [] := none
| (a :: l) := some a
inductive status | reg | intro | lam | sintro
meta structure entry :=
(expr : expr)
(line : nat)
(depth : nat)
(status : status)
(thm : string)
(deps : list nat)
meta def pad_right (l : list string) : list string :=
let n := l.foldl (λ r (s:string), max r s.length) 0 in
l.map $ λ s, nat.iterate (λ s, s.push ' ') (n - s.length) s
meta structure entries := mk' ::
(s : expr_map entry)
(l : list entry)
meta def entries.find (es : entries) (e : expr) := es.s.find e
meta def entries.size (es : entries) := es.s.size
meta def entries.add : entries → entry → entries
| es@⟨s, l⟩ e := if s.contains e.expr then es else ⟨s.insert e.expr e, e :: l⟩
meta def entries.head (es : entries) : option entry := head' es.l
meta instance : inhabited entries := ⟨⟨expr_map.mk _, []⟩⟩
meta def format_aux : list string → list string → list string → list entry → tactic format
| (line :: lines) (dep :: deps) (thm :: thms) (en :: es) := do
fmt ← do {
let margin := string.join (list.repeat " │" en.depth),
let margin := match en.status with
| status.sintro := " ├" ++ margin
| status.intro := " │" ++ margin ++ " ┌"
| status.reg := " │" ++ margin ++ ""
| status.lam := " │" ++ margin ++ ""
end,
p ← infer_type en.expr >>= pp,
let lhs := line ++ "│" ++ dep ++ "│ " ++ thm ++ margin ++ " ",
return $ format.of_string lhs ++ to_string p ++ format.line },
(++ fmt) <$> format_aux lines deps thms es
| _ _ _ _ := return format.nil
meta instance : has_to_tactic_format entries :=
⟨λ es : entries,
let lines := pad_right $ es.l.map (λ en, to_string en.line),
deps := pad_right $ es.l.map (λ en, string.intercalate "," (en.deps.map to_string)),
thms := pad_right $ es.l.map entry.thm in
format_aux lines deps thms es.l⟩
meta def append_dep (filter : expr → tactic unit)
(es : entries) (e : expr) (deps : list nat) : tactic (list nat) :=
do { ei ← es.find e,
filter ei.expr,
return (ei.line :: deps) }
<|> return deps
meta def may_be_proof (e : expr) : tactic bool :=
do expr.sort u ← infer_type e >>= infer_type,
return $ bnot u.nonzero
end explode
open explode
meta mutual def explode.core, explode.args (filter : expr → tactic unit)
with explode.core : expr → bool → nat → entries → tactic entries
| e@(lam n bi d b) si depth es := do
m ← mk_fresh_name,
let l := local_const m n bi d,
let b' := instantiate_var b l,
if si then
let en : entry := ⟨l, es.size, depth, status.sintro, to_string n, []⟩ in do
es' ← explode.core b' si depth (es.add en),
return $ es'.add ⟨e, es'.size, depth, status.lam, "∀I", [es.size, es'.size - 1]⟩
else do
let en : entry := ⟨l, es.size, depth, status.intro, to_string n, []⟩,
es' ← explode.core b' si (depth + 1) (es.add en),
deps' ← explode.append_dep filter es' b' [],
deps' ← explode.append_dep filter es' l deps',
return $ es'.add ⟨e, es'.size, depth, status.lam, "∀I", deps'⟩
| e@(macro _ l) si depth es := explode.core l.head si depth es
| e si depth es := filter e >>
match get_app_fn_args e with
| (const n _, args) :=
explode.args e args depth es (to_string n) []
| (fn, []) := do
p ← pp fn,
let en : entry := ⟨fn, es.size, depth, status.reg, to_string p, []⟩,
return (es.add en)
| (fn, args) := do
es' ← explode.core fn ff depth es,
deps ← explode.append_dep filter es' fn [],
explode.args e args depth es' "∀E" deps
end
with explode.args : expr → list expr → nat → entries → string → list nat → tactic entries
| e (arg :: args) depth es thm deps := do
es' ← explode.core arg ff depth es <|> return es,
deps' ← explode.append_dep filter es' arg deps,
explode.args e args depth es' thm deps'
| e [] depth es thm deps :=
return (es.add ⟨e, es.size, depth, status.reg, thm, deps.reverse⟩)
meta def explode_expr (e : expr) (hide_non_prop := tt) : tactic entries :=
let filter := if hide_non_prop then λ e, may_be_proof e >>= guardb else λ _, skip in
tactic.explode.core filter e tt 0 (default _)
meta def explode (n : name) : tactic unit :=
do const n _ ← resolve_name n | fail "cannot resolve name",
d ← get_decl n,
v ← match d with
| (declaration.defn _ _ _ v _ _) := return v
| (declaration.thm _ _ _ v) := return v.get
| _ := fail "not a definition"
end,
t ← pp d.type,
explode_expr v <* trace (to_fmt n ++ " : " ++ t) >>= trace
open interactive lean lean.parser interaction_monad.result
@[user_command]
meta def explode_cmd (_ : parse $ tk "#explode") : parser unit :=
do n ← ident,
explode n
.
-- #explode iff_true_intro
-- #explode nat.strong_rec_on
end tactic
|
549c242e9143bd1a475f0f0e16ccbefe3b2b8742
|
4727251e0cd73359b15b664c3170e5d754078599
|
/src/linear_algebra/matrix/finite_dimensional.lean
|
aaeefb583faa515b9b741ed858f86ee03779217d
|
[
"Apache-2.0"
] |
permissive
|
Vierkantor/mathlib
|
0ea59ac32a3a43c93c44d70f441c4ee810ccceca
|
83bc3b9ce9b13910b57bda6b56222495ebd31c2f
|
refs/heads/master
| 1,658,323,012,449
| 1,652,256,003,000
| 1,652,256,003,000
| 209,296,341
| 0
| 1
|
Apache-2.0
| 1,568,807,655,000
| 1,568,807,655,000
| null |
UTF-8
|
Lean
| false
| false
| 1,402
|
lean
|
/-
Copyright (c) 2019 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Patrick Massot, Casper Putz, Anne Baanen
-/
import data.matrix.basic
import linear_algebra.finite_dimensional
/-!
# The finite-dimensional space of matrices
This file shows that `m` by `n` matrices form a finite-dimensional space,
and proves the `finrank` of that space is equal to `card m * card n`.
## Main definitions
* `matrix.finite_dimensional`: matrices form a finite dimensional vector space over a field `K`
* `matrix.finrank_matrix`: the `finrank` of `matrix m n R` is `card m * card n`
## Tags
matrix, finite dimensional, findim, finrank
-/
universes u v
namespace matrix
section finite_dimensional
variables {m n : Type*} [fintype m] [fintype n]
variables {R : Type v} [field R]
instance : finite_dimensional R (matrix m n R) :=
linear_equiv.finite_dimensional (linear_equiv.curry R m n)
/--
The dimension of the space of finite dimensional matrices
is the product of the number of rows and columns.
-/
@[simp] lemma finrank_matrix :
finite_dimensional.finrank R (matrix m n R) = fintype.card m * fintype.card n :=
by rw [@linear_equiv.finrank_eq R (matrix m n R) _ _ _ _ _ _ (linear_equiv.curry R m n).symm,
finite_dimensional.finrank_fintype_fun_eq_card, fintype.card_prod]
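-- Illustration (a schematic example, not proved in this file): with `m = fin 2`, `n = fin 3`
-- and `R = ℚ`, `finrank_matrix` specialises to
-- `finite_dimensional.finrank ℚ (matrix (fin 2) (fin 3) ℚ) = 2 * 3`.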
end finite_dimensional
end matrix
|
dfe16316486fc916a58378af95727b7b57a820db
|
c777c32c8e484e195053731103c5e52af26a25d1
|
/src/probability/probability_mass_function/constructions.lean
|
ae7df339f0f93a984e4ff9fb4a861b9784356ac5
|
[
"Apache-2.0"
] |
permissive
|
kbuzzard/mathlib
|
2ff9e85dfe2a46f4b291927f983afec17e946eb8
|
58537299e922f9c77df76cb613910914a479c1f7
|
refs/heads/master
| 1,685,313,702,744
| 1,683,974,212,000
| 1,683,974,212,000
| 128,185,277
| 1
| 0
| null | 1,522,920,600,000
| 1,522,920,600,000
| null |
UTF-8
|
Lean
| false
| false
| 9,658
|
lean
|
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Devon Tuma
-/
import probability.probability_mass_function.monad
/-!
# Specific Constructions of Probability Mass Functions
This file gives a number of different `pmf` constructions for common probability distributions.
`map` and `seq` allow pushing a `pmf α` along a function `f : α → β` (or distribution of
functions `f : pmf (α → β)`) to get a `pmf β`
`of_finset` and `of_fintype` simplify the construction of a `pmf α` from a function `f : α → ℝ≥0∞`,
by allowing the "sum equals 1" constraint to be in terms of `finset.sum` instead of `tsum`.
`normalize` constructs a `pmf α` by normalizing a function `f : α → ℝ≥0∞` by its sum,
and `filter` uses this to filter the support of a `pmf` and re-normalize the new distribution.
`bernoulli` represents the Bernoulli distribution on `bool`.
-/
namespace pmf
noncomputable theory
variables {α β γ : Type*}
open_locale classical big_operators nnreal ennreal
section map
/-- The functorial action of a function on a `pmf`. -/
def map (f : α → β) (p : pmf α) : pmf β := bind p (pure ∘ f)
variables (f : α → β) (p : pmf α) (b : β)
lemma monad_map_eq_map {α β : Type*} (f : α → β) (p : pmf α) : f <$> p = p.map f := rfl
@[simp] lemma map_apply : (map f p) b = ∑' a, if b = f a then p a else 0 := by simp [map]
@[simp] lemma support_map : (map f p).support = f '' p.support :=
set.ext (λ b, by simp [map, @eq_comm β b])
lemma mem_support_map_iff : b ∈ (map f p).support ↔ ∃ a ∈ p.support, f a = b := by simp
lemma bind_pure_comp : bind p (pure ∘ f) = map f p := rfl
lemma map_id : map id p = p := bind_pure _
lemma map_comp (g : β → γ) : (p.map f).map g = p.map (g ∘ f) := by simp [map]
lemma pure_map (a : α) : (pure a).map f = pure (f a) := pure_bind _ _
lemma map_bind (q : α → pmf β) (f : β → γ) :
(p.bind q).map f = p.bind (λ a, (q a).map f) := bind_bind _ _ _
@[simp] lemma bind_map (p : pmf α) (f : α → β) (q : β → pmf γ) :
(p.map f).bind q = p.bind (q ∘ f) :=
(bind_bind _ _ _).trans (congr_arg _ (funext (λ a, pure_bind _ _)))
@[simp] lemma map_const : p.map (function.const α b) = pure b :=
by simp only [map, bind_const, function.comp_const]
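-- A minimal usage sketch of the lemmas above (with `bool` merely as a convenient concrete
-- type): mapping along `id` leaves a distribution unchanged.
example (p : pmf bool) : p.map id = p := map_id p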
section measure
variable (s : set β)
@[simp] lemma to_outer_measure_map_apply :
(p.map f).to_outer_measure s = p.to_outer_measure (f ⁻¹' s) :=
by simp [map, set.indicator, to_outer_measure_apply p (f ⁻¹' s)]
@[simp] lemma to_measure_map_apply [measurable_space α] [measurable_space β] (hf : measurable f)
(hs : measurable_set s) : (p.map f).to_measure s = p.to_measure (f ⁻¹' s) :=
begin
rw [to_measure_apply_eq_to_outer_measure_apply _ s hs,
to_measure_apply_eq_to_outer_measure_apply _ (f ⁻¹' s) (measurable_set_preimage hf hs)],
exact to_outer_measure_map_apply f p s,
end
end measure
end map
section seq
/-- The monadic sequencing operation for `pmf`. -/
def seq (q : pmf (α → β)) (p : pmf α) : pmf β := q.bind (λ m, p.bind $ λ a, pure (m a))
variables (q : pmf (α → β)) (p : pmf α) (b : β)
lemma monad_seq_eq_seq {α β : Type*} (q : pmf (α → β)) (p : pmf α) : q <*> p = q.seq p := rfl
@[simp] lemma seq_apply : (seq q p) b = ∑' (f : α → β) (a : α), if b = f a then q f * p a else 0 :=
begin
simp only [seq, mul_boole, bind_apply, pure_apply],
refine tsum_congr (λ f, (ennreal.tsum_mul_left).symm.trans (tsum_congr (λ a, _))),
simpa only [mul_zero] using mul_ite (b = f a) (q f) (p a) 0
end
@[simp] lemma support_seq : (seq q p).support = ⋃ f ∈ q.support, f '' p.support :=
set.ext (λ b, by simp [-mem_support_iff, seq, @eq_comm β b])
lemma mem_support_seq_iff : b ∈ (seq q p).support ↔ ∃ (f ∈ q.support), b ∈ f '' p.support :=
by simp
end seq
instance : is_lawful_functor pmf :=
{ map_const_eq := λ α β, rfl,
id_map := λ α, bind_pure,
comp_map := λ α β γ g h x, (map_comp _ _ _).symm }
instance : is_lawful_monad pmf :=
{ bind_pure_comp_eq_map := λ α β f x, rfl,
bind_map_eq_seq := λ α β f x, rfl,
pure_bind := λ α β, pure_bind,
bind_assoc := λ α β γ, bind_bind }
section of_finset
/-- Given a finset `s` and a function `f : α → ℝ≥0∞` with sum `1` on `s`,
such that `f a = 0` for `a ∉ s`, we get a `pmf`. -/
def of_finset (f : α → ℝ≥0∞) (s : finset α) (h : ∑ a in s, f a = 1)
(h' : ∀ a ∉ s, f a = 0) : pmf α :=
⟨f, h ▸ has_sum_sum_of_ne_finset_zero h'⟩
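/- Usage sketch (illustration only; the numerals and the `finset` computations below are
assumptions that are not discharged here): a uniform distribution on `fin 3` could be built
as `of_finset (λ _, (3 : ℝ≥0∞)⁻¹) finset.univ h h'`, where `h` states that the constant
`(3 : ℝ≥0∞)⁻¹` sums to `1` over `finset.univ` and `h'` is vacuous because every `a : fin 3`
lies in `finset.univ`. -/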
variables {f : α → ℝ≥0∞} {s : finset α} (h : ∑ a in s, f a = 1) (h' : ∀ a ∉ s, f a = 0)
@[simp] lemma of_finset_apply (a : α) : of_finset f s h h' a = f a := rfl
@[simp] lemma support_of_finset : (of_finset f s h h').support = s ∩ (function.support f) :=
set.ext (λ a, by simpa [mem_support_iff] using mt (h' a))
lemma mem_support_of_finset_iff (a : α) : a ∈ (of_finset f s h h').support ↔ a ∈ s ∧ f a ≠ 0 :=
by simp
lemma of_finset_apply_of_not_mem {a : α} (ha : a ∉ s) : of_finset f s h h' a = 0 :=
h' a ha
section measure
variable (t : set α)
@[simp] lemma to_outer_measure_of_finset_apply :
(of_finset f s h h').to_outer_measure t = ∑' x, t.indicator f x :=
to_outer_measure_apply (of_finset f s h h') t
@[simp] lemma to_measure_of_finset_apply [measurable_space α] (ht : measurable_set t) :
(of_finset f s h h').to_measure t = ∑' x, t.indicator f x :=
(to_measure_apply_eq_to_outer_measure_apply _ t ht).trans
(to_outer_measure_of_finset_apply h h' t)
end measure
end of_finset
section of_fintype
/-- Given a finite type `α` and a function `f : α → ℝ≥0∞` with sum 1, we get a `pmf`. -/
def of_fintype [fintype α] (f : α → ℝ≥0∞) (h : ∑ a, f a = 1) : pmf α :=
of_finset f finset.univ h (λ a ha, absurd (finset.mem_univ a) ha)
variables [fintype α] {f : α → ℝ≥0∞} (h : ∑ a, f a = 1)
@[simp] lemma of_fintype_apply (a : α) : of_fintype f h a = f a := rfl
@[simp] lemma support_of_fintype : (of_fintype f h).support = function.support f := rfl
lemma mem_support_of_fintype_iff (a : α) : a ∈ (of_fintype f h).support ↔ f a ≠ 0 := iff.rfl
section measure
variable (s : set α)
@[simp] lemma to_outer_measure_of_fintype_apply :
(of_fintype f h).to_outer_measure s = ∑' x, s.indicator f x :=
to_outer_measure_apply (of_fintype f h) s
@[simp] lemma to_measure_of_fintype_apply [measurable_space α] (hs : measurable_set s) :
(of_fintype f h).to_measure s = ∑' x, s.indicator f x :=
(to_measure_apply_eq_to_outer_measure_apply _ s hs).trans
(to_outer_measure_of_fintype_apply h s)
end measure
end of_fintype
section normalize
/-- Given a function `f` with non-zero, non-infinite sum, get a `pmf` by normalizing `f` by its `tsum`. -/
def normalize (f : α → ℝ≥0∞) (hf0 : tsum f ≠ 0) (hf : tsum f ≠ ∞) : pmf α :=
⟨λ a, f a * (∑' x, f x)⁻¹, ennreal.summable.has_sum_iff.2
(ennreal.tsum_mul_right.trans (ennreal.mul_inv_cancel hf0 hf))⟩
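/- Sketch of the construction: each value is divided by the total mass, i.e.
`(normalize f hf0 hf) a = f a * (∑' x, f x)⁻¹` (see `normalize_apply` below).  So for an `f`
supported on two points with values `2` and `6` (total mass `8`, assumed nonzero and finite),
the normalized `pmf` assigns `2/8` and `6/8` to those points. -/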
variables {f : α → ℝ≥0∞} (hf0 : tsum f ≠ 0) (hf : tsum f ≠ ∞)
@[simp] lemma normalize_apply (a : α) : (normalize f hf0 hf) a = f a * (∑' x, f x)⁻¹ := rfl
@[simp] lemma support_normalize : (normalize f hf0 hf).support = function.support f :=
set.ext (λ a, by simp [hf, mem_support_iff])
lemma mem_support_normalize_iff (a : α) : a ∈ (normalize f hf0 hf).support ↔ f a ≠ 0 := by simp
end normalize
section filter
/-- Create a new `pmf` by filtering on a set with non-zero measure and re-normalizing. -/
def filter (p : pmf α) (s : set α) (h : ∃ a ∈ s, a ∈ p.support) : pmf α :=
pmf.normalize (s.indicator p) (by simpa using h) (p.tsum_coe_indicator_ne_top s)
variables {p : pmf α} {s : set α} (h : ∃ a ∈ s, a ∈ p.support)
@[simp]
lemma filter_apply (a : α) : (p.filter s h) a = (s.indicator p a) * (∑' a', (s.indicator p) a')⁻¹ :=
by rw [filter, normalize_apply]
lemma filter_apply_eq_zero_of_not_mem {a : α} (ha : a ∉ s) : (p.filter s h) a = 0 :=
by rw [filter_apply, set.indicator_apply_eq_zero.mpr (λ ha', absurd ha' ha), zero_mul]
lemma mem_support_filter_iff {a : α} : a ∈ (p.filter s h).support ↔ a ∈ s ∧ a ∈ p.support :=
(mem_support_normalize_iff _ _ _).trans set.indicator_apply_ne_zero
@[simp] lemma support_filter : (p.filter s h).support = s ∩ p.support:=
set.ext $ λ x, (mem_support_filter_iff _)
lemma filter_apply_eq_zero_iff (a : α) : (p.filter s h) a = 0 ↔ a ∉ s ∨ a ∉ p.support :=
by erw [apply_eq_zero_iff, support_filter, set.mem_inter_iff, not_and_distrib]
lemma filter_apply_ne_zero_iff (a : α) : (p.filter s h) a ≠ 0 ↔ a ∈ s ∧ a ∈ p.support :=
by rw [ne.def, filter_apply_eq_zero_iff, not_or_distrib, not_not, not_not]
end filter
section bernoulli
/-- A `pmf` which assigns probability `p` to `tt` and `1 - p` to `ff`. -/
def bernoulli (p : ℝ≥0∞) (h : p ≤ 1) : pmf bool :=
of_fintype (λ b, cond b p (1 - p)) (by simp [h])
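/- Usage sketch (illustration only; the numeral `2⁻¹ : ℝ≥0∞` and the bound `2⁻¹ ≤ 1` are
assumed rather than proved here): `bernoulli 2⁻¹ h` is the fair coin, and by `bernoulli_apply`
below it assigns `2⁻¹` to `tt` and `1 - 2⁻¹` to `ff`. -/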
variables {p : ℝ≥0∞} (h : p ≤ 1) (b : bool)
@[simp] lemma bernoulli_apply : bernoulli p h b = cond b p (1 - p) := rfl
@[simp] lemma support_bernoulli : (bernoulli p h).support = {b | cond b (p ≠ 0) (p ≠ 1)} :=
begin
refine set.ext (λ b, _),
induction b,
{ simp_rw [mem_support_iff, bernoulli_apply, bool.cond_ff, ne.def, tsub_eq_zero_iff_le, not_le],
exact ⟨ne_of_lt, lt_of_le_of_ne h⟩ },
{ simp only [mem_support_iff, bernoulli_apply, bool.cond_tt, set.mem_set_of_eq], }
end
lemma mem_support_bernoulli_iff : b ∈ (bernoulli p h).support ↔ cond b (p ≠ 0) (p ≠ 1) := by simp
end bernoulli
end pmf
|
ec3da0b5db19dace7e5f630ec93932ee7399930e
|
35677d2df3f081738fa6b08138e03ee36bc33cad
|
/src/category_theory/epi_mono.lean
|
cbdd362ed048cbabdb2a892a7ee81c2bf92bf8cd
|
[
"Apache-2.0"
] |
permissive
|
gebner/mathlib
|
eab0150cc4f79ec45d2016a8c21750244a2e7ff0
|
cc6a6edc397c55118df62831e23bfbd6e6c6b4ab
|
refs/heads/master
| 1,625,574,853,976
| 1,586,712,827,000
| 1,586,712,827,000
| 99,101,412
| 1
| 0
|
Apache-2.0
| 1,586,716,389,000
| 1,501,667,958,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 5,288
|
lean
|
/-
Copyright (c) 2019 Reid Barton. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Reid Barton, Scott Morrison
Facts about epimorphisms and monomorphisms.
The definitions of `epi` and `mono` are in `category_theory.category`,
since they are used by some lemmas for `iso`, which is used everywhere.
-/
import category_theory.adjunction.basic
import category_theory.fully_faithful
universes v₁ v₂ u₁ u₂
namespace category_theory
variables {C : Type u₁} [𝒞 : category.{v₁} C]
include 𝒞
section
variables {D : Type u₂} [𝒟 : category.{v₂} D]
include 𝒟
lemma left_adjoint_preserves_epi {F : C ⥤ D} {G : D ⥤ C} (adj : F ⊣ G)
{X Y : C} {f : X ⟶ Y} (hf : epi f) : epi (F.map f) :=
begin
constructor,
intros Z g h H,
replace H := congr_arg (adj.hom_equiv X Z) H,
rwa [adj.hom_equiv_naturality_left, adj.hom_equiv_naturality_left,
cancel_epi, equiv.apply_eq_iff_eq] at H
end
lemma right_adjoint_preserves_mono {F : C ⥤ D} {G : D ⥤ C} (adj : F ⊣ G)
{X Y : D} {f : X ⟶ Y} (hf : mono f) : mono (G.map f) :=
begin
constructor,
intros Z g h H,
replace H := congr_arg (adj.hom_equiv Z Y).symm H,
rwa [adj.hom_equiv_naturality_right_symm, adj.hom_equiv_naturality_right_symm,
cancel_mono, equiv.apply_eq_iff_eq] at H
end
lemma faithful_reflects_epi (F : C ⥤ D) [faithful F] {X Y : C} {f : X ⟶ Y}
(hf : epi (F.map f)) : epi f :=
⟨λ Z g h H, F.injectivity $
by rw [←cancel_epi (F.map f), ←F.map_comp, ←F.map_comp, H]⟩
lemma faithful_reflects_mono (F : C ⥤ D) [faithful F] {X Y : C} {f : X ⟶ Y}
(hf : mono (F.map f)) : mono f :=
⟨λ Z g h H, F.injectivity $
by rw [←cancel_mono (F.map f), ←F.map_comp, ←F.map_comp, H]⟩
end
/--
A split monomorphism is a morphism `f : X ⟶ Y` admitting a retraction `retraction f : Y ⟶ X`
such that `f ≫ retraction f = 𝟙 X`.
Every split monomorphism is a monomorphism.
-/
class split_mono {X Y : C} (f : X ⟶ Y) :=
(retraction : Y ⟶ X)
(id' : f ≫ retraction = 𝟙 X . obviously)
/--
A split epimorphism is a morphism `f : X ⟶ Y` admitting a section `section_ f : Y ⟶ X`
such that `section_ f ≫ f = 𝟙 Y`.
(Note that `section` is a reserved keyword, so we append an underscore.)
Every split epimorphism is an epimorphism.
-/
class split_epi {X Y : C} (f : X ⟶ Y) :=
(section_ : Y ⟶ X)
(id' : section_ ≫ f = 𝟙 Y . obviously)
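/- Concrete sketch in the category of types (this needs the instance from
`category_theory.types`, which is not imported here, so it stays a comment):
`int.of_nat : ℕ → ℤ` is a split mono with retraction `int.to_nat`, since
`int.to_nat (int.of_nat n) = n`; dually, a surjection together with a chosen family of
preimages is a split epi.  The `split_mono.mono` and `split_epi.epi` instances below recover
the plain cancellation properties from these data. -/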
/-- The chosen retraction of a split monomorphism. -/
def retraction {X Y : C} (f : X ⟶ Y) [split_mono f] : Y ⟶ X := split_mono.retraction.{v₁} f
@[simp, reassoc]
lemma split_mono.id {X Y : C} (f : X ⟶ Y) [split_mono f] : f ≫ retraction f = 𝟙 X :=
split_mono.id'
/-- The retraction of a split monomorphism is itself a split epimorphism. -/
instance retraction_split_epi {X Y : C} (f : X ⟶ Y) [split_mono f] : split_epi (retraction f) :=
{ section_ := f }
/--
The chosen section of a split epimorphism.
(Note that `section` is a reserved keyword, so we append an underscore.)
-/
def section_ {X Y : C} (f : X ⟶ Y) [split_epi f] : Y ⟶ X := split_epi.section_.{v₁} f
@[simp, reassoc]
lemma split_epi.id {X Y : C} (f : X ⟶ Y) [split_epi f] : section_ f ≫ f = 𝟙 Y :=
split_epi.id'
/-- The section of a split epimorphism is itself a split monomorphism. -/
instance section_split_mono {X Y : C} (f : X ⟶ Y) [split_epi f] : split_mono (section_ f) :=
{ retraction := f }
/-- Every iso is a split mono. -/
@[priority 100]
instance split_mono.of_iso {X Y : C} (f : X ⟶ Y) [is_iso f] : split_mono f :=
{ retraction := inv f }
/-- Every iso is a split epi. -/
@[priority 100]
instance split_epi.of_iso {X Y : C} (f : X ⟶ Y) [is_iso f] : split_epi f :=
{ section_ := inv f }
/-- Every split mono is a mono. -/
@[priority 100]
instance split_mono.mono {X Y : C} (f : X ⟶ Y) [split_mono f] : mono f :=
{ right_cancellation := λ Z g h w, begin replace w := w =≫ retraction f, simpa using w, end }
/-- Every split epi is an epi. -/
@[priority 100]
instance split_epi.epi {X Y : C} (f : X ⟶ Y) [split_epi f] : epi f :=
{ left_cancellation := λ Z g h w, begin replace w := section_ f ≫= w, simpa using w, end }
/-- Every split mono whose retraction is mono is an iso. -/
def is_iso.of_mono_retraction {X Y : C} {f : X ⟶ Y} [split_mono f] [mono $ retraction f]
: is_iso f :=
{ inv := retraction f,
inv_hom_id' := (cancel_mono_id $ retraction f).mp (by simp) }
/-- Every split epi whose section is epi is an iso. -/
def is_iso.of_epi_section {X Y : C} {f : X ⟶ Y} [split_epi f] [epi $ section_ f]
: is_iso f :=
{ inv := section_ f,
hom_inv_id' := (cancel_epi_id $ section_ f).mp (by simp) }
section
variables {D : Type u₂} [𝒟 : category.{v₂} D]
include 𝒟
/-- Split monomorphisms are also absolute monomorphisms. -/
instance {X Y : C} (f : X ⟶ Y) [split_mono f] (F : C ⥤ D) : split_mono (F.map f) :=
{ retraction := F.map (retraction f),
id' := by { rw [←functor.map_comp, split_mono.id, functor.map_id], } }
/-- Split epimorphisms are also absolute epimorphisms. -/
instance {X Y : C} (f : X ⟶ Y) [split_epi f] (F : C ⥤ D) : split_epi (F.map f) :=
{ section_ := F.map (section_ f),
id' := by { rw [←functor.map_comp, split_epi.id, functor.map_id], } }
end
end category_theory
|
ef9133ce3828d311857ff9c4bf135cc3a400776e
|
92b50235facfbc08dfe7f334827d47281471333b
|
/hott/types/cubical/squareover.hlean
|
21a7f841d10c7b61812fa22d24cc94d5c03d1ece
|
[
"Apache-2.0"
] |
permissive
|
htzh/lean
|
24f6ed7510ab637379ec31af406d12584d31792c
|
d70c79f4e30aafecdfc4a60b5d3512199200ab6e
|
refs/heads/master
| 1,607,677,731,270
| 1,437,089,952,000
| 1,437,089,952,000
| 37,078,816
| 0
| 0
| null | 1,433,780,956,000
| 1,433,780,955,000
| null |
UTF-8
|
Lean
| false
| false
| 7,087
|
hlean
|
/-
Copyright (c) 2015 Floris van Doorn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Floris van Doorn
Squareovers
-/
import .square
open eq equiv is_equiv equiv.ops
namespace eq
-- we give the argument B explicitly, because Lean would find (λa, B a) by itself, which
-- makes the type uglier (of course the two terms are definitionally equal)
inductive squareover {A : Type} (B : A → Type) {a₀₀ : A} {b₀₀ : B a₀₀} :
Π{a₂₀ a₀₂ a₂₂ : A}
{p₁₀ : a₀₀ = a₂₀} {p₁₂ : a₀₂ = a₂₂} {p₀₁ : a₀₀ = a₀₂} {p₂₁ : a₂₀ = a₂₂}
(s₁₁ : square p₁₀ p₁₂ p₀₁ p₂₁)
{b₂₀ : B a₂₀} {b₀₂ : B a₀₂} {b₂₂ : B a₂₂}
(q₁₀ : pathover B b₀₀ p₁₀ b₂₀) (q₁₂ : pathover B b₀₂ p₁₂ b₂₂)
(q₀₁ : pathover B b₀₀ p₀₁ b₀₂) (q₂₁ : pathover B b₂₀ p₂₁ b₂₂),
Type :=
idsquareo : squareover B ids idpo idpo idpo idpo
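/- Orientation sketch: a term of `squareover B s₁₁ q₁₀ q₁₂ q₀₁ q₂₁` is a square of pathovers
   in the fibration `B` lying over the square `s₁₁` in the base, just as a pathover lies over
   a path.  Its only constructor is the degenerate square over `ids` whose four sides are all
   `idpo`, mirroring the reflexivity constructor `ids` of `square`. -/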
variables {A A' : Type} {B : A → Type}
{a a' a'' a₀₀ a₂₀ a₄₀ a₀₂ a₂₂ a₂₄ a₀₄ a₄₂ a₄₄ : A}
/-a₀₀-/ {p₁₀ : a₀₀ = a₂₀} /-a₂₀-/ {p₃₀ : a₂₀ = a₄₀} /-a₄₀-/
{p₀₁ : a₀₀ = a₀₂} /-s₁₁-/ {p₂₁ : a₂₀ = a₂₂} /-s₃₁-/ {p₄₁ : a₄₀ = a₄₂}
/-a₀₂-/ {p₁₂ : a₀₂ = a₂₂} /-a₂₂-/ {p₃₂ : a₂₂ = a₄₂} /-a₄₂-/
{p₀₃ : a₀₂ = a₀₄} /-s₁₃-/ {p₂₃ : a₂₂ = a₂₄} /-s₃₃-/ {p₄₃ : a₄₂ = a₄₄}
/-a₀₄-/ {p₁₄ : a₀₄ = a₂₄} /-a₂₄-/ {p₃₄ : a₂₄ = a₄₄} /-a₄₄-/
{s₁₁ : square p₁₀ p₁₂ p₀₁ p₂₁}
{b₀₀ : B a₀₀} {b₂₀ : B a₂₀} {b₄₀ : B a₄₀}
{b₀₂ : B a₀₂} {b₂₂ : B a₂₂} {b₄₂ : B a₄₂}
{b₀₄ : B a₀₄} {b₂₄ : B a₂₄} {b₄₄ : B a₄₄}
/-b₀₀-/ {q₁₀ : b₀₀ =[p₁₀] b₂₀} /-b₂₀-/ {q₃₀ : b₂₀ =[p₃₀] b₄₀} /-b₄₀-/
{q₀₁ : b₀₀ =[p₀₁] b₀₂} /-t₁₁-/ {q₂₁ : b₂₀ =[p₂₁] b₂₂} /-t₃₁-/ {q₄₁ : b₄₀ =[p₄₁] b₄₂}
/-b₀₂-/ {q₁₂ : b₀₂ =[p₁₂] b₂₂} /-b₂₂-/ {q₃₂ : b₂₂ =[p₃₂] b₄₂} /-b₄₂-/
{q₀₃ : b₀₂ =[p₀₃] b₀₄} /-t₁₃-/ {q₂₃ : b₂₂ =[p₂₃] b₂₄} /-t₃₃-/ {q₄₃ : b₄₂ =[p₄₃] b₄₄}
/-b₀₄-/ {q₁₄ : b₀₄ =[p₁₄] b₂₄} /-b₂₄-/ {q₃₄ : b₂₄ =[p₃₄] b₄₄} /-b₄₄-/
definition squareo := @squareover A B a₀₀
definition idsquareo [reducible] [constructor] (b₀₀ : B a₀₀) := @squareover.idsquareo A B a₀₀ b₀₀
definition idso [reducible] [constructor] := @squareover.idsquareo A B a₀₀ b₀₀
definition apds (f : Πa, B a) (s : square p₁₀ p₁₂ p₀₁ p₂₁)
: squareover B s (apdo f p₁₀) (apdo f p₁₂) (apdo f p₀₁) (apdo f p₂₁) :=
square.rec_on s idso
definition vrflo : squareover B vrfl q₁₀ q₁₀ idpo idpo :=
by induction q₁₀; exact idso
definition hrflo : squareover B hrfl idpo idpo q₁₀ q₁₀ :=
by induction q₁₀; exact idso
definition vdeg_squareover {q₁₀' : b₀₀ =[p₁₀] b₂₀} (r : q₁₀ = q₁₀')
: squareover B vrfl q₁₀ q₁₀' idpo idpo :=
by induction r;exact vrflo
definition hdeg_squareover {q₁₀' : b₀₀ =[p₁₀] b₂₀} (r : q₁₀ = q₁₀')
: squareover B hrfl idpo idpo q₁₀ q₁₀' :=
by induction r; exact hrflo
definition pathover_of_squareover (t₁₁ : squareover B s₁₁ q₁₀ q₁₂ q₀₁ q₂₁)
: q₁₀ ⬝o q₂₁ =[eq_of_square s₁₁] q₀₁ ⬝o q₁₂ :=
by induction t₁₁; constructor
definition squareover_of_pathover {s : p₁₀ ⬝ p₂₁ = p₀₁ ⬝ p₁₂}
(r : q₁₀ ⬝o q₂₁ =[s] q₀₁ ⬝o q₁₂) : squareover B (square_of_eq s) q₁₀ q₁₂ q₀₁ q₂₁ :=
by induction q₁₂; esimp [concato] at r;induction r;induction q₂₁;induction q₁₀;constructor
definition pathover_top_of_squareover (t₁₁ : squareover B s₁₁ q₁₀ q₁₂ q₀₁ q₂₁)
: q₁₀ =[eq_top_of_square s₁₁] q₀₁ ⬝o q₁₂ ⬝o q₂₁⁻¹ᵒ :=
by induction t₁₁; constructor
definition squareover_of_pathover_top {s : p₁₀ = p₀₁ ⬝ p₁₂ ⬝ p₂₁⁻¹}
(r : q₁₀ =[s] q₀₁ ⬝o q₁₂ ⬝o q₂₁⁻¹ᵒ)
: squareover B (square_of_eq_top s) q₁₀ q₁₂ q₀₁ q₂₁ :=
by induction q₂₁; induction q₁₂; esimp at r;induction r;induction q₁₀;constructor
definition squareover_equiv_pathover (q₁₀ : b₀₀ =[p₁₀] b₂₀) (q₁₂ : b₀₂ =[p₁₂] b₂₂)
(q₀₁ : b₀₀ =[p₀₁] b₀₂) (q₂₁ : b₂₀ =[p₂₁] b₂₂)
: squareover B s₁₁ q₁₀ q₁₂ q₀₁ q₂₁ ≃ q₁₀ ⬝o q₂₁ =[eq_of_square s₁₁] q₀₁ ⬝o q₁₂ :=
begin
fapply equiv.MK,
{ exact pathover_of_squareover},
{ intro r, rewrite [-to_left_inv !square_equiv_eq s₁₁], apply squareover_of_pathover, exact r},
  { intro r, apply sorry}, -- need characterization of squareover lying over ids.
{ intro s, induction s, apply idp},
end
definition eq_of_vdeg_squareover {q₁₀' : b₀₀ =[p₁₀] b₂₀}
(p : squareover B vrfl q₁₀ q₁₀' idpo idpo) : q₁₀ = q₁₀' :=
sorry
-- definition vdeg_tr_squareover {q₁₂ : p₀₁ ▸ b₀₀ =[p₁₂] p₂₁ ▸ b₂₀} (r : q₁₀ =[_] q₁₂)
-- : squareover B s₁₁ q₁₀ q₁₂ !pathover_tr !pathover_tr :=
-- by induction p;exact vrflo
/- characterization of pathovers in pathovers -/
-- in this version the fibration (B) of the pathover does not depend on the variable a
definition pathover_pathover {a' a₂' : A'} {p : a' = a₂'} {f g : A' → A}
{b : Πa, B (f a)} {b₂ : Πa, B (g a)} {q : Π(a' : A'), f a' = g a'}
(r : pathover B (b a') (q a') (b₂ a'))
(r₂ : pathover B (b a₂') (q a₂') (b₂ a₂'))
(s : squareover B (natural_square_tr q p) r r₂
(pathover_ap B f (apdo b p)) (pathover_ap B g (apdo b₂ p)))
: pathover (λa, pathover B (b a) (q a) (b₂ a)) r p r₂ :=
by induction p;esimp at s; apply pathover_idp_of_eq; apply eq_of_vdeg_squareover; exact s
-- TODO: remove
definition pathover_pathover_fl {a' a₂' : A'} {p : a' = a₂'} {a₂ : A} {f : A' → A}
{b : Πa, B (f a)} {b₂ : B a₂} {q : Π(a' : A'), f a' = a₂} (r : pathover B (b a') (q a') b₂)
(r₂ : pathover B (b a₂') (q a₂') b₂)
(s : squareover B (natural_square_tr q p) r r₂
(pathover_ap B f (apdo b p)) (change_path !ap_constant⁻¹ idpo))
: r =[p] r₂ :=
by induction p; esimp at s; apply pathover_idp_of_eq; apply eq_of_vdeg_squareover; exact s
end eq
|
bf56d415432ef473c39cfda7918f108728e6e111
|
618003631150032a5676f229d13a079ac875ff77
|
/src/topology/maps.lean
|
f8c14e1b9ed611ce8c8b87ea67fbf29e2ab107d1
|
[
"Apache-2.0"
] |
permissive
|
awainverse/mathlib
|
939b68c8486df66cfda64d327ad3d9165248c777
|
ea76bd8f3ca0a8bf0a166a06a475b10663dec44a
|
refs/heads/master
| 1,659,592,962,036
| 1,590,987,592,000
| 1,590,987,592,000
| 268,436,019
| 1
| 0
|
Apache-2.0
| 1,590,990,500,000
| 1,590,990,500,000
| null |
UTF-8
|
Lean
| false
| false
| 15,684
|
lean
|
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Mario Carneiro, Patrick Massot
-/
import topology.order
/-!
# Specific classes of maps between topological spaces
This file introduces the following properties of a map `f : X → Y` between topological spaces:
* `is_open_map f` means the image of an open set under `f` is open.
* `is_closed_map f` means the image of a closed set under `f` is closed.
(Open and closed maps need not be continuous.)
* `inducing f` means the topology on `X` is the one induced via `f` from the topology on `Y`.
These behave like embeddings except they need not be injective. Instead, points of `X` which
are identified by `f` are also indistinguishable in the topology on `X`.
* `embedding f` means `f` is inducing and also injective. Equivalently, `f` identifies `X` with
a subspace of `Y`.
* `open_embedding f` means `f` is an embedding with open image, so it identifies `X` with an
open subspace of `Y`. Equivalently, `f` is an embedding and an open map.
* `closed_embedding f` similarly means `f` is an embedding with closed image, so it identifies
`X` with a closed subspace of `Y`. Equivalently, `f` is an embedding and a closed map.
* `quotient_map f` is the dual condition to `embedding f`: `f` is surjective and the topology
on `Y` is the one coinduced via `f` from the topology on `X`. Equivalently, `f` identifies
`Y` with a quotient of `X`. Quotient maps are also sometimes known as identification maps.
## References
* <https://en.wikipedia.org/wiki/Open_and_closed_maps>
* <https://en.wikipedia.org/wiki/Embedding#General_topology>
* <https://en.wikipedia.org/wiki/Quotient_space_(topology)#Quotient_map>
## Tags
open map, closed map, embedding, quotient map, identification map
-/
open set filter
open_locale topological_space
variables {α : Type*} {β : Type*} {γ : Type*} {δ : Type*}
section inducing
structure inducing [tα : topological_space α] [tβ : topological_space β] (f : α → β) : Prop :=
(induced : tα = tβ.induced f)
variables [topological_space α] [topological_space β] [topological_space γ] [topological_space δ]
lemma inducing_id : inducing (@id α) :=
⟨induced_id.symm⟩
protected lemma inducing.comp {g : β → γ} {f : α → β} (hg : inducing g) (hf : inducing f) :
inducing (g ∘ f) :=
⟨by rw [hf.induced, hg.induced, induced_compose]⟩
lemma inducing_of_inducing_compose {f : α → β} {g : β → γ} (hf : continuous f) (hg : continuous g)
(hgf : inducing (g ∘ f)) : inducing f :=
⟨le_antisymm
(by rwa ← continuous_iff_le_induced)
(by { rw [hgf.induced, ← continuous_iff_le_induced], apply hg.comp continuous_induced_dom })⟩
lemma inducing_open {f : α → β} {s : set α}
(hf : inducing f) (h : is_open (range f)) (hs : is_open s) : is_open (f '' s) :=
let ⟨t, ht, h_eq⟩ := by rw [hf.induced] at hs; exact hs in
have is_open (t ∩ range f), from is_open_inter ht h,
h_eq ▸ by rwa [image_preimage_eq_inter_range]
lemma inducing_is_closed {f : α → β} {s : set α}
(hf : inducing f) (h : is_closed (range f)) (hs : is_closed s) : is_closed (f '' s) :=
let ⟨t, ht, h_eq⟩ := by rw [hf.induced, is_closed_induced_iff] at hs; exact hs in
have is_closed (t ∩ range f), from is_closed_inter ht h,
h_eq.symm ▸ by rwa [image_preimage_eq_inter_range]
lemma inducing.nhds_eq_comap {f : α → β} (hf : inducing f) :
∀ (a : α), 𝓝 a = comap f (𝓝 $ f a) :=
(induced_iff_nhds_eq f).1 hf.induced
lemma inducing.map_nhds_eq {f : α → β} (hf : inducing f) (a : α) (h : range f ∈ 𝓝 (f a)) :
(𝓝 a).map f = 𝓝 (f a) :=
hf.induced.symm ▸ map_nhds_induced_eq h
lemma inducing.tendsto_nhds_iff {ι : Type*}
{f : ι → β} {g : β → γ} {a : filter ι} {b : β} (hg : inducing g) :
tendsto f a (𝓝 b) ↔ tendsto (g ∘ f) a (𝓝 (g b)) :=
by rw [tendsto, tendsto, hg.induced, nhds_induced, ← map_le_iff_le_comap, filter.map_map]
lemma inducing.continuous_iff {f : α → β} {g : β → γ} (hg : inducing g) :
continuous f ↔ continuous (g ∘ f) :=
by simp [continuous_iff_continuous_at, continuous_at, inducing.tendsto_nhds_iff hg]
lemma inducing.continuous {f : α → β} (hf : inducing f) : continuous f :=
hf.continuous_iff.mp continuous_id
end inducing
section embedding
/-- A function between topological spaces is an embedding if it is injective,
and for all `s : set α`, `s` is open iff it is the preimage of an open set. -/
structure embedding [tα : topological_space α] [tβ : topological_space β] (f : α → β)
extends inducing f : Prop :=
(inj : function.injective f)
variables [topological_space α] [topological_space β] [topological_space γ]
lemma embedding.mk' (f : α → β) (inj : function.injective f)
(induced : ∀a, comap f (𝓝 (f a)) = 𝓝 a) : embedding f :=
⟨⟨(induced_iff_nhds_eq f).2 (λ a, (induced a).symm)⟩, inj⟩
lemma embedding_id : embedding (@id α) :=
⟨inducing_id, assume a₁ a₂ h, h⟩
lemma embedding.comp {g : β → γ} {f : α → β} (hg : embedding g) (hf : embedding f) :
embedding (g ∘ f) :=
{ inj:= assume a₁ a₂ h, hf.inj $ hg.inj h,
..hg.to_inducing.comp hf.to_inducing }
lemma embedding_of_embedding_compose {f : α → β} {g : β → γ} (hf : continuous f) (hg : continuous g)
(hgf : embedding (g ∘ f)) : embedding f :=
{ induced := (inducing_of_inducing_compose hf hg hgf.to_inducing).induced,
inj := assume a₁ a₂ h, hgf.inj $ by simp [h, (∘)] }
lemma embedding_open {f : α → β} {s : set α}
(hf : embedding f) (h : is_open (range f)) (hs : is_open s) : is_open (f '' s) :=
inducing_open hf.1 h hs
lemma embedding_is_closed {f : α → β} {s : set α}
(hf : embedding f) (h : is_closed (range f)) (hs : is_closed s) : is_closed (f '' s) :=
inducing_is_closed hf.1 h hs
lemma embedding.map_nhds_eq {f : α → β}
(hf : embedding f) (a : α) (h : range f ∈ 𝓝 (f a)) : (𝓝 a).map f = 𝓝 (f a) :=
inducing.map_nhds_eq hf.1 a h
lemma embedding.tendsto_nhds_iff {ι : Type*}
{f : ι → β} {g : β → γ} {a : filter ι} {b : β} (hg : embedding g) :
tendsto f a (𝓝 b) ↔ tendsto (g ∘ f) a (𝓝 (g b)) :=
by rw [tendsto, tendsto, hg.induced, nhds_induced, ← map_le_iff_le_comap, filter.map_map]
lemma embedding.continuous_iff {f : α → β} {g : β → γ} (hg : embedding g) :
continuous f ↔ continuous (g ∘ f) :=
inducing.continuous_iff hg.1
lemma embedding.continuous {f : α → β} (hf : embedding f) : continuous f :=
inducing.continuous hf.1
lemma embedding.closure_eq_preimage_closure_image {e : α → β} (he : embedding e) (s : set α) :
closure s = e ⁻¹' closure (e '' s) :=
by { ext x, rw [set.mem_preimage, ← closure_induced he.inj, he.induced] }
end embedding
/-- A function between topological spaces is a quotient map if it is surjective,
and for all `s : set β`, `s` is open iff its preimage is an open set. -/
def quotient_map {α : Type*} {β : Type*} [tα : topological_space α] [tβ : topological_space β]
(f : α → β) : Prop :=
function.surjective f ∧ tβ = tα.coinduced f
namespace quotient_map
variables [topological_space α] [topological_space β] [topological_space γ] [topological_space δ]
protected lemma id : quotient_map (@id α) :=
⟨assume a, ⟨a, rfl⟩, coinduced_id.symm⟩
protected lemma comp {g : β → γ} {f : α → β} (hg : quotient_map g) (hf : quotient_map f) :
quotient_map (g ∘ f) :=
⟨hg.left.comp hf.left, by rw [hg.right, hf.right, coinduced_compose]⟩
protected lemma of_quotient_map_compose {f : α → β} {g : β → γ}
(hf : continuous f) (hg : continuous g)
(hgf : quotient_map (g ∘ f)) : quotient_map g :=
⟨assume b, let ⟨a, h⟩ := hgf.left b in ⟨f a, h⟩,
le_antisymm
(by rw [hgf.right, ← continuous_iff_coinduced_le];
apply continuous_coinduced_rng.comp hf)
(by rwa ← continuous_iff_coinduced_le)⟩
protected lemma continuous_iff {f : α → β} {g : β → γ} (hf : quotient_map f) :
continuous g ↔ continuous (g ∘ f) :=
by rw [continuous_iff_coinduced_le, continuous_iff_coinduced_le, hf.right, coinduced_compose]
protected lemma continuous {f : α → β} (hf : quotient_map f) : continuous f :=
hf.continuous_iff.mp continuous_id
end quotient_map
/-- A map `f : α → β` is said to be an *open map*, if the image of any open `U : set α`
is open in `β`. -/
def is_open_map [topological_space α] [topological_space β] (f : α → β) :=
∀ U : set α, is_open U → is_open (f '' U)
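/- Orientation sketch (standard examples, proved elsewhere in mathlib rather than in this
file): any homeomorphism is an open map, and so is the projection `prod.fst : α × β → α`,
since the image of a basic open box `set.prod s t` with `t` nonempty is the open set `s`.
By contrast, a constant map `ℝ → ℝ` is not open, as it sends open intervals to singletons. -/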
namespace is_open_map
variables [topological_space α] [topological_space β] [topological_space γ]
open function
protected lemma id : is_open_map (@id α) := assume s hs, by rwa [image_id]
protected lemma comp
{g : β → γ} {f : α → β} (hg : is_open_map g) (hf : is_open_map f) : is_open_map (g ∘ f) :=
by intros s hs; rw [image_comp]; exact hg _ (hf _ hs)
lemma is_open_range {f : α → β} (hf : is_open_map f) : is_open (range f) :=
by { rw ← image_univ, exact hf _ is_open_univ }
lemma image_mem_nhds {f : α → β} (hf : is_open_map f) {x : α} {s : set α} (hx : s ∈ 𝓝 x) :
f '' s ∈ 𝓝 (f x) :=
let ⟨t, hts, ht, hxt⟩ := mem_nhds_sets_iff.1 hx in
mem_sets_of_superset (mem_nhds_sets (hf t ht) (mem_image_of_mem _ hxt)) (image_subset _ hts)
lemma nhds_le {f : α → β} (hf : is_open_map f) (a : α) : 𝓝 (f a) ≤ (𝓝 a).map f :=
le_map $ λ s, hf.image_mem_nhds
lemma of_inverse {f : α → β} {f' : β → α}
(h : continuous f') (l_inv : left_inverse f f') (r_inv : right_inverse f f') :
is_open_map f :=
assume s hs,
have f' ⁻¹' s = f '' s, by ext x; simp [mem_image_iff_of_inverse r_inv l_inv],
this ▸ h s hs
lemma to_quotient_map {f : α → β}
(open_map : is_open_map f) (cont : continuous f) (surj : function.surjective f) :
quotient_map f :=
⟨ surj,
begin
ext s,
show is_open s ↔ is_open (f ⁻¹' s),
split,
{ exact cont s },
{ assume h,
rw ← @image_preimage_eq _ _ _ s surj,
exact open_map _ h }
end⟩
end is_open_map
lemma is_open_map_iff_nhds_le [topological_space α] [topological_space β] {f : α → β} :
is_open_map f ↔ ∀(a:α), 𝓝 (f a) ≤ (𝓝 a).map f :=
begin
refine ⟨λ hf, hf.nhds_le, λ h s hs, is_open_iff_mem_nhds.2 _⟩,
rintros b ⟨a, ha, rfl⟩,
exact h _ (filter.image_mem_map $ mem_nhds_sets hs ha)
end
section is_closed_map
variables [topological_space α] [topological_space β]
def is_closed_map (f : α → β) := ∀ U : set α, is_closed U → is_closed (f '' U)
end is_closed_map
namespace is_closed_map
variables [topological_space α] [topological_space β] [topological_space γ]
open function
protected lemma id : is_closed_map (@id α) := assume s hs, by rwa image_id
protected lemma comp {g : β → γ} {f : α → β} (hg : is_closed_map g) (hf : is_closed_map f) :
is_closed_map (g ∘ f) :=
by { intros s hs, rw image_comp, exact hg _ (hf _ hs) }
lemma of_inverse {f : α → β} {f' : β → α}
(h : continuous f') (l_inv : left_inverse f f') (r_inv : right_inverse f f') :
is_closed_map f :=
assume s hs,
have f' ⁻¹' s = f '' s, by ext x; simp [mem_image_iff_of_inverse r_inv l_inv],
this ▸ continuous_iff_is_closed.mp h s hs
end is_closed_map
section open_embedding
variables [topological_space α] [topological_space β] [topological_space γ]
/-- An open embedding is an embedding with open image. -/
structure open_embedding (f : α → β) extends embedding f : Prop :=
(open_range : is_open $ range f)
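/- Orientation sketch: the prototypical open embedding is the inclusion of an open subset
`U : set X` (with the subspace topology) into `X`; it is injective, inducing, and its range
is the open set `U`.  The formalisation of this fact is not part of this file and is only
mentioned for illustration. -/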
lemma open_embedding.open_iff_image_open {f : α → β} (hf : open_embedding f)
{s : set α} : is_open s ↔ is_open (f '' s) :=
⟨embedding_open hf.to_embedding hf.open_range,
λ h, begin
convert ←hf.to_embedding.continuous _ h,
apply preimage_image_eq _ hf.inj
end⟩
lemma open_embedding.is_open_map {f : α → β} (hf : open_embedding f) : is_open_map f :=
λ s, hf.open_iff_image_open.mp
lemma open_embedding.continuous {f : α → β} (hf : open_embedding f) : continuous f :=
hf.to_embedding.continuous
lemma open_embedding.open_iff_preimage_open {f : α → β} (hf : open_embedding f)
{s : set β} (hs : s ⊆ range f) : is_open s ↔ is_open (f ⁻¹' s) :=
begin
convert ←hf.open_iff_image_open.symm,
rwa [image_preimage_eq_inter_range, inter_eq_self_of_subset_left]
end
lemma open_embedding_of_embedding_open {f : α → β} (h₁ : embedding f)
(h₂ : is_open_map f) : open_embedding f :=
⟨h₁, by convert h₂ univ is_open_univ; simp⟩
lemma open_embedding_of_continuous_injective_open {f : α → β} (h₁ : continuous f)
(h₂ : function.injective f) (h₃ : is_open_map f) : open_embedding f :=
begin
refine open_embedding_of_embedding_open ⟨⟨_⟩, h₂⟩ h₃,
apply le_antisymm (continuous_iff_le_induced.mp h₁) _,
intro s,
change is_open _ ≤ is_open _,
rw is_open_induced_iff,
refine λ hs, ⟨f '' s, h₃ s hs, _⟩,
rw preimage_image_eq _ h₂
end
lemma open_embedding_id : open_embedding (@id α) :=
⟨embedding_id, by convert is_open_univ; apply range_id⟩
lemma open_embedding.comp {g : β → γ} {f : α → β}
(hg : open_embedding g) (hf : open_embedding f) : open_embedding (g ∘ f) :=
⟨hg.1.comp hf.1, show is_open (range (g ∘ f)),
by rw [range_comp, ←hg.open_iff_image_open]; exact hf.2⟩
end open_embedding
section closed_embedding
variables [topological_space α] [topological_space β] [topological_space γ]
/-- A closed embedding is an embedding with closed image. -/
structure closed_embedding (f : α → β) extends embedding f : Prop :=
(closed_range : is_closed $ range f)
variables {f : α → β}
lemma closed_embedding.continuous (hf : closed_embedding f) : continuous f :=
hf.to_embedding.continuous
lemma closed_embedding.closed_iff_image_closed (hf : closed_embedding f)
{s : set α} : is_closed s ↔ is_closed (f '' s) :=
⟨embedding_is_closed hf.to_embedding hf.closed_range,
λ h, begin
convert ←continuous_iff_is_closed.mp hf.continuous _ h,
apply preimage_image_eq _ hf.inj
end⟩
lemma closed_embedding.is_closed_map (hf : closed_embedding f) : is_closed_map f :=
λ s, hf.closed_iff_image_closed.mp
lemma closed_embedding.closed_iff_preimage_closed (hf : closed_embedding f)
{s : set β} (hs : s ⊆ range f) : is_closed s ↔ is_closed (f ⁻¹' s) :=
begin
convert ←hf.closed_iff_image_closed.symm,
rwa [image_preimage_eq_inter_range, inter_eq_self_of_subset_left]
end
lemma closed_embedding_of_embedding_closed (h₁ : embedding f)
(h₂ : is_closed_map f) : closed_embedding f :=
⟨h₁, by convert h₂ univ is_closed_univ; simp⟩
lemma closed_embedding_of_continuous_injective_closed (h₁ : continuous f)
(h₂ : function.injective f) (h₃ : is_closed_map f) : closed_embedding f :=
begin
refine closed_embedding_of_embedding_closed ⟨⟨_⟩, h₂⟩ h₃,
apply le_antisymm (continuous_iff_le_induced.mp h₁) _,
intro s',
change is_open _ ≤ is_open _,
rw [←is_closed_compl_iff, ←is_closed_compl_iff],
generalize : -s' = s,
rw is_closed_induced_iff,
refine λ hs, ⟨f '' s, h₃ s hs, _⟩,
rw preimage_image_eq _ h₂
end
lemma closed_embedding_id : closed_embedding (@id α) :=
⟨embedding_id, by convert is_closed_univ; apply range_id⟩
lemma closed_embedding.comp {g : β → γ} {f : α → β}
(hg : closed_embedding g) (hf : closed_embedding f) : closed_embedding (g ∘ f) :=
⟨hg.to_embedding.comp hf.to_embedding, show is_closed (range (g ∘ f)),
by rw [range_comp, ←hg.closed_iff_image_closed]; exact hf.closed_range⟩
end closed_embedding
|
f0607f7e97010132a234d9e9589270cc9c8f8a28
|
1e3a43e8ba59c6fe1c66775b6e833e721eaf1675
|
/src/field_theory/subfield.lean
|
2252c7594c106ea354accbf5c8914319292ffaf4
|
[
"Apache-2.0"
] |
permissive
|
Sterrs/mathlib
|
ea6910847b8dfd18500486de9ab0ee35704a3f52
|
d9327e433804004aa1dc65091bbe0de1e5a08c5e
|
refs/heads/master
| 1,650,769,884,257
| 1,587,808,694,000
| 1,587,808,694,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 4,448
|
lean
|
/-
Copyright (c) 2018 Andreas Swerdlow. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Andreas Swerdlow
-/
import ring_theory.subring
variables {F : Type*} [field F] (S : set F)
section prio
set_option default_priority 100 -- see Note [default priority]
class is_subfield extends is_subring S : Prop :=
(inv_mem : ∀ {x : F}, x ∈ S → x⁻¹ ∈ S)
end prio
instance is_subfield.field [is_subfield S] : field S :=
{ inv := λ x, ⟨x⁻¹, is_subfield.inv_mem x.2⟩,
zero_ne_one := λ h : 0 = 1, (@zero_ne_one F _) (subtype.ext.1 h),
mul_inv_cancel := λ a ha, subtype.ext.2 (mul_inv_cancel
(λ h, ha $ subtype.ext.2 h)),
inv_zero := subtype.ext.2 inv_zero,
..show comm_ring S, by apply_instance }
instance univ.is_subfield : is_subfield (@set.univ F) :=
{ inv_mem := by intros; trivial }
/- note: in the next two declarations, if we let type-class inference figure out the instance
`is_ring_hom.is_subring_preimage` then that instance only applies when particular instances of
`is_add_subgroup _` and `is_submonoid _` are chosen (which are not the default ones).
If we specify it explicitly, then it doesn't complain. -/
instance preimage.is_subfield {K : Type*} [field K]
(f : F →+* K) (s : set K) [is_subfield s] : is_subfield (f ⁻¹' s) :=
{ inv_mem := λ a (ha : f a ∈ s), show f a⁻¹ ∈ s,
by { rw [f.map_inv],
exact is_subfield.inv_mem ha },
..f.is_subring_preimage s }
instance image.is_subfield {K : Type*} [field K]
(f : F →+* K) (s : set F) [is_subfield s] : is_subfield (f '' s) :=
{ inv_mem := λ a ⟨x, xmem, ha⟩, ⟨x⁻¹, is_subfield.inv_mem xmem, ha ▸ f.map_inv⟩,
..f.is_subring_image s }
instance range.is_subfield {K : Type*} [field K]
(f : F →+* K) : is_subfield (set.range f) :=
by { rw ← set.image_univ, apply_instance }
namespace field
def closure : set F :=
{ x | ∃ y ∈ ring.closure S, ∃ z ∈ ring.closure S, y / z = x }
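/- Reading sketch: an element of `closure S` is a quotient `y / z` of two elements of the
ring closure of `S`.  For example (informally, not formalised here), for `F = ℚ` and `S = ∅`
the ring closure already contains every integer, so `closure ∅` is all of `ℚ`: every rational
is a quotient of two integers. -/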
variables {S}
theorem ring_closure_subset : ring.closure S ⊆ closure S :=
λ x hx, ⟨x, hx, 1, is_submonoid.one_mem, div_one x⟩
instance closure.is_submonoid : is_submonoid (closure S) :=
{ mul_mem := by rintros _ _ ⟨p, hp, q, hq, hq0, rfl⟩ ⟨r, hr, s, hs, hs0, rfl⟩;
exact ⟨p * r,
is_submonoid.mul_mem hp hr,
q * s,
is_submonoid.mul_mem hq hs,
(div_mul_div _ _ _ _).symm⟩,
one_mem := ring_closure_subset $ is_submonoid.one_mem }
instance closure.is_subfield : is_subfield (closure S) :=
have h0 : (0:F) ∈ closure S, from ring_closure_subset $ is_add_submonoid.zero_mem,
{ add_mem := begin
intros a b ha hb,
rcases (id ha) with ⟨p, hp, q, hq, rfl⟩,
rcases (id hb) with ⟨r, hr, s, hs, rfl⟩,
classical, by_cases hq0 : q = 0, by simp [hb, hq0], by_cases hs0 : s = 0, by simp [ha, hs0],
exact ⟨p * s + q * r, is_add_submonoid.add_mem (is_submonoid.mul_mem hp hs)
(is_submonoid.mul_mem hq hr), q * s, is_submonoid.mul_mem hq hs,
(div_add_div p r hq0 hs0).symm⟩
end,
zero_mem := h0,
neg_mem := begin
rintros _ ⟨p, hp, q, hq, rfl⟩,
exact ⟨-p, is_add_subgroup.neg_mem hp, q, hq, neg_div q p⟩
end,
inv_mem := begin
rintros _ ⟨p, hp, q, hq, rfl⟩,
classical, by_cases hp0 : p = 0, by simp [hp0, h0],
exact ⟨q, hq, p, hp, inv_div.symm⟩
end }
theorem mem_closure {a : F} (ha : a ∈ S) : a ∈ closure S :=
ring_closure_subset $ ring.mem_closure ha
theorem subset_closure : S ⊆ closure S :=
λ _, mem_closure
theorem closure_subset {T : set F} [is_subfield T] (H : S ⊆ T) : closure S ⊆ T :=
by rintros _ ⟨p, hp, q, hq, hq0, rfl⟩; exact is_submonoid.mul_mem (ring.closure_subset H hp)
(is_subfield.inv_mem $ ring.closure_subset H hq)
theorem closure_subset_iff (s t : set F) [is_subfield t] : closure s ⊆ t ↔ s ⊆ t :=
⟨set.subset.trans subset_closure, closure_subset⟩
theorem closure_mono {s t : set F} (H : s ⊆ t) : closure s ⊆ closure t :=
closure_subset $ set.subset.trans H subset_closure
end field
lemma is_subfield_Union_of_directed {ι : Type*} [hι : nonempty ι]
(s : ι → set F) [∀ i, is_subfield (s i)]
(directed : ∀ i j, ∃ k, s i ⊆ s k ∧ s j ⊆ s k) :
is_subfield (⋃i, s i) :=
{ inv_mem := λ x hx, let ⟨i, hi⟩ := set.mem_Union.1 hx in
set.mem_Union.2 ⟨i, is_subfield.inv_mem hi⟩,
to_is_subring := is_subring_Union_of_directed s directed }
|
4208287249f799298b14a28c2fc43703c0e1dec7
|
94e33a31faa76775069b071adea97e86e218a8ee
|
/src/representation_theory/fdRep.lean
|
bdc8703f7707cb2e3dbf9d2252dcac16c8a11031
|
[
"Apache-2.0"
] |
permissive
|
urkud/mathlib
|
eab80095e1b9f1513bfb7f25b4fa82fa4fd02989
|
6379d39e6b5b279df9715f8011369a301b634e41
|
refs/heads/master
| 1,658,425,342,662
| 1,658,078,703,000
| 1,658,078,703,000
| 186,910,338
| 0
| 0
|
Apache-2.0
| 1,568,512,083,000
| 1,557,958,709,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 4,530
|
lean
|
/-
Copyright (c) 2022 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import representation_theory.Rep
import algebra.category.FinVect
import representation_theory.basic
/-!
# `fdRep k G` is the category of finite dimensional `k`-linear representations of `G`.
If `V : fdRep k G`, there is a coercion that allows you to treat `V` as a type,
and this type comes equipped with `module k V` and `finite_dimensional k V` instances.
Also `V.ρ` gives the homomorphism `G →* (V →ₗ[k] V)`.
Conversely, given a homomorphism `ρ : G →* (V →ₗ[k] V)`,
you can construct the bundled representation as `fdRep.of ρ`.
We verify that `fdRep k G` is a rigid monoidal category.
## TODO
* `fdRep k G` has all finite (co)limits.
* `fdRep k G` is abelian.
* `fdRep k G ≌ FinVect (monoid_algebra k G)` (this will require generalising `FinVect` first).
* Upgrade the right rigid structure to a rigid structure.
-/
universes u
open category_theory
open category_theory.limits
/-- The category of finite dimensional `k`-linear representations of a monoid `G`. -/
@[derive [large_category, concrete_category/-, has_limits, has_colimits-/]]
abbreviation fdRep (k G : Type u) [field k] [monoid G] :=
Action (FinVect.{u} k) (Mon.of G)
namespace fdRep
variables {k G : Type u} [field k] [monoid G]
instance : has_coe_to_sort (fdRep k G) (Type u) := concrete_category.has_coe_to_sort _
instance (V : fdRep k G) : add_comm_group V :=
by { change add_comm_group ((forget₂ (fdRep k G) (FinVect k)).obj V), apply_instance, }
instance (V : fdRep k G) : module k V :=
by { change module k ((forget₂ (fdRep k G) (FinVect k)).obj V), apply_instance, }
instance (V : fdRep k G) : finite_dimensional k V :=
by { change finite_dimensional k ((forget₂ (fdRep k G) (FinVect k)).obj V), apply_instance, }
/-- The monoid homomorphism corresponding to the action of `G` on `V : fdRep k G`. -/
def ρ (V : fdRep k G) : G →* (V →ₗ[k] V) := V.ρ
/-- The underlying `linear_equiv` of an isomorphism of representations. -/
def iso_to_linear_equiv {V W : fdRep k G} (i : V ≅ W) : V ≃ₗ[k] W :=
FinVect.iso_to_linear_equiv ((Action.forget (FinVect k) (Mon.of G)).map_iso i)
lemma iso.conj_ρ {V W : fdRep k G} (i : V ≅ W) (g : G) :
W.ρ g = (fdRep.iso_to_linear_equiv i).conj (V.ρ g) :=
begin
rw [fdRep.iso_to_linear_equiv, ←FinVect.iso.conj_eq_conj, iso.conj_apply],
rw [iso.eq_inv_comp ((Action.forget (FinVect k) (Mon.of G)).map_iso i)],
exact (i.hom.comm g).symm,
end
-- This works well with the new design for representations:
example (V : fdRep k G) : G →* (V →ₗ[k] V) := V.ρ
/-- Lift an unbundled representation to `fdRep`. -/
@[simps ρ]
def of {V : Type u} [add_comm_group V] [module k V] [finite_dimensional k V]
(ρ : representation k G V) : fdRep k G :=
⟨FinVect.of k V, ρ⟩
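/- Usage sketch (the name `1` for the trivial representation is an assumption about the
`representation` API, used here only for illustration): since `representation k G k` is
`G →* (k →ₗ[k] k)`, the constant homomorphism `1` sends every `g` to `linear_map.id`, and
`fdRep.of (1 : representation k G k)` bundles it as the one-dimensional trivial
representation. -/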
instance : has_forget₂ (fdRep k G) (Rep k G) :=
{ forget₂ := (forget₂ (FinVect k) (Module k)).map_Action (Mon.of G), }
-- Verify that the monoidal structure is available.
example : monoidal_category (fdRep k G) := by apply_instance
end fdRep
namespace fdRep
variables {k G : Type u} [field k] [group G]
-- Verify that the rigid structure is available when the monoid is a group.
noncomputable instance : right_rigid_category (fdRep k G) :=
by { change right_rigid_category (Action (FinVect k) (Group.of G)), apply_instance, }
end fdRep
namespace fdRep
open representation
variables {k G V W : Type u} [field k] [group G]
variables [add_comm_group V] [module k V] [add_comm_group W] [module k W]
variables [finite_dimensional k V] [finite_dimensional k W]
variables (ρV : representation k G V) (ρW : representation k G W)
/-- Auxiliary definition for `fdRep.dual_tensor_iso_lin_hom`. -/
noncomputable def dual_tensor_iso_lin_hom_aux :
((fdRep.of ρV.dual) ⊗ (fdRep.of ρW)).V ≅ (fdRep.of (lin_hom ρV ρW)).V :=
(dual_tensor_hom_equiv k V W).to_FinVect_iso
/-- When `V` and `W` are finite dimensional representations of a group `G`, the isomorphism
`dual_tensor_hom_equiv k V W` of vector spaces induces an isomorphism of representations. -/
noncomputable def dual_tensor_iso_lin_hom :
(fdRep.of ρV.dual) ⊗ (fdRep.of ρW) ≅ fdRep.of (lin_hom ρV ρW) :=
begin
apply Action.mk_iso (dual_tensor_iso_lin_hom_aux ρV ρW),
convert (dual_tensor_hom_comm ρV ρW),
end
@[simp] lemma dual_tensor_iso_lin_hom_hom_hom :
(dual_tensor_iso_lin_hom ρV ρW).hom.hom = dual_tensor_hom k V W := rfl
end fdRep
|
d0780329f4545e778208e145ec76adf550229d2b
|
30b012bb72d640ec30c8fdd4c45fdfa67beb012c
|
/algebra/order_functions.lean
|
57931e8dce85e43bf2be16611631cc36906f5884
|
[
"Apache-2.0"
] |
permissive
|
kckennylau/mathlib
|
21fb810b701b10d6606d9002a4004f7672262e83
|
47b3477e20ffb5a06588dd3abb01fe0fe3205646
|
refs/heads/master
| 1,634,976,409,281
| 1,542,042,832,000
| 1,542,319,733,000
| 109,560,458
| 0
| 0
|
Apache-2.0
| 1,542,369,208,000
| 1,509,867,494,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 7,805
|
lean
|
/-
Copyright (c) 2017 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
import algebra.ordered_group order.lattice
open lattice
universes u v
variables {α : Type u} {β : Type v}
attribute [simp] max_eq_left max_eq_right min_eq_left min_eq_right
section
variables [decidable_linear_order α] [decidable_linear_order β] {f : α → β} {a b c d : α}
-- translate from lattices to linear orders (sup → max, inf → min)
lemma le_min_iff : c ≤ min a b ↔ c ≤ a ∧ c ≤ b := le_inf_iff
lemma max_le_iff : max a b ≤ c ↔ a ≤ c ∧ b ≤ c := sup_le_iff
lemma max_le_max : a ≤ c → b ≤ d → max a b ≤ max c d := sup_le_sup
lemma min_le_min : a ≤ c → b ≤ d → min a b ≤ min c d := inf_le_inf
lemma le_max_left_of_le : a ≤ b → a ≤ max b c := le_sup_left_of_le
lemma le_max_right_of_le : a ≤ c → a ≤ max b c := le_sup_right_of_le
lemma min_le_left_of_le : a ≤ c → min a b ≤ c := inf_le_left_of_le
lemma min_le_right_of_le : b ≤ c → min a b ≤ c := inf_le_right_of_le
lemma max_min_distrib_left : max a (min b c) = min (max a b) (max a c) := sup_inf_left
lemma max_min_distrib_right : max (min a b) c = min (max a c) (max b c) := sup_inf_right
lemma min_max_distrib_left : min a (max b c) = max (min a b) (min a c) := inf_sup_left
lemma min_max_distrib_right : min (max a b) c = max (min a c) (min b c) := inf_sup_right
instance max_idem : is_idempotent α max := by apply_instance
instance min_idem : is_idempotent α min := by apply_instance
lemma min_le_iff : min a b ≤ c ↔ a ≤ c ∨ b ≤ c :=
have a ≤ b → (a ≤ c ∨ b ≤ c ↔ a ≤ c),
from assume h, or_iff_left_of_imp $ le_trans h,
have b ≤ a → (a ≤ c ∨ b ≤ c ↔ b ≤ c),
from assume h, or_iff_right_of_imp $ le_trans h,
by cases le_total a b; simp *
lemma le_max_iff : a ≤ max b c ↔ a ≤ b ∨ a ≤ c :=
have b ≤ c → (a ≤ b ∨ a ≤ c ↔ a ≤ c),
from assume h, or_iff_right_of_imp $ assume h', le_trans h' h,
have c ≤ b → (a ≤ b ∨ a ≤ c ↔ a ≤ b),
from assume h, or_iff_left_of_imp $ assume h', le_trans h' h,
by cases le_total b c; simp *
lemma max_lt_iff : max a b < c ↔ (a < c ∧ b < c) :=
by rw [lt_iff_not_ge]; simp [(≥), le_max_iff, not_or_distrib]
lemma lt_min_iff : a < min b c ↔ (a < b ∧ a < c) :=
by rw [lt_iff_not_ge]; simp [(≥), min_le_iff, not_or_distrib]
lemma lt_max_iff : a < max b c ↔ a < b ∨ a < c :=
by rw [lt_iff_not_ge]; simp [(≥), max_le_iff, not_and_distrib]
lemma min_lt_iff : min a b < c ↔ a < c ∨ b < c :=
by rw [lt_iff_not_ge]; simp [(≥), le_min_iff, not_and_distrib]
theorem min_right_comm (a b c : α) : min (min a b) c = min (min a c) b :=
right_comm min min_comm min_assoc a b c
theorem max.left_comm (a b c : α) : max a (max b c) = max b (max a c) :=
left_comm max max_comm max_assoc a b c
theorem max.right_comm (a b c : α) : max (max a b) c = max (max a c) b :=
right_comm max max_comm max_assoc a b c
lemma max_distrib_of_monotone (hf : monotone f) : f (max a b) = max (f a) (f b) :=
by cases le_total a b; simp [h, hf h]
lemma min_distrib_of_monotone (hf : monotone f) : f (min a b) = min (f a) (f b) :=
by cases le_total a b; simp [h, hf h]
theorem min_choice (a b : α) : min a b = a ∨ min a b = b :=
by by_cases h : a ≤ b; simp [min, h]
theorem max_choice (a b : α) : max a b = a ∨ max a b = b :=
by by_cases h : a ≤ b; simp [max, h]
lemma le_of_max_le_left {a b c : α} (h : max a b ≤ c) : a ≤ c :=
le_trans (le_max_left _ _) h
lemma le_of_max_le_right {a b c : α} (h : max a b ≤ c) : b ≤ c :=
le_trans (le_max_right _ _) h
end
lemma min_add {α : Type u} [decidable_linear_ordered_comm_group α] (a b c : α) :
min a b + c = min (a + c) (b + c) :=
if hle : a ≤ b then
have a - c ≤ b - c, from sub_le_sub hle (le_refl _),
by simp * at *
else
have b - c ≤ a - c, from sub_le_sub (le_of_lt (lt_of_not_ge hle)) (le_refl _),
by simp * at *
lemma min_sub {α : Type u} [decidable_linear_ordered_comm_group α] (a b c : α) :
min a b - c = min (a - c) (b - c) :=
by simp [min_add, sub_eq_add_neg]
section decidable_linear_ordered_comm_group
variables [decidable_linear_ordered_comm_group α] {a b c : α}
attribute [simp] abs_zero abs_neg
def abs_add := @abs_add_le_abs_add_abs
theorem abs_le : abs a ≤ b ↔ - b ≤ a ∧ a ≤ b :=
⟨assume h, ⟨neg_le_of_neg_le $ le_trans (neg_le_abs_self _) h, le_trans (le_abs_self _) h⟩,
assume ⟨h₁, h₂⟩, abs_le_of_le_of_neg_le h₂ $ neg_le_of_neg_le h₁⟩
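/- Worked instance (kept as a comment; the integer numerals and their decidability are
assumptions not otherwise exercised in this file): `abs_le` turns `abs a ≤ 3` into the
two-sided bound `-3 ≤ a ∧ a ≤ 3`, so for `a = (-2 : ℤ)` one would write
`abs_le.2 ⟨dec_trivial, dec_trivial⟩ : abs (-2 : ℤ) ≤ 3`. -/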
lemma abs_lt : abs a < b ↔ - b < a ∧ a < b :=
⟨assume h, ⟨neg_lt_of_neg_lt $ lt_of_le_of_lt (neg_le_abs_self _) h, lt_of_le_of_lt (le_abs_self _) h⟩,
assume ⟨h₁, h₂⟩, abs_lt_of_lt_of_neg_lt h₂ $ neg_lt_of_neg_lt h₁⟩
lemma abs_sub_le_iff : abs (a - b) ≤ c ↔ a - b ≤ c ∧ b - a ≤ c :=
by rw [abs_le, neg_le_sub_iff_le_add, @sub_le_iff_le_add' _ _ b, and_comm]
lemma abs_sub_lt_iff : abs (a - b) < c ↔ a - b < c ∧ b - a < c :=
by rw [abs_lt, neg_lt_sub_iff_lt_add, @sub_lt_iff_lt_add' _ _ b, and_comm]
def sub_abs_le_abs_sub := @abs_sub_abs_le_abs_sub
lemma abs_abs_sub_le_abs_sub (a b : α) : abs (abs a - abs b) ≤ abs (a - b) :=
abs_sub_le_iff.2 ⟨sub_abs_le_abs_sub _ _, by rw abs_sub; apply sub_abs_le_abs_sub⟩
lemma abs_eq (hb : b ≥ 0) : abs a = b ↔ a = b ∨ a = -b :=
iff.intro
begin
cases le_total a 0 with a_nonpos a_nonneg,
{ rw [abs_of_nonpos a_nonpos, neg_eq_iff_neg_eq, eq_comm], exact or.inr },
{ rw [abs_of_nonneg a_nonneg, eq_comm], exact or.inl }
end
(by intro h; cases h; subst h; try { rw abs_neg }; exact abs_of_nonneg hb)
@[simp] lemma abs_eq_zero : abs a = 0 ↔ a = 0 :=
⟨eq_zero_of_abs_eq_zero, λ e, e.symm ▸ abs_zero⟩
lemma abs_pos_iff {a : α} : 0 < abs a ↔ a ≠ 0 :=
⟨λ h, mt abs_eq_zero.2 (ne_of_gt h), abs_pos_of_ne_zero⟩
lemma abs_le_max_abs_abs (hab : a ≤ b) (hbc : b ≤ c) : abs b ≤ max (abs a) (abs c) :=
abs_le_of_le_of_neg_le
(by simp [le_max_iff, le_trans hbc (le_abs_self c)])
(by simp [le_max_iff, le_trans (neg_le_neg hab) (neg_le_abs_self a)])
lemma min_le_add_of_nonneg_right {a b : α} (hb : b ≥ 0) : min a b ≤ a + b :=
calc
min a b ≤ a : by apply min_le_left
... ≤ a + b : le_add_of_nonneg_right hb
lemma min_le_add_of_nonneg_left {a b : α} (ha : a ≥ 0) : min a b ≤ a + b :=
calc
min a b ≤ b : by apply min_le_right
... ≤ a + b : le_add_of_nonneg_left ha
lemma max_le_add_of_nonneg {a b : α} (ha : a ≥ 0) (hb : b ≥ 0) : max a b ≤ a + b :=
max_le_iff.2 (by split; simpa)
end decidable_linear_ordered_comm_group
section decidable_linear_ordered_comm_ring
variables [decidable_linear_ordered_comm_ring α] {a b c d : α}
@[simp] lemma abs_one : abs (1 : α) = 1 := abs_of_pos zero_lt_one
lemma monotone_mul_of_nonneg (ha : 0 ≤ a) : monotone (λ x, a*x) :=
assume b c b_le_c, mul_le_mul_of_nonneg_left b_le_c ha
lemma mul_max_of_nonneg (b c : α) (ha : 0 ≤ a) : a * max b c = max (a * b) (a * c) :=
max_distrib_of_monotone (monotone_mul_of_nonneg ha)
lemma mul_min_of_nonneg (b c : α) (ha : 0 ≤ a) : a * min b c = min (a * b) (a * c) :=
min_distrib_of_monotone (monotone_mul_of_nonneg ha)
lemma max_mul_mul_le_max_mul_max (b c : α) (ha : 0 ≤ a) (hd: 0 ≤ d) :
max (a * b) (d * c) ≤ max a c * max d b :=
have ba : b * a ≤ max d b * max c a,
from mul_le_mul (le_max_right d b) (le_max_right c a) ha (le_trans hd (le_max_left d b)),
have cd : c * d ≤ max a c * max b d,
from mul_le_mul (le_max_right a c) (le_max_right b d) hd (le_trans ha (le_max_left a c)),
max_le
(by simpa [mul_comm, max_comm] using ba)
(by simpa [mul_comm, max_comm] using cd)
end decidable_linear_ordered_comm_ring
|
89ef49163f8206de7d782728edbaa960c35267ec
|
d1a52c3f208fa42c41df8278c3d280f075eb020c
|
/src/Lean/Meta/CollectMVars.lean
|
3bc337321719319cf0c9cf2c4092c45b10b0906f
|
[
"Apache-2.0",
"LLVM-exception",
"NCSA",
"LGPL-3.0-only",
"LicenseRef-scancode-inner-net-2.0",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"Spencer-94",
"LGPL-2.1-or-later",
"HPND",
"LicenseRef-scancode-pcre",
"ISC",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"SunPro",
"CMU-Mach"
] |
permissive
|
cipher1024/lean4
|
6e1f98bb58e7a92b28f5364eb38a14c8d0aae393
|
69114d3b50806264ef35b57394391c3e738a9822
|
refs/heads/master
| 1,642,227,983,603
| 1,642,011,696,000
| 1,642,011,696,000
| 228,607,691
| 0
| 0
|
Apache-2.0
| 1,576,584,269,000
| 1,576,584,268,000
| null |
UTF-8
|
Lean
| false
| false
| 1,669
|
lean
|
/-
Copyright (c) 2020 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
import Lean.Util.CollectMVars
import Lean.Meta.Basic
namespace Lean.Meta
/--
Collect unassigned metavariables occurring in the given expression.
Remark: if `e` contains `?m` and there is a `t` assigned to `?m`, we
collect unassigned metavariables occurring in `t`.
Remark: if `e` contains `?m` and `?m` is delayed assigned to some term `t`,
we collect `?m` and unassigned metavariables occurring in `t`.
We collect `?m` because it has not been assigned yet. -/
partial def collectMVars (e : Expr) : StateRefT CollectMVars.State MetaM Unit := do
let e ← instantiateMVars e
let s ← get
let resultSavedSize := s.result.size
let s := e.collectMVars s
set s
for mvarId in s.result[resultSavedSize:] do
match (← getDelayedAssignment? mvarId) with
| none => pure ()
| some d => collectMVars d.val
/-- Return the unassigned metavariables occurring in the given expression. See `collectMVars`. -/
def getMVars (e : Expr) : MetaM (Array MVarId) := do
let (_, s) ← (collectMVars e).run {}
pure s.result
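/-
Usage sketch (a hypothetical elaboration step, for illustration only): after elaborating a
term `e`, a tactic can run `let mvarIds ← getMVars e` and turn each returned `MVarId` into a
new goal, so the user is asked for exactly the metavariables that remain unassigned
(including the targets of delayed assignments, per the remark on `collectMVars` above).
-/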
/-- Similar to `getMVars`, but filters out metavariables that have a delayed assignment. -/
def getMVarsNoDelayed (e : Expr) : MetaM (Array MVarId) := do
let mvarIds ← getMVars e
mvarIds.filterM fun mvarId => not <$> isDelayedAssigned mvarId
def collectMVarsAtDecl (d : Declaration) : StateRefT CollectMVars.State MetaM Unit :=
d.forExprM collectMVars
def getMVarsAtDecl (d : Declaration) : MetaM (Array MVarId) := do
let (_, s) ← (collectMVarsAtDecl d).run {}
pure s.result
end Lean.Meta
|
c496cd7f2a52530fef95857d638b64f4d1c2e415
|
a9d0fb7b0e4f802bd3857b803e6c5c23d87fef91
|
/library/data/tuple.lean
|
61407155099facab93ee53b3e85f85bfb7dd4ffc
|
[
"Apache-2.0"
] |
permissive
|
soonhokong/lean-osx
|
4a954262c780e404c1369d6c06516161d07fcb40
|
3670278342d2f4faa49d95b46d86642d7875b47c
|
refs/heads/master
| 1,611,410,334,552
| 1,474,425,686,000
| 1,474,425,686,000
| 12,043,103
| 5
| 1
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 13,062
|
lean
|
/-
Copyright (c) 2015 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Leonardo de Moura
Tuples are lists of a fixed size.
They are implemented as a subtype.
-/
import logic data.list data.fin
open nat list subtype function
attribute [reducible]
definition tuple (A : Type) (n : nat) := {l : list A | length l = n}
namespace tuple
variables {A B C : Type}
attribute [recursor 4]
theorem induction_on {P : ∀ {n}, tuple A n → Prop}
: ∀ {n} (v : tuple A n), (∀ (l : list A) {n : nat} (h : length l = n), P (tag l h)) → P v
| n (tag l h) H := @H l n h
definition nil : tuple A 0 :=
tag [] rfl
lemma length_succ {n : nat} {l : list A} (a : A) : length l = n → length (a::l) = succ n :=
λ h, congr_arg succ h
definition cons {n : nat} : A → tuple A n → tuple A (succ n)
| a (tag v h) := tag (a::v) (length_succ a h)
notation a :: b := cons a b
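/- Usage sketch (the numerals are for illustration only): with the notation above,
   `(1:nat) :: (2:nat) :: nil` is a term of `tuple nat 2`; each `cons` extends the length
   proof of the underlying list via `length_succ`. -/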
attribute [instance]
protected definition is_inhabited [h : inhabited A] : ∀ (n : nat), inhabited (tuple A n)
| 0 := inhabited.mk nil
| (succ n) := inhabited.mk (inhabited.value h :: inhabited.value (is_inhabited n))
attribute [instance]
protected definition has_decidable_eq [h : decidable_eq A] : ∀ (n : nat), decidable_eq (tuple A n) :=
λ n, subtype.has_decidable_eq
definition head {n : nat} : tuple A (succ n) → A
| (tag [] h) := by contradiction
| (tag (a::v) h) := a
definition tail {n : nat} : tuple A (succ n) → tuple A n
| (tag [] h) := by contradiction
| (tag (a::v) h) := tag v (succ.inj h)
theorem head_cons {n : nat} (a : A) (v : tuple A n) : head (a :: v) = a :=
by induction v; reflexivity
theorem tail_cons {n : nat} (a : A) (v : tuple A n) : tail (a :: v) = v :=
by induction v; reflexivity
theorem head_lcons {n : nat} (a : A) (l : list A) (h : length (a::l) = succ n) : head (tag (a::l) h) = a :=
rfl
theorem tail_lcons {n : nat} (a : A) (l : list A) (h : length (a::l) = succ n) : tail (tag (a::l) h) = tag l (succ.inj h) :=
rfl
definition last {n : nat} : tuple A (succ n) → A
| (tag l h) := list.last l (ne_nil_of_length_eq_succ h)
theorem eta : ∀ {n : nat} (v : tuple A (succ n)), head v :: tail v = v
| 0 (tag [] h) := by contradiction
| 0 (tag (a::l) h) := rfl
| (n+1) (tag [] h) := by contradiction
| (n+1) (tag (a::l) h) := rfl
definition of_list (l : list A) : tuple A (list.length l) :=
tag l rfl
definition to_list {n : nat} : tuple A n → list A
| (tag l h) := l
theorem to_list_of_list (l : list A) : to_list (of_list l) = l :=
rfl
theorem to_list_nil : to_list nil = ([] : list A) :=
rfl
theorem length_to_list {n : nat} : ∀ (v : tuple A n), list.length (to_list v) = n
| (tag l h) := h
theorem heq_of_list_eq {n m} : ∀ {v₁ : tuple A n} {v₂ : tuple A m}, to_list v₁ = to_list v₂ → n = m → v₁ == v₂
| (tag l₁ h₁) (tag l₂ h₂) e₁ e₂ := begin
clear heq_of_list_eq,
subst e₂, subst h₁,
unfold to_list at e₁,
subst l₁
end
theorem list_eq_of_heq {n m} {v₁ : tuple A n} {v₂ : tuple A m} : v₁ == v₂ → n = m → to_list v₁ = to_list v₂ :=
begin
intro h₁ h₂, revert v₁ v₂ h₁,
subst n, intro v₁ v₂ h₁, rewrite [eq_of_heq h₁]
end
theorem of_list_to_list {n : nat} (v : tuple A n) : of_list (to_list v) == v :=
begin
apply heq_of_list_eq, rewrite to_list_of_list, rewrite length_to_list
end
/- append -/
definition append {n m : nat} : tuple A n → tuple A m → tuple A (n + m)
| (tag l₁ h₁) (tag l₂ h₂) := tag (list.append l₁ l₂) (by rewrite [length_append, h₁, h₂])
infix ++ := append
open eq.ops
lemma push_eq_rec : ∀ {n m : nat} {l : list A} (h₁ : n = m) (h₂ : length l = n), h₁ ▹ (tag l h₂) = tag l (h₁ ▹ h₂)
| n n l (eq.refl n) h₂ := rfl
theorem append_nil_right {n : nat} (v : tuple A n) : v ++ nil = v :=
induction_on v (λ l n h, by unfold [tuple.append, tuple.nil]; congruence; apply list.append_nil_right)
theorem append_nil_left {n : nat} (v : tuple A n) : !zero_add ▹ (nil ++ v) = v :=
induction_on v (λ l n h, begin unfold [tuple.append, tuple.nil], rewrite [push_eq_rec] end)
theorem append_nil_left_heq {n : nat} (v : tuple A n) : nil ++ v == v :=
heq_of_eq_rec_left !zero_add (append_nil_left v)
theorem append.assoc {n₁ n₂ n₃} : ∀ (v₁ : tuple A n₁) (v₂ : tuple A n₂) (v₃ : tuple A n₃), !add.assoc ▹ ((v₁ ++ v₂) ++ v₃) = v₁ ++ (v₂ ++ v₃)
| (tag l₁ h₁) (tag l₂ h₂) (tag l₃ h₃) := begin
unfold tuple.append, rewrite push_eq_rec,
congruence,
apply list.append.assoc
end
theorem append.assoc_heq {n₁ n₂ n₃} (v₁ : tuple A n₁) (v₂ : tuple A n₂) (v₃ : tuple A n₃) : (v₁ ++ v₂) ++ v₃ == v₁ ++ (v₂ ++ v₃) :=
heq_of_eq_rec_left !add.assoc (append.assoc v₁ v₂ v₃)
/- reverse -/
definition reverse {n : nat} : tuple A n → tuple A n
| (tag l h) := tag (list.reverse l) (by rewrite [length_reverse, h])
theorem reverse_reverse {n : nat} (v : tuple A n) : reverse (reverse v) = v :=
induction_on v (λ l n h, begin unfold reverse, congruence, apply list.reverse_reverse end)
theorem tuple0_eq_nil : ∀ (v : tuple A 0), v = nil
| (tag [] h) := rfl
| (tag (a::l) h) := by contradiction
/- mem -/
definition mem {n : nat} (a : A) (v : tuple A n) : Prop :=
a ∈ elt_of v
notation e ∈ s := mem e s
notation e ∉ s := ¬ e ∈ s
theorem not_mem_nil (a : A) : a ∉ nil :=
list.not_mem_nil a
attribute [simp]
theorem mem_cons {n : nat} (a : A) (v : tuple A n) : a ∈ a :: v :=
induction_on v (λ l n h, !list.mem_cons)
theorem mem_cons_of_mem {n : nat} (y : A) {x : A} {v : tuple A n} : x ∈ v → x ∈ y :: v :=
induction_on v (λ l n h₁ h₂, list.mem_cons_of_mem y h₂)
theorem eq_or_mem_of_mem_cons {n : nat} {x y : A} {v : tuple A n} : x ∈ y::v → x = y ∨ x ∈ v :=
induction_on v (λ l n h₁ h₂, eq_or_mem_of_mem_cons h₂)
theorem mem_singleton {n : nat} {x a : A} : x ∈ (a::nil : tuple A 1) → x = a :=
assume h, list.mem_singleton h
/- map -/
definition map {n : nat} (f : A → B) : tuple A n → tuple B n
| (tag l h) := tag (list.map f l) (by clear map; substvars; rewrite length_map)
theorem map_nil (f : A → B) : map f nil = nil :=
rfl
theorem map_cons {n : nat} (f : A → B) (a : A) (v : tuple A n) : map f (a::v) = f a :: map f v :=
by induction v; reflexivity
theorem map_tag {n : nat} (f : A → B) (l : list A) (h : length l = n)
: map f (tag l h) = tag (list.map f l) (by substvars; rewrite length_map) :=
by reflexivity
theorem map_map {n : nat} (g : B → C) (f : A → B) (v : tuple A n) : map g (map f v) = map (g ∘ f) v :=
begin cases v, rewrite *map_tag, apply subtype.eq, apply list.map_map end
theorem map_id {n : nat} (v : tuple A n) : map id v = v :=
begin induction v, unfold map, congruence, apply list.map_id end
theorem mem_map {n : nat} {a : A} {v : tuple A n} (f : A → B) : a ∈ v → f a ∈ map f v :=
begin induction v, unfold map, apply list.mem_map end
theorem exists_of_mem_map {n : nat} {f : A → B} {b : B} {v : tuple A n} : b ∈ map f v → ∃a, a ∈ v ∧ f a = b :=
begin induction v, unfold map, apply list.exists_of_mem_map end
theorem eq_of_map_const {n : nat} {b₁ b₂ : B} {v : tuple A n} : b₁ ∈ map (const A b₂) v → b₁ = b₂ :=
begin induction v, unfold map, apply list.eq_of_map_const end
/- product -/
definition product {n m : nat} : tuple A n → tuple B m → tuple (A × B) (n * m)
| (tag l₁ h₁) (tag l₂ h₂) := tag (list.product l₁ l₂) (by rewrite [length_product, h₁, h₂])
theorem nil_product {m : nat} (v : tuple B m) : !zero_mul ▹ product (@nil A) v = nil :=
begin induction v, unfold [nil, product], rewrite push_eq_rec end
theorem nil_product_heq {m : nat} (v : tuple B m) : product (@nil A) v == (@nil (A × B)) :=
heq_of_eq_rec_left _ (nil_product v)
theorem product_nil {n : nat} (v : tuple A n) : product v (@nil B) = nil :=
begin induction v, unfold [nil, product], congruence, apply list.product_nil end
theorem mem_product {n m : nat} {a : A} {b : B} {v₁ : tuple A n} {v₂ : tuple B m} : a ∈ v₁ → b ∈ v₂ → (a, b) ∈ product v₁ v₂ :=
begin cases v₁, cases v₂, unfold product, apply list.mem_product end
theorem mem_of_mem_product_left {n m : nat} {a : A} {b : B} {v₁ : tuple A n} {v₂ : tuple B m} : (a, b) ∈ product v₁ v₂ → a ∈ v₁ :=
begin cases v₁, cases v₂, unfold product, apply list.mem_of_mem_product_left end
theorem mem_of_mem_product_right {n m : nat} {a : A} {b : B} {v₁ : tuple A n} {v₂ : tuple B m} : (a, b) ∈ product v₁ v₂ → b ∈ v₂ :=
begin cases v₁, cases v₂, unfold product, apply list.mem_of_mem_product_right end
/- ith -/
open fin
definition ith {n : nat} : tuple A n → fin n → A
| (tag l h₁) (mk i h₂) := list.ith l i (by rewrite h₁; exact h₂)
lemma ith_zero {n : nat} (a : A) (v : tuple A n) (h : 0 < succ n) : ith (a::v) (mk 0 h) = a :=
by induction v; reflexivity
lemma ith_fin_zero {n : nat} (a : A) (v : tuple A n) : ith (a::v) (fin.zero n) = a :=
by unfold fin.zero; apply ith_zero
lemma ith_succ {n : nat} (a : A) (v : tuple A n) (i : nat) (h : succ i < succ n)
: ith (a::v) (mk (succ i) h) = ith v (mk_pred i h) :=
by induction v; reflexivity
lemma ith_fin_succ {n : nat} (a : A) (v : tuple A n) (i : fin n)
: ith (a::v) (succ i) = ith v i :=
begin cases i, unfold fin.succ, rewrite ith_succ end
lemma ith_zero_eq_head {n : nat} (v : tuple A (nat.succ n)) : ith v (fin.zero n) = head v :=
by rewrite [-eta v, ith_fin_zero, head_cons]
lemma ith_succ_eq_ith_tail {n : nat} (v : tuple A (nat.succ n)) (i : fin n) : ith v (succ i) = ith (tail v) i :=
by rewrite [-eta v, ith_fin_succ, tail_cons]
protected lemma ext {n : nat} (v₁ v₂ : tuple A n) (h : ∀ i : fin n, ith v₁ i = ith v₂ i) : v₁ = v₂ :=
begin
induction n with n ih,
rewrite [tuple0_eq_nil v₁, tuple0_eq_nil v₂],
rewrite [-eta v₁, -eta v₂], congruence,
show head v₁ = head v₂, by rewrite [-ith_zero_eq_head, -ith_zero_eq_head]; apply h,
have ∀ i : fin n, ith (tail v₁) i = ith (tail v₂) i, from
take i, by rewrite [-ith_succ_eq_ith_tail, -ith_succ_eq_ith_tail]; apply h,
show tail v₁ = tail v₂, from ih _ _ this
end
/- tabulate -/
definition tabulate : Π {n : nat}, (fin n → A) → tuple A n
| 0 f := nil
| (n+1) f := f (fin.zero n) :: tabulate (λ i : fin n, f (succ i))
theorem ith_tabulate {n : nat} (f : fin n → A) (i : fin n) : ith (tabulate f) i = f i :=
begin
induction n with n ih,
apply elim0 i,
cases i with v hlt, cases v,
{unfold tabulate, rewrite ith_zero},
{unfold tabulate, rewrite [ith_succ, ih]}
end
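/- Illustrative example (not part of the original file): `ith` recovers the
function used by `tabulate`, as stated by `ith_tabulate` above. -/
example (f : fin 3 → A) (i : fin 3) : ith (tabulate f) i = f i :=
ith_tabulate f i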
variable {n : ℕ}
definition replicate : A → tuple A n
| a := tag (list.replicate n a) (length_replicate n a)
definition dropn : Π (i:ℕ), tuple A n → tuple A (n - i)
| i (tag l p) := tag (list.dropn i l) (p ▸ list.length_dropn i l)
definition firstn : Π (i:ℕ) {p:i ≤ n}, tuple A n → tuple A i
| i isLe (tag l p) :=
let q := calc list.length (list.firstn i l)
= min i (list.length l) : list.length_firstn_eq
... = min i n : p
... = i : min_eq_left isLe in
tag (list.firstn i l) q
definition map₂ : (A → B → C) → tuple A n → tuple B n → tuple C n
| f (tag x px) (tag y py) :=
let z : list C := list.map₂ f x y in
let p : list.length z = n := calc
list.length z = min (list.length x) (list.length y) : list.length_map₂
... = min n n : by rewrite [px, py]
... = n : min_self in
tag z p
section accum
open prod
variable {S : Type}
definition mapAccumR
: (A → S → S × B) → tuple A n → S → S × tuple B n
| f (tag x px) c :=
let z := list.mapAccumR f x c in
let p := calc
list.length (pr₂ (list.mapAccumR f x c))
= length x : length_mapAccumR
... = n : px in
(pr₁ z, tag (pr₂ z) p)
definition mapAccumR₂
: (A → B → S → S × C) → tuple A n → tuple B n → S → S × tuple C n
| f (tag x px) (tag y py) c :=
let z := list.mapAccumR₂ f x y c in
let p := calc
list.length (pr₂ (list.mapAccumR₂ f x y c))
= min (length x) (length y) : length_mapAccumR₂
... = n : by rewrite [ px, py, min_self ] in
(pr₁ z, tag (pr₂ z) p)
end accum
end tuple
|
0dc4991321b53efacddb7c025b2ec6bb8619e30c
|
531456391187e1b7678c24ddaf3d6470b4dba971
|
/library/init/meta/tactic.lean
|
b6b7e67b18b7d842db415c7c76e6b99adacc21f0
|
[
"Apache-2.0"
] |
permissive
|
tigerneil/lean
|
7e4834cb9b03027c0e3ba42efd8c1a4f52389c9c
|
8f31cff99bca2c5dd7fcd425de1ff1cb8e4e150a
|
refs/heads/master
| 1,606,890,672,381
| 1,499,580,201,000
| 1,499,580,305,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 46,321
|
lean
|
/-
Copyright (c) 2016 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
prelude
import init.function init.data.option.basic init.util
import init.category.combinators init.category.monad init.category.alternative init.category.monad_fail
import init.data.nat.div init.meta.exceptional init.meta.format init.meta.environment
import init.meta.pexpr init.data.repr init.data.string.basic init.meta.interaction_monad
meta constant tactic_state : Type
universes u v
namespace tactic_state
/-- Create a tactic state with an empty local context and a dummy goal. -/
meta constant mk_empty : environment → options → tactic_state
meta constant env : tactic_state → environment
/-- Format the given tactic state. If `target_lhs_only` is true and the target
is of the form `lhs ~ rhs`, where `~` is a simplification relation,
then only the `lhs` is displayed.
Remark: the parameter `target_lhs_only` is a temporary hack used to implement
the `conv` monad. It will be removed in the future. -/
meta constant to_format (s : tactic_state) (target_lhs_only : bool := ff) : format
/-- Format expression with respect to the main goal in the tactic state.
If the tactic state does not contain any goals, then format expression
using an empty local context. -/
meta constant format_expr : tactic_state → expr → format
meta constant get_options : tactic_state → options
meta constant set_options : tactic_state → options → tactic_state
end tactic_state
meta instance : has_to_format tactic_state :=
⟨tactic_state.to_format⟩
meta instance : has_to_string tactic_state :=
⟨λ s, (to_fmt s).to_string s.get_options⟩
@[reducible] meta def tactic := interaction_monad tactic_state
@[reducible] meta def tactic_result := interaction_monad.result tactic_state
namespace tactic
export interaction_monad (hiding failed fail)
meta def failed {α : Type} : tactic α := interaction_monad.failed
meta def fail {α : Type u} {β : Type v} [has_to_format β] (msg : β) : tactic α :=
interaction_monad.fail msg
end tactic
namespace tactic_result
export interaction_monad.result
end tactic_result
open tactic
open tactic_result
infixl ` >>=[tactic] `:2 := interaction_monad_bind
infixl ` >>[tactic] `:2 := interaction_monad_seq
meta instance : alternative tactic :=
{ interaction_monad.monad with
failure := @interaction_monad.failed _,
orelse := @interaction_monad_orelse _ }
meta def {u₁ u₂} tactic.up {α : Type u₂} (t : tactic α) : tactic (ulift.{u₁} α) :=
λ s, match t s with
| success a s' := success (ulift.up a) s'
| exception t ref s := exception t ref s
end
meta def {u₁ u₂} tactic.down {α : Type u₂} (t : tactic (ulift.{u₁} α)) : tactic α :=
λ s, match t s with
| success (ulift.up a) s' := success a s'
| exception t ref s := exception t ref s
end
namespace tactic
variables {α : Type u}
meta def try_core (t : tactic α) : tactic (option α) :=
λ s, result.cases_on (t s)
(λ a, success (some a))
(λ e ref s', success none s)
meta def skip : tactic unit :=
success ()
meta def try (t : tactic α) : tactic unit :=
try_core t >>[tactic] skip
meta def fail_if_success {α : Type u} (t : tactic α) : tactic unit :=
λ s, result.cases_on (t s)
(λ a s, mk_exception "fail_if_success combinator failed, given tactic succeeded" none s)
(λ e ref s', success () s)
meta def success_if_fail {α : Type u} (t : tactic α) : tactic unit :=
λ s, match t s with
| (interaction_monad.result.exception _ _ s') := success () s
| (interaction_monad.result.success a s) :=
mk_exception "success_if_fail combinator failed, given tactic succeeded" none s
end
open nat
/-- (repeat_at_most n t): repeat the given tactic at most n times or until t fails -/
meta def repeat_at_most : nat → tactic unit → tactic unit
| 0 t := skip
| (succ n) t := (do t, repeat_at_most n t) <|> skip
/-- (repeat_exactly n t) : execute t n times -/
meta def repeat_exactly : nat → tactic unit → tactic unit
| 0 t := skip
| (succ n) t := do t, repeat_exactly n t
meta def repeat : tactic unit → tactic unit :=
repeat_at_most 100000
meta def returnopt (e : option α) : tactic α :=
λ s, match e with
| (some a) := success a s
| none := mk_exception "failed" none s
end
meta instance opt_to_tac : has_coe (option α) (tactic α) :=
⟨returnopt⟩
/-- Decorate t's exceptions with msg -/
meta def decorate_ex (msg : format) (t : tactic α) : tactic α :=
λ s, result.cases_on (t s)
success
(λ opt_thunk,
match opt_thunk with
| some e := exception (some (λ u, msg ++ format.nest 2 (format.line ++ e u)))
| none := exception none
end)
@[inline] meta def write (s' : tactic_state) : tactic unit :=
λ s, success () s'
@[inline] meta def read : tactic tactic_state :=
λ s, success s s
meta def get_options : tactic options :=
do s ← read, return s.get_options
meta def set_options (o : options) : tactic unit :=
do s ← read, write (s.set_options o)
meta def save_options {α : Type} (t : tactic α) : tactic α :=
do o ← get_options,
a ← t,
set_options o,
return a
meta def returnex {α : Type} (e : exceptional α) : tactic α :=
λ s, match e with
| exceptional.success a := success a s
| exceptional.exception ._ f :=
match get_options s with
| success opt _ := exception (some (λ u, f opt)) none s
| exception _ _ _ := exception (some (λ u, f options.mk)) none s
end
end
meta instance ex_to_tac {α : Type} : has_coe (exceptional α) (tactic α) :=
⟨returnex⟩
end tactic
meta def tactic_format_expr (e : expr) : tactic format :=
do s ← tactic.read, return (tactic_state.format_expr s e)
meta class has_to_tactic_format (α : Type u) :=
(to_tactic_format : α → tactic format)
meta instance : has_to_tactic_format expr :=
⟨tactic_format_expr⟩
meta def tactic.pp {α : Type u} [has_to_tactic_format α] : α → tactic format :=
has_to_tactic_format.to_tactic_format
open tactic format
meta instance {α : Type u} [has_to_tactic_format α] : has_to_tactic_format (list α) :=
⟨has_map.map to_fmt ∘ monad.mapm pp⟩
meta instance (α : Type u) (β : Type v) [has_to_tactic_format α] [has_to_tactic_format β] :
has_to_tactic_format (α × β) :=
⟨λ ⟨a, b⟩, to_fmt <$> (prod.mk <$> pp a <*> pp b)⟩
meta def option_to_tactic_format {α : Type u} [has_to_tactic_format α] : option α → tactic format
| (some a) := do fa ← pp a, return (to_fmt "(some " ++ fa ++ ")")
| none := return "none"
meta instance {α : Type u} [has_to_tactic_format α] : has_to_tactic_format (option α) :=
⟨option_to_tactic_format⟩
meta instance {α} (a : α) : has_to_tactic_format (reflected a) :=
⟨λ h, pp h.to_expr⟩
@[priority 10] meta instance has_to_format_to_has_to_tactic_format (α : Type) [has_to_format α] : has_to_tactic_format α :=
⟨(λ x, return x) ∘ to_fmt⟩
namespace tactic
open tactic_state
meta def get_env : tactic environment :=
do s ← read,
return $ env s
meta def get_decl (n : name) : tactic declaration :=
do s ← read,
(env s).get n
meta def trace {α : Type u} [has_to_tactic_format α] (a : α) : tactic unit :=
do fmt ← pp a,
return $ _root_.trace_fmt fmt (λ u, ())
meta def trace_call_stack : tactic unit :=
assume state, _root_.trace_call_stack (success () state)
meta def timetac {α : Type u} (desc : string) (t : thunk (tactic α)) : tactic α :=
λ s, timeit desc (t () s)
meta def trace_state : tactic unit :=
do s ← read,
trace $ to_fmt s
inductive transparency
| all | semireducible | instances | reducible | none
export transparency (reducible semireducible)
/-- (eval_expr α e) evaluates 'e' IF 'e' has type 'α'. -/
meta constant eval_expr (α : Type u) [reflected α] : expr → tactic α
/-- Return the partial term/proof constructed so far. Note that the resultant expression
may contain variables that are not declared in the current main goal. -/
meta constant result : tactic expr
/-- Display the partial term/proof constructed so far. This tactic is *not* equivalent to
`do { r ← result, s ← read, return (format_expr s r) }` because this one will format the result with respect
to the current goal, and trace_result will do it with respect to the initial goal. -/
meta constant format_result : tactic format
/-- Return target type of the main goal. Fail if tactic_state does not have any goal left. -/
meta constant target : tactic expr
meta constant intro_core : name → tactic expr
meta constant intron : nat → tactic unit
/-- Clear the given local constant. The tactic fails if the given expression is not a local constant. -/
meta constant clear : expr → tactic unit
meta constant revert_lst : list expr → tactic nat
/-- Return `e` in weak head normal form with respect to the given transparency setting. -/
meta constant whnf (e : expr) (md := semireducible) : tactic expr
/-- (head) eta expand the given expression -/
meta constant head_eta_expand : expr → tactic expr
/-- (head) beta reduction -/
meta constant head_beta : expr → tactic expr
/-- (head) zeta reduction -/
meta constant head_zeta : expr → tactic expr
/-- zeta reduction -/
meta constant zeta : expr → tactic expr
/-- (head) eta reduction -/
meta constant head_eta : expr → tactic expr
/-- Succeeds if `t` and `s` can be unified using the given transparency setting. -/
meta constant unify (t s : expr) (md := semireducible) : tactic unit
/-- Similar to `unify`, but it treats metavariables as constants. -/
meta constant is_def_eq (t s : expr) (md := semireducible) : tactic unit
/-- Infer the type of the given expression.
Remark: transparency does not affect type inference -/
meta constant infer_type : expr → tactic expr
meta constant get_local : name → tactic expr
/-- Resolve a name using the current local context, environment, aliases, etc. -/
meta constant resolve_name : name → tactic pexpr
/-- Return the hypotheses of the main goal. Fail if tactic_state does not have any goal left. -/
meta constant local_context : tactic (list expr)
meta constant get_unused_name (n : name) (i : option nat := none) : tactic name
/-- Helper tactic for creating simple applications where some arguments are inferred using
type inference.
Example, given
```
rel.{l_1 l_2} : Pi (α : Type.{l_1}) (β : α -> Type.{l_2}), (Pi x : α, β x) -> (Pi x : α, β x) -> Prop
nat : Type
real : Type
vec.{l} : Pi (α : Type.{l}) (n : nat), Type.{l}
f g : Pi (n : nat), vec real n
```
then
```
mk_app_core semireducible "rel" [f, g]
```
returns the application
```
rel.{1 2} nat (fun n : nat, vec real n) f g
```
The unification constraints due to type inference are solved using the transparency `md`.
-/
meta constant mk_app (fn : name) (args : list expr) (md := semireducible) : tactic expr
/-- Similar to `mk_app`, but allows specifying which arguments are explicit/implicit.
Example, given `(a b : nat)` then
```
mk_mapp "ite" [some (a > b), none, none, some a, some b]
```
returns the application
```
@ite.{1} (a > b) (nat.decidable_gt a b) nat a b
```
-/
meta constant mk_mapp (fn : name) (args : list (option expr)) (md := semireducible) : tactic expr
/-- (mk_congr_arg h₁ h₂) is a more efficient version of (mk_app `congr_arg [h₁, h₂]) -/
meta constant mk_congr_arg : expr → expr → tactic expr
/-- (mk_congr_fun h₁ h₂) is a more efficient version of (mk_app `congr_fun [h₁, h₂]) -/
meta constant mk_congr_fun : expr → expr → tactic expr
/-- (mk_congr h₁ h₂) is a more efficient version of (mk_app `congr [h₁, h₂]) -/
meta constant mk_congr : expr → expr → tactic expr
/-- (mk_eq_refl h) is a more efficient version of (mk_app `eq.refl [h]) -/
meta constant mk_eq_refl : expr → tactic expr
/-- (mk_eq_symm h) is a more efficient version of (mk_app `eq.symm [h]) -/
meta constant mk_eq_symm : expr → tactic expr
/-- (mk_eq_trans h₁ h₂) is a more efficient version of (mk_app `eq.trans [h₁, h₂]) -/
meta constant mk_eq_trans : expr → expr → tactic expr
/-- (mk_eq_mp h₁ h₂) is a more efficient version of (mk_app `eq.mp [h₁, h₂]) -/
meta constant mk_eq_mp : expr → expr → tactic expr
/-- (mk_eq_mpr h₁ h₂) is a more efficient version of (mk_app `eq.mpr [h₁, h₂]) -/
meta constant mk_eq_mpr : expr → expr → tactic expr
/- Given a local constant t, if t has type (lhs = rhs) apply substitution.
Otherwise, try to find a local constant that has type of the form (t = t') or (t' = t).
The tactic fails if the given expression is not a local constant. -/
meta constant subst : expr → tactic unit
/-- Close the current goal using `e`. Fails if the type of `e` is not definitionally equal to
the target type. -/
meta constant exact (e : expr) (md := semireducible) : tactic unit
/-- Elaborate the given quoted expression with respect to the current main goal.
If `allow_mvars` is tt, then metavariables are tolerated and become new goals if `subgoals` is tt. -/
meta constant to_expr (q : pexpr) (allow_mvars := tt) (subgoals := tt) : tactic expr
/-- Return true if the given expression is a type class. -/
meta constant is_class : expr → tactic bool
/-- Try to create an instance of the given type class. -/
meta constant mk_instance : expr → tactic expr
/-- Change the target of the main goal.
The input expression must be definitionally equal to the current target.
If `check` is `ff`, then the tactic does not check whether `e`
is definitionally equal to the current target. If it is not,
then the error will only be detected by the kernel type checker. -/
meta constant change (e : expr) (check : bool := tt): tactic unit
/-- `assert_core H T`, adds a new goal for T, and change target to `T -> target`. -/
meta constant assert_core : name → expr → tactic unit
/-- `assertv_core H T P`, change target to (T -> target) if P has type T. -/
meta constant assertv_core : name → expr → expr → tactic unit
/-- `define_core H T`, adds a new goal for T, and change target to `let H : T := ?M in target` in the current goal. -/
meta constant define_core : name → expr → tactic unit
/-- `definev_core H T P`, change target to `let H : T := P in target` if P has type T. -/
meta constant definev_core : name → expr → expr → tactic unit
/-- rotate goals to the left -/
meta constant rotate_left : nat → tactic unit
meta constant get_goals : tactic (list expr)
meta constant set_goals : list expr → tactic unit
inductive new_goals
| non_dep_first | non_dep_only | all
/-- Configuration options for the `apply` tactic. -/
structure apply_cfg :=
(md := semireducible)
(approx := tt)
(new_goals := new_goals.non_dep_first)
(instances := tt)
(auto_param := tt)
(opt_param := tt)
/-- Apply the expression `e` to the main goal,
the unification is performed using the transparency mode in `cfg`.
If `cfg.approx` is `tt`, then fallback to first-order unification, and approximate context during unification.
`cfg.new_goals` specifies which unassigned metavariables become new goals, and their order.
If `cfg.instances` is `tt`, then use type class resolution to instantiate unassigned meta-variables.
The fields `cfg.auto_param` and `cfg.opt_param` are ignored by this tactic (See `tactic.apply`).
It returns a list of all introduced meta variables, even the assigned ones. -/
meta constant apply_core (e : expr) (cfg : apply_cfg := {}) : tactic (list expr)
/- Create a fresh meta universe variable. -/
meta constant mk_meta_univ : tactic level
/- Create a fresh meta-variable with the given type.
The scope of the new meta-variable is the local context of the main goal. -/
meta constant mk_meta_var : expr → tactic expr
/-- Return the value assigned to the given universe meta-variable.
Fails if the argument is not a universe meta-variable or if it is not assigned. -/
meta constant get_univ_assignment : level → tactic level
/-- Return the value assigned to the given meta-variable.
Fail if argument is not a meta-variable or if it is not assigned. -/
meta constant get_assignment : expr → tactic expr
meta constant mk_fresh_name : tactic name
/-- Return a hash code for expr that ignores inst_implicit arguments,
and proofs. -/
meta constant abstract_hash : expr → tactic nat
/-- Return the "weight" of the given expr while ignoring inst_implicit arguments,
and proofs. -/
meta constant abstract_weight : expr → tactic nat
meta constant abstract_eq : expr → expr → tactic bool
/-- Induction on `h` using recursor `rec`, names for the new hypotheses
are retrieved from `ns`. If `ns` does not have sufficient names, then use the internal binder names
in the recursor.
It returns for each new goal a list of new hypotheses and a list of substitutions for hypotheses
depending on `h`. The substitutions map internal names to their replacement terms. If the
replacement is again a hypothesis the user name stays the same. The internal names are only valid
in the original goal, not in the type context of the new goal.
If `rec` is none, then the type of `h` is inferred, if it is of the form `C ...`, tactic uses `C.rec` -/
meta constant induction (h : expr) (ns : list name := []) (rec : option name := none) (md := semireducible) : tactic (list (list expr × list (name × expr)))
/-- Apply `cases_on` recursor, names for the new hypotheses are retrieved from `ns`.
`h` must be a local constant. It returns for each new goal the name of the constructor, a list of new hypotheses, and a list of
substitutions for hypotheses depending on `h`. The number of new goals may be smaller than the
number of constructors. Some goals may be discarded when the indices do not match.
See `induction` for information on the list of substitutions.
The `cases` tactic is implemented using this one, and it relaxes the restriction of `h`. -/
meta constant cases_core (h : expr) (ns : list name := []) (md := semireducible) : tactic (list (name × list expr × list (name × expr)))
/-- Similar to cases tactic, but does not revert/intro/clear hypotheses. -/
meta constant destruct (e : expr) (md := semireducible) : tactic unit
/-- Generalizes the target with respect to `e`. -/
meta constant generalize (e : expr) (n : name := `_x) (md := semireducible) : tactic unit
/-- instantiate assigned metavariables in the given expression -/
meta constant instantiate_mvars : expr → tactic expr
/-- Add the given declaration to the environment -/
meta constant add_decl : declaration → tactic unit
/-- Changes the environment to the `new_env`. `new_env` needs to be a descendant from the current environment. -/
meta constant set_env : environment → tactic unit
/-- `doc_string d` returns the doc string for `d` (if available) -/
meta constant doc_string : name → tactic string
meta constant add_doc_string : name → string → tactic unit
/--
Create an auxiliary definition with name `c` where `type` and `value` may contain local constants and
meta-variables. This function collects all dependencies (universe parameters, universe metavariables,
local constants (aka hypotheses) and metavariables).
It updates the environment in the tactic_state, and returns an expression of the form
(c.{l_1 ... l_n} a_1 ... a_m)
where l_i's and a_j's are the collected dependencies.
-/
meta constant add_aux_decl (c : name) (type : expr) (val : expr) (is_lemma : bool) : tactic expr
meta constant module_doc_strings : tactic (list (option name × string))
/-- Set attribute `attr_name` for constant `c_name` with the given priority.
If the priority is none, then use default -/
meta constant set_basic_attribute (attr_name : name) (c_name : name) (persistent := ff) (prio : option nat := none) : tactic unit
/-- `unset_attribute attr_name c_name` -/
meta constant unset_attribute : name → name → tactic unit
/-- `has_attribute attr_name c_name` succeeds if the declaration `c_name`
has the attribute `attr_name`. The result is the priority. -/
meta constant has_attribute : name → name → tactic nat
/-- `copy_attribute attr_name src p tgt` copies attribute `attr_name` from
`src` to `tgt` if it is defined for `src` -/
meta def copy_attribute (attr_name : name) (src : name) (p : bool) (tgt : name) : tactic unit :=
try $ do
prio ← has_attribute attr_name src,
set_basic_attribute attr_name tgt p (some prio)
/-- Name of the declaration currently being elaborated. -/
meta constant decl_name : tactic name
/-- `save_type_info e ref` saves the type of `e` at the position associated with `ref` -/
meta constant save_type_info {elab : bool} : expr → expr elab → tactic unit
meta constant save_info_thunk : pos → (unit → format) → tactic unit
/-- Return list of currently open namespaces -/
meta constant open_namespaces : tactic (list name)
/-- Return tt iff `t` "occurs" in `e`. The occurrence checking is performed using
keyed matching with the given transparency setting.
We say `t` occurs in `e` by keyed matching iff there is a subterm `s`
s.t. `t` and `s` have the same head, and `is_def_eq t s md`
The main idea is to minimize the number of `is_def_eq` checks
performed. -/
meta constant kdepends_on (e t : expr) (md := reducible) : tactic bool
/-- Blocks the execution of the current thread for at least `msecs` milliseconds.
This tactic is used mainly for debugging purposes. -/
meta constant sleep (msecs : nat) : tactic unit
/-- Type check `e` with respect to the current goal.
Fails if `e` is not type correct. -/
meta constant type_check (e : expr) (md := semireducible) : tactic unit
open list nat
meta def induction' (h : expr) (ns : list name := []) (rec : option name := none) (md := semireducible) : tactic unit :=
induction h ns rec md >> return ()
/-- Remark: set_goals will erase any solved goal -/
meta def cleanup : tactic unit :=
get_goals >>= set_goals
/-- Auxiliary definition used to implement begin ... end blocks -/
meta def step {α : Type u} (t : tactic α) : tactic unit :=
t >>[tactic] cleanup
meta def istep {α : Type u} (line0 col0 : ℕ) (line col : ℕ) (t : tactic α) : tactic unit :=
λ s, (@scope_trace _ line col (λ _, step t s)).clamp_pos line0 line col
meta def is_prop (e : expr) : tactic bool :=
do t ← infer_type e,
return (t = `(Prop))
/-- Return true iff n is the name of declaration that is a proposition. -/
meta def is_prop_decl (n : name) : tactic bool :=
do env ← get_env,
d ← env.get n,
t ← return $ d.type,
is_prop t
meta def is_proof (e : expr) : tactic bool :=
infer_type e >>= is_prop
meta def whnf_no_delta (e : expr) : tactic expr :=
whnf e transparency.none
meta def whnf_target : tactic unit :=
target >>= whnf >>= change
meta def unsafe_change (e : expr) : tactic unit :=
change e ff
meta def intro (n : name) : tactic expr :=
do t ← target,
if expr.is_pi t ∨ expr.is_let t then intro_core n
else whnf_target >> intro_core n
meta def intro1 : tactic expr :=
intro `_
meta def intros : tactic (list expr) :=
do t ← target,
match t with
| expr.pi _ _ _ _ := do H ← intro1, Hs ← intros, return (H :: Hs)
| expr.elet _ _ _ _ := do H ← intro1, Hs ← intros, return (H :: Hs)
| _ := return []
end
meta def intro_lst : list name → tactic (list expr)
| [] := return []
| (n::ns) := do H ← intro n, Hs ← intro_lst ns, return (H :: Hs)
/-- Introduces new hypotheses with forward dependencies -/
meta def intros_dep : tactic (list expr) :=
do t ← target,
let proc (b : expr) :=
if b.has_var_idx 0 then
do h ← intro1, hs ← intros_dep, return (h::hs)
else
-- body doesn't depend on new hypothesis
return [],
match t with
| expr.pi _ _ _ b := proc b
| expr.elet _ _ _ b := proc b
| _ := return []
end
meta def introv : list name → tactic (list expr)
| [] := intros_dep
| (n::ns) := do hs ← intros_dep, h ← intro n, hs' ← introv ns, return (hs ++ h :: hs')
/-- Returns n fully qualified if it refers to a constant, or else fails. -/
meta def resolve_constant (n : name) : tactic name :=
do (expr.const n _) ← resolve_name n,
pure n
meta def to_expr_strict (q : pexpr) : tactic expr :=
to_expr q
meta def revert (l : expr) : tactic nat :=
revert_lst [l]
meta def clear_lst : list name → tactic unit
| [] := skip
| (n::ns) := do H ← get_local n, clear H, clear_lst ns
meta def match_not (e : expr) : tactic expr :=
match (expr.is_not e) with
| (some a) := return a
| none := fail "expression is not a negation"
end
meta def match_and (e : expr) : tactic (expr × expr) :=
match (expr.is_and e) with
| (some (α, β)) := return (α, β)
| none := fail "expression is not a conjunction"
end
meta def match_or (e : expr) : tactic (expr × expr) :=
match (expr.is_or e) with
| (some (α, β)) := return (α, β)
| none := fail "expression is not a disjunction"
end
meta def match_iff (e : expr) : tactic (expr × expr) :=
match (expr.is_iff e) with
| (some (lhs, rhs)) := return (lhs, rhs)
| none := fail "expression is not an iff"
end
meta def match_eq (e : expr) : tactic (expr × expr) :=
match (expr.is_eq e) with
| (some (lhs, rhs)) := return (lhs, rhs)
| none := fail "expression is not an equality"
end
meta def match_ne (e : expr) : tactic (expr × expr) :=
match (expr.is_ne e) with
| (some (lhs, rhs)) := return (lhs, rhs)
| none := fail "expression is not a disequality"
end
meta def match_heq (e : expr) : tactic (expr × expr × expr × expr) :=
do match (expr.is_heq e) with
| (some (α, lhs, β, rhs)) := return (α, lhs, β, rhs)
| none := fail "expression is not a heterogeneous equality"
end
meta def match_refl_app (e : expr) : tactic (name × expr × expr) :=
do env ← get_env,
match (environment.is_refl_app env e) with
| (some (R, lhs, rhs)) := return (R, lhs, rhs)
| none := fail "expression is not an application of a reflexive relation"
end
meta def match_app_of (e : expr) (n : name) : tactic (list expr) :=
guard (expr.is_app_of e n) >> return e.get_app_args
meta def get_local_type (n : name) : tactic expr :=
get_local n >>= infer_type
meta def trace_result : tactic unit :=
format_result >>= trace
meta def rexact (e : expr) : tactic unit :=
exact e reducible
/-- `find_same_type t es` tries to find in es an expression with type definitionally equal to t -/
meta def find_same_type : expr → list expr → tactic expr
| e [] := failed
| e (H :: Hs) :=
do t ← infer_type H,
(unify e t >> return H) <|> find_same_type e Hs
meta def find_assumption (e : expr) : tactic expr :=
do ctx ← local_context, find_same_type e ctx
meta def assumption : tactic unit :=
do { ctx ← local_context,
t ← target,
H ← find_same_type t ctx,
exact H }
<|> fail "assumption tactic failed"
meta def save_info (p : pos) : tactic unit :=
do s ← read,
tactic.save_info_thunk p (λ _, tactic_state.to_format s)
notation `‹` p `›` := (by assumption : p)
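/- Illustrative example (not part of the original file): the anonymous
hypothesis notation defined above retrieves a hypothesis by its statement. -/
example (p : Prop) (h : p) : p ∧ p :=
⟨‹p›, ‹p›⟩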
/-- Swap first two goals, do nothing if tactic state does not have at least two goals. -/
meta def swap : tactic unit :=
do gs ← get_goals,
match gs with
| (g₁ :: g₂ :: rs) := set_goals (g₂ :: g₁ :: rs)
| e := skip
end
/-- `assert h t`, adds a new goal for t, and the hypothesis `h : t` in the current goal. -/
meta def assert (h : name) (t : expr) : tactic expr :=
do assert_core h t, swap, e ← intro h, swap, return e
/-- `assertv h t v`, adds the hypothesis `h : t` in the current goal if v has type t. -/
meta def assertv (h : name) (t : expr) (v : expr) : tactic expr :=
assertv_core h t v >> intro h
/-- `define h t`, adds a new goal for t, and the hypothesis `h : t := ?M` in the current goal. -/
meta def define (h : name) (t : expr) : tactic expr :=
do define_core h t, swap, e ← intro h, swap, return e
/-- `definev h t v`, adds the hypothesis (h : t := v) in the current goal if v has type t. -/
meta def definev (h : name) (t : expr) (v : expr) : tactic expr :=
definev_core h t v >> intro h
/-- Add `h : t := pr` to the current goal -/
meta def pose (h : name) (t : option expr := none) (pr : expr) : tactic expr :=
let dv := λt, definev h t pr in
option.cases_on t (infer_type pr >>= dv) dv
/-- Add `h : t` to the current goal, given a proof `pr : t` -/
meta def note (h : name) (t : option expr := none) (pr : expr) : tactic expr :=
let dv := λt, assertv h t pr in
option.cases_on t (infer_type pr >>= dv) dv
/-- Return the number of goals that need to be solved -/
meta def num_goals : tactic nat :=
do gs ← get_goals,
return (length gs)
/-- We have to provide the instance argument `[has_mod nat]` because
mod for nat was not defined yet -/
meta def rotate_right (n : nat) [has_mod nat] : tactic unit :=
do ng ← num_goals,
if ng = 0 then skip
else rotate_left (ng - n % ng)
meta def rotate : nat → tactic unit :=
rotate_left
/-- `first [t_1, ..., t_n]` applies the first tactic that doesn't fail.
The tactic fails if all t_i's fail. -/
meta def first {α : Type u} : list (tactic α) → tactic α
| [] := fail "first tactic failed, no more alternatives"
| (t::ts) := t <|> first ts
/-- Applies the given tactic to the main goal and fails if it is not solved. -/
meta def solve1 (tac : tactic unit) : tactic unit :=
do gs ← get_goals,
match gs with
| [] := fail "solve1 tactic failed, there isn't any goal left to focus"
| (g::rs) :=
do set_goals [g],
tac,
gs' ← get_goals,
match gs' with
| [] := set_goals rs
| gs := fail "solve1 tactic failed, focused goal has not been solved"
end
end
/-- `solve [t_1, ... t_n]` applies the first tactic that solves the main goal. -/
meta def solve (ts : list (tactic unit)) : tactic unit :=
first $ map solve1 ts
private meta def focus_aux : list (tactic unit) → list expr → list expr → tactic unit
| [] [] rs := set_goals rs
| [] gs rs := fail "focus tactic failed, insufficient number of tactics"
| (t::ts) (g::gs) rs := do
set_goals [g], t, rs' ← get_goals,
focus_aux ts gs (rs ++ rs')
| (t::ts) [] rs := fail "focus tactic failed, insufficient number of goals"
/-- `focus [t_1, ..., t_n]` applies t_i to the i-th goal. Fails if the number of goals is not n. -/
meta def focus (ts : list (tactic unit)) : tactic unit :=
do gs ← get_goals, focus_aux ts gs []
meta def focus1 {α} (tac : tactic α) : tactic α :=
do g::gs ← get_goals,
match gs with
| [] := tac
| _ := do
set_goals [g],
a ← tac,
gs' ← get_goals,
set_goals (gs' ++ gs),
return a
end
private meta def all_goals_core (tac : tactic unit) : list expr → list expr → tactic unit
| [] ac := set_goals ac
| (g :: gs) ac :=
do set_goals [g],
tac,
new_gs ← get_goals,
all_goals_core gs (ac ++ new_gs)
/-- Apply the given tactic to all goals. -/
meta def all_goals (tac : tactic unit) : tactic unit :=
do gs ← get_goals,
all_goals_core tac gs []
private meta def any_goals_core (tac : tactic unit) : list expr → list expr → bool → tactic unit
| [] ac progress := guard progress >> set_goals ac
| (g :: gs) ac progress :=
do set_goals [g],
succeeded ← try_core tac,
new_gs ← get_goals,
any_goals_core gs (ac ++ new_gs) (succeeded.is_some || progress)
/-- Apply the given tactic to any goal where it succeeds. The tactic succeeds only if
tac succeeds for at least one goal. -/
meta def any_goals (tac : tactic unit) : tactic unit :=
do gs ← get_goals,
any_goals_core tac gs [] ff
/-- LCF-style AND_THEN tactic. It applies tac1, and if succeed applies tac2 to each subgoal produced by tac1 -/
meta def seq (tac1 : tactic unit) (tac2 : tactic unit) : tactic unit :=
do g::gs ← get_goals,
set_goals [g],
tac1, all_goals tac2,
gs' ← get_goals,
set_goals (gs' ++ gs)
meta def seq_focus (tac1 : tactic unit) (tacs2 : list (tactic unit)) : tactic unit :=
do g::gs ← get_goals,
set_goals [g],
tac1, focus tacs2,
gs' ← get_goals,
set_goals (gs' ++ gs)
meta instance andthen_seq : has_andthen (tactic unit) (tactic unit) (tactic unit) :=
⟨seq⟩
meta instance andthen_seq_focus : has_andthen (tactic unit) (list (tactic unit)) (tactic unit) :=
⟨seq_focus⟩
meta constant is_trace_enabled_for : name → bool
/-- Execute tac only if option trace.n is set to true. -/
meta def when_tracing (n : name) (tac : tactic unit) : tactic unit :=
when (is_trace_enabled_for n = tt) tac
/-- Fail if there are no remaining goals. -/
meta def fail_if_no_goals : tactic unit :=
do n ← num_goals,
when (n = 0) (fail "tactic failed, there are no goals to be solved")
/-- Fail if there are unsolved goals. -/
meta def done : tactic unit :=
do n ← num_goals,
when (n ≠ 0) (fail "done tactic failed, there are unsolved goals")
meta def apply_opt_param : tactic unit :=
do `(opt_param %%t %%v) ← target,
exact v
meta def apply_auto_param : tactic unit :=
do `(auto_param %%type %%tac_name_expr) ← target,
change type,
tac_name ← eval_expr name tac_name_expr,
tac ← eval_expr (tactic unit) (expr.const tac_name []),
tac
meta def has_opt_auto_param (ms : list expr) : tactic bool :=
ms.mfoldl
(λ r m, do type ← infer_type m,
return $ r || type.is_napp_of `opt_param 2 || type.is_napp_of `auto_param 2)
ff
meta def try_apply_opt_auto_param (cfg : apply_cfg) (ms : list expr) : tactic unit :=
when (cfg.auto_param || cfg.opt_param) $
mwhen (has_opt_auto_param ms) $ do
gs ← get_goals,
ms.mfor' (λ m, set_goals [m] >>
when cfg.opt_param (try apply_opt_param) >>
when cfg.auto_param (try apply_auto_param)),
set_goals gs
meta def apply (e : expr) (cfg : apply_cfg := {}) : tactic unit :=
apply_core e cfg >>= try_apply_opt_auto_param cfg
meta def fapply (e : expr) : tactic unit :=
apply e {new_goals := new_goals.all}
meta def eapply (e : expr) : tactic unit :=
apply e {new_goals := new_goals.non_dep_only}
/-- Try to solve the main goal using type class resolution. -/
meta def apply_instance : tactic unit :=
do tgt ← target >>= instantiate_mvars,
b ← is_class tgt,
if b then mk_instance tgt >>= exact
else fail "apply_instance tactic fail, target is not a type class"
/-- Create a list of universe meta-variables of the given size. -/
meta def mk_num_meta_univs : nat → tactic (list level)
| 0 := return []
| (succ n) := do
l ← mk_meta_univ,
ls ← mk_num_meta_univs n,
return (l::ls)
/-- Return `expr.const c [l_1, ..., l_n]` where l_i's are fresh universe meta-variables. -/
meta def mk_const (c : name) : tactic expr :=
do env ← get_env,
decl ← env.get c,
let num := decl.univ_params.length,
ls ← mk_num_meta_univs num,
return (expr.const c ls)
/-- Apply the constant `c` -/
meta def applyc (c : name) : tactic unit :=
mk_const c >>= apply
meta def eapplyc (c : name) : tactic unit :=
mk_const c >>= eapply
meta def save_const_type_info (n : name) {elab : bool} (ref : expr elab) : tactic unit :=
try (do c ← mk_const n, save_type_info c ref)
/-- Create a fresh universe `?u`, a metavariable `?T : Type.{?u}`,
and return metavariable `?M : ?T`.
This action can be used to create a meta-variable when
we don't know its type at creation time -/
meta def mk_mvar : tactic expr :=
do u ← mk_meta_univ,
t ← mk_meta_var (expr.sort u),
mk_meta_var t
/-- Makes a sorry macro with a meta-variable as its type. -/
meta def mk_sorry : tactic expr := do
u ← mk_meta_univ,
t ← mk_meta_var (expr.sort u),
return $ expr.mk_sorry t
/-- Closes the main goal using sorry. -/
meta def admit : tactic unit :=
target >>= exact ∘ expr.mk_sorry
meta def mk_local' (pp_name : name) (bi : binder_info) (type : expr) : tactic expr := do
uniq_name ← mk_fresh_name,
return $ expr.local_const uniq_name pp_name bi type
meta def mk_local_def (pp_name : name) (type : expr) : tactic expr :=
mk_local' pp_name binder_info.default type
meta def mk_local_pis : expr → tactic (list expr × expr)
| (expr.pi n bi d b) := do
p ← mk_local' n bi d,
(ps, r) ← mk_local_pis (expr.instantiate_var b p),
return ((p :: ps), r)
| e := return ([], e)
private meta def get_pi_arity_aux : expr → tactic nat
| (expr.pi n bi d b) :=
do m ← mk_fresh_name,
let l := expr.local_const m n bi d,
new_b ← whnf (expr.instantiate_var b l),
r ← get_pi_arity_aux new_b,
return (r + 1)
| e := return 0
/-- Compute the arity of the given (Pi-)type -/
meta def get_pi_arity (type : expr) : tactic nat :=
whnf type >>= get_pi_arity_aux
/-- Compute the arity of the given function -/
meta def get_arity (fn : expr) : tactic nat :=
infer_type fn >>= get_pi_arity
meta def triv : tactic unit := mk_const `trivial >>= exact
notation `dec_trivial` := of_as_true (by tactic.triv)
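/- Illustrative sketch (not part of the original file): a hypothetical
finishing tactic that feeds `assumption` and `triv` to the `solve` combinator
defined above. -/
meta def finish_simple : tactic unit :=
solve [assumption, triv]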
meta def by_contradiction (H : option name := none) : tactic expr :=
do tgt : expr ← target,
(match_not tgt >> return ())
<|>
(mk_mapp `decidable.by_contradiction [some tgt, none] >>= eapply)
<|>
fail "tactic by_contradiction failed, target is not a negation nor a decidable proposition (remark: when 'local attribute classical.prop_decidable [instance]' is used all propositions are decidable)",
match H with
| some n := intro n
| none := intro1
end
private meta def generalizes_aux (md : transparency) : list expr → tactic unit
| [] := skip
| (e::es) := generalize e `x md >> generalizes_aux es
meta def generalizes (es : list expr) (md := semireducible) : tactic unit :=
generalizes_aux md es
private meta def kdependencies_core (e : expr) (md : transparency) : list expr → list expr → tactic (list expr)
| [] r := return r
| (h::hs) r :=
do type ← infer_type h,
d ← kdepends_on type e md,
if d then kdependencies_core hs (h::r)
else kdependencies_core hs r
/-- Return all hypotheses that depend on `e`.
The dependency test is performed using `kdepends_on` with the given transparency setting. -/
meta def kdependencies (e : expr) (md := reducible) : tactic (list expr) :=
do ctx ← local_context, kdependencies_core e md ctx []
/-- Revert all hypotheses that depend on `e` -/
meta def revert_kdependencies (e : expr) (md := reducible) : tactic nat :=
kdependencies e md >>= revert_lst
meta def revert_kdeps (e : expr) (md := reducible) :=
revert_kdependencies e md
/-- Similar to `cases_core`, but `e` doesn't need to be a hypothesis.
Remark, it reverts dependencies using `revert_kdeps`.
Two different transparency modes are used `md` and `dmd`.
The mode `md` is used with `cases_core` and `dmd` with `generalize` and `revert_kdeps`. -/
meta def cases (e : expr) (ids : list name := []) (md := semireducible) (dmd := semireducible) : tactic unit :=
if e.is_local_constant then
cases_core e ids md >> return ()
else do
x ← mk_fresh_name,
n ← revert_kdependencies e dmd,
(tactic.generalize e x dmd)
<|>
(do t ← infer_type e,
tactic.assertv x t e,
get_local x >>= tactic.revert,
return ()),
h ← tactic.intro1,
(step (cases_core h ids md); intron n)
meta def refine (e : pexpr) : tactic unit :=
do tgt : expr ← target,
to_expr ``(%%e : %%tgt) tt >>= exact
meta def by_cases (e : expr) (h : name) : tactic unit :=
do dec_e ← (mk_app `decidable [e] <|> fail "by_cases tactic failed, type is not a proposition"),
inst ← (mk_instance dec_e <|> fail "by_cases tactic failed, type of given expression is not decidable"),
em ← mk_app `decidable.em [e, inst],
cases em [h, h]
private meta def get_undeclared_const (env : environment) (base : name) : ℕ → name | i :=
let n := base <.> ("_aux_" ++ repr i) in
if ¬env.contains n then n
else get_undeclared_const (i+1)
meta def new_aux_decl_name : tactic name := do
env ← get_env, n ← decl_name,
return $ get_undeclared_const env n 1
private meta def mk_aux_decl_name : option name → tactic name
| none := new_aux_decl_name
| (some suffix) := do p ← decl_name, return $ p ++ suffix
meta def abstract (tac : tactic unit) (suffix : option name := none) (zeta_reduce := tt) : tactic unit :=
do fail_if_no_goals,
gs ← get_goals,
type ← if zeta_reduce then target >>= zeta else target,
is_lemma ← is_prop type,
m ← mk_meta_var type,
set_goals [m],
tac,
n ← num_goals,
when (n ≠ 0) (fail "abstract tactic failed, there are unsolved goals"),
set_goals gs,
val ← instantiate_mvars m,
val ← if zeta_reduce then zeta val else return val,
c ← mk_aux_decl_name suffix,
e ← add_aux_decl c type val is_lemma,
exact e
/-- `solve_aux type tac` synthesizes an element of `type` using the tactic `tac` -/
meta def solve_aux {α : Type} (type : expr) (tac : tactic α) : tactic (α × expr) :=
do m ← mk_meta_var type,
gs ← get_goals,
set_goals [m],
a ← tac,
set_goals gs,
return (a, m)
/-- Return tt iff 'd' is a declaration in one of the current open namespaces -/
meta def in_open_namespaces (d : name) : tactic bool :=
do ns ← open_namespaces,
env ← get_env,
return $ ns.any (λ n, n.is_prefix_of d) && env.contains d
/-- Execute tac for 'max' "heartbeats". The heartbeat is approx. the maximum number of
memory allocations (in thousands) performed by 'tac'. This is a deterministic way of interrupting
long running tactics. -/
meta def try_for {α} (max : nat) (tac : tactic α) : tactic α :=
λ s,
match _root_.try_for max (tac s) with
| some r := r
| none := mk_exception "try_for tactic failed, timeout" none s
end
meta def updateex_env (f : environment → exceptional environment) : tactic unit :=
do env ← get_env,
env ← returnex $ f env,
set_env env
/- Add a new inductive datatype to the environment
name, universe parameters, number of parameters, type, constructors (name and type), is_meta -/
meta def add_inductive (n : name) (ls : list name) (p : nat) (ty : expr) (is : list (name × expr))
(is_meta : bool := ff) : tactic unit :=
updateex_env $ λe, e.add_inductive n ls p ty is is_meta
meta def add_meta_definition (n : name) (lvls : list name) (type value : expr) : tactic unit :=
add_decl (declaration.defn n lvls type value reducibility_hints.abbrev ff)
meta def rename (curr : name) (new : name) : tactic unit :=
do h ← get_local curr,
n ← revert h,
intro new,
intron (n - 1)
/--
"Replace" hypothesis `h : type` with `h : new_type` where `eq_pr` is a proof
that (type = new_type). The tactic actually creates a new hypothesis
with the same user facing name, and (tries to) clear `h`.
The `clear` step fails if `h` has forward dependencies. In this case, the old `h`
will remain in the local context. The tactic returns the new hypothesis. -/
meta def replace_hyp (h : expr) (new_type : expr) (eq_pr : expr) : tactic expr :=
do h_type ← infer_type h,
new_h ← assert h.local_pp_name new_type,
mk_eq_mp eq_pr h >>= exact,
try $ clear h,
return new_h
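/- Illustrative sketch (not part of the original file): a hypothetical helper
combining `mk_const` and `note` above; it adds a hypothesis `h : true` to the
local context and returns it. -/
meta def note_trivial (h : name) : tactic expr :=
do pr ← mk_const `trivial,
   note h none pr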
end tactic
notation [parsing_only] `command`:max := tactic unit
open tactic
namespace list
meta def for_each {α} : list α → (α → tactic unit) → tactic unit
| [] fn := skip
| (e::es) fn := do fn e, for_each es fn
meta def any_of {α β} : list α → (α → tactic β) → tactic β
| [] fn := failed
| (e::es) fn := do opt_b ← try_core (fn e),
match opt_b with
| some b := return b
| none := any_of es fn
end
end list
/-
Define id_locked using meta-programming because we don't have
syntax for setting reducibility_hints.
See module init.meta.declaration.
Remark: id_locked is used in the builtin implementation of tactic.change
-/
run_cmd do
let l := level.param `l,
let Ty : pexpr := expr.sort l,
type ← to_expr ``(Π (α : %%Ty), α → α),
val ← to_expr ``(λ (α : %%Ty) (a : α), a),
add_decl (declaration.defn `id_locked [`l] type val reducibility_hints.opaque tt)
lemma id_locked_eq {α : Type u} (a : α) : id_locked α a = a :=
rfl
attribute [inline] id_locked
/-
Define id_rhs using meta-programming because we don't have
syntax for setting reducibility_hints.
See module init.meta.declaration.
Remark: id_rhs is used in the equation compiler to address performance
issues when proving equational lemmas.
-/
run_cmd do
let l := level.param `l,
let Ty : pexpr := expr.sort l,
type ← to_expr ``(Π (α : %%Ty), α → α),
val ← to_expr ``(λ (α : %%Ty) (a : α), a),
add_decl (declaration.defn `id_rhs [`l] type val reducibility_hints.abbrev tt)
attribute [reducible, inline] id_rhs
/- Install monad laws tactic and use it to prove some instances. -/
meta def control_laws_tac := whnf_target >> intros >> to_expr ``(rfl) >>= exact
meta def unsafe_monad_from_pure_bind {m : Type u → Type v}
(pure : Π {α : Type u}, α → m α)
(bind : Π {α β : Type u}, m α → (α → m β) → m β) : monad m :=
{pure := @pure, bind := @bind,
id_map := undefined, pure_bind := undefined, bind_assoc := undefined}
meta instance : monad task :=
{map := @task.map, bind := @task.bind, pure := @task.pure,
id_map := undefined, pure_bind := undefined, bind_assoc := undefined,
bind_pure_comp_eq_map := undefined}
namespace tactic
meta def mk_id_locked_proof (prop : expr) (pr : expr) : expr :=
expr.app (expr.app (expr.const ``id_locked [level.zero]) prop) pr
meta def mk_id_locked_eq (lhs : expr) (rhs : expr) (pr : expr) : tactic expr :=
do prop ← mk_app `eq [lhs, rhs],
return $ mk_id_locked_proof prop pr
meta def replace_target (new_target : expr) (pr : expr) : tactic unit :=
do t ← target,
assert `htarget new_target, swap,
ht ← get_local `htarget,
locked_pr ← mk_id_locked_eq t new_target pr,
mk_eq_mpr locked_pr ht >>= exact
end tactic
|
18340fe504bfadc4f511e88defe38d02805ed325
|
a9d0fb7b0e4f802bd3857b803e6c5c23d87fef91
|
/tests/lean/protected_test.lean
|
20f8757deac4d78e4b786eab1d3a31765853c6f8
|
[
"Apache-2.0"
] |
permissive
|
soonhokong/lean-osx
|
4a954262c780e404c1369d6c06516161d07fcb40
|
3670278342d2f4faa49d95b46d86642d7875b47c
|
refs/heads/master
| 1,611,410,334,552
| 1,474,425,686,000
| 1,474,425,686,000
| 12,043,103
| 5
| 1
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 250
|
lean
|
namespace nat
check induction_on -- ERROR
check rec_on -- ERROR
check nat.induction_on
check le.rec_on -- OK
check nat.le.rec_on
namespace le
check rec_on -- ERROR
check le.rec_on
end le
end nat
|
60d89f79d710addeae0254ea52233da3b9cb3335
|
ee8cdbabf07f77e7be63a449b8483ce308d37218
|
/lean/src/test/mathd-algebra-24.lean
|
6af150deee8b23719d0dabfaaf179f9846e49930
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
zeta1999/miniF2F
|
6d66c75d1c18152e224d07d5eed57624f731d4b7
|
c1ba9629559c5273c92ec226894baa0c1ce27861
|
refs/heads/main
| 1,681,897,460,642
| 1,620,646,361,000
| 1,620,646,361,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 239
|
lean
|
/-
Copyright (c) 2021 OpenAI. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kunhao Zheng
-/
import data.real.basic
example (x : ℝ) (h₀ : x / 50 = 40) : x = 2000 :=
begin
sorry
end
|
480ed842bbed673cb1fbc3442f8e4fce2501c2d6
|
bb31430994044506fa42fd667e2d556327e18dfe
|
/src/algebra/star/prod.lean
|
3ced92aefa10a6d16564e1c00167ab00e59efe3a
|
[
"Apache-2.0"
] |
permissive
|
sgouezel/mathlib
|
0cb4e5335a2ba189fa7af96d83a377f83270e503
|
00638177efd1b2534fc5269363ebf42a7871df9a
|
refs/heads/master
| 1,674,527,483,042
| 1,673,665,568,000
| 1,673,665,568,000
| 119,598,202
| 0
| 0
| null | 1,517,348,647,000
| 1,517,348,646,000
| null |
UTF-8
|
Lean
| false
| false
| 1,985
|
lean
|
/-
Copyright (c) 2022 Eric Wieser. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Eric Wieser
-/
import algebra.star.basic
import algebra.ring.prod
import algebra.module.prod
/-!
# `star` on product types
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
We put a `has_star` structure on product types that operates elementwise.
-/
universes u v w
variables {R : Type u} {S : Type v}
namespace prod
instance [has_star R] [has_star S] : has_star (R × S) :=
{ star := λ x, (star x.1, star x.2) }
@[simp] lemma fst_star [has_star R] [has_star S] (x : R × S) : (star x).1 = star x.1 := rfl
@[simp] lemma snd_star [has_star R] [has_star S] (x : R × S) : (star x).2 = star x.2 := rfl
lemma star_def [has_star R] [has_star S] (x : R × S) : star x = (star x.1, star x.2) := rfl
instance [has_involutive_star R] [has_involutive_star S] : has_involutive_star (R × S) :=
{ star_involutive := λ _, prod.ext (star_star _) (star_star _) }
instance [semigroup R] [semigroup S] [star_semigroup R] [star_semigroup S] :
star_semigroup (R × S) :=
{ star_mul := λ _ _, prod.ext (star_mul _ _) (star_mul _ _) }
instance [add_monoid R] [add_monoid S] [star_add_monoid R] [star_add_monoid S] :
star_add_monoid (R × S) :=
{ star_add := λ _ _, prod.ext (star_add _ _) (star_add _ _) }
instance [non_unital_semiring R] [non_unital_semiring S] [star_ring R] [star_ring S] :
star_ring (R × S) :=
{ ..prod.star_add_monoid, ..(prod.star_semigroup : star_semigroup (R × S)) }
instance {α : Type w} [has_smul α R] [has_smul α S] [has_star α] [has_star R] [has_star S]
[star_module α R] [star_module α S] :
star_module α (R × S) :=
{ star_smul := λ r x, prod.ext (star_smul _ _) (star_smul _ _) }
end prod
@[simp] lemma units.embed_product_star [monoid R] [star_semigroup R] (u : Rˣ) :
units.embed_product R (star u) = star (units.embed_product R u) := rfl
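-- Added illustration (not part of the original file): `star` on a pair is computed
-- componentwise, and this holds definitionally.
example [has_star R] [has_star S] (r : R) (s : S) : star (r, s) = (star r, star s) := rfl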
blob_id: e75e413cd0cec19f1ac21e786843110ca2079a21 | directory_id: 367134ba5a65885e863bdc4507601606690974c1
path: /src/algebra/order.lean | content_id: ec7ee5f7fe8b821c8bcc156913d0f8f884bdb423
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: kodyvajjha/mathlib
snapshot_id: 9bead00e90f68269a313f45f5561766cfd8d5cad | revision_id: b98af5dd79e13a38d84438b850a2e8858ec21284
branch_name: refs/heads/master | visit_date: 1,624,350,366,310 | revision_date: 1,615,563,062,000 | committer_date: 1,615,563,062,000
github_id: 162,666,963 | star_events_count: 0 | fork_events_count: 0
gha_license_id: Apache-2.0 | gha_event_created_at: 1,545,367,651,000 | gha_created_at: 1,545,367,651,000 | gha_language: null
src_encoding: UTF-8 | language: Lean | is_vendor: false | is_generated: false | length_bytes: 15,142 | extension: lean
content:
/-
Copyright (c) 2017 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
import tactic.alias
import tactic.lint
/-!
# Lemmas about inequalities
This file contains some lemmas about `≤`/`≥`/`<`/`>`, and `cmp`.
* We simplify `a ≥ b` and `a > b` to `b ≤ a` and `b < a`, respectively. This way we can formulate
all lemmas using `≤`/`<` avoiding duplication.
* In some cases we introduce dot syntax aliases so that, e.g., from
`(hab : a ≤ b) (hbc : b ≤ c) (hbc' : b < c)` one can prove `hab.trans hbc : a ≤ c` and
`hab.trans_lt hbc' : a < c`.
-/
universe u
variables {α : Type u}
alias le_trans ← has_le.le.trans
alias lt_of_le_of_lt ← has_le.le.trans_lt
alias le_antisymm ← has_le.le.antisymm
alias lt_of_le_of_ne ← has_le.le.lt_of_ne
alias lt_of_le_not_le ← has_le.le.lt_of_not_le
alias lt_or_eq_of_le ← has_le.le.lt_or_eq
alias le_of_lt ← has_lt.lt.le
alias lt_trans ← has_lt.lt.trans
alias lt_of_lt_of_le ← has_lt.lt.trans_le
alias ne_of_lt ← has_lt.lt.ne
alias lt_asymm ← has_lt.lt.asymm has_lt.lt.not_lt
alias le_of_eq ← eq.le
/-- A version of `le_refl` where the argument is implicit -/
lemma le_rfl [preorder α] {x : α} : x ≤ x := le_refl x
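-- Added illustration (not part of the original file): `le_rfl` needs no explicit
-- argument, which is convenient e.g. when closing a `calc` block.
example [preorder α] {a : α} : a ≤ a := le_rfl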
namespace eq
/--
If `x = y` then `y ≤ x`. Note: this lemma uses `y ≤ x` instead of `x ≥ y`,
because `le` is used almost exclusively in mathlib.
-/
protected lemma ge [preorder α] {x y : α} (h : x = y) : y ≤ x := h.symm.le
lemma trans_le [preorder α] {x y z : α} (h1 : x = y) (h2 : y ≤ z) : x ≤ z := h1.le.trans h2
end eq
namespace has_le.le
@[nolint ge_or_gt] -- see Note [nolint_ge]
protected lemma ge [has_le α] {x y : α} (h : x ≤ y) : y ≥ x := h
lemma trans_eq [preorder α] {x y z : α} (h1 : x ≤ y) (h2 : y = z) : x ≤ z := h1.trans h2.le
lemma lt_iff_ne [partial_order α] {x y : α} (h : x ≤ y) : x < y ↔ x ≠ y := ⟨λ h, h.ne, h.lt_of_ne⟩
lemma le_iff_eq [partial_order α] {x y : α} (h : x ≤ y) : y ≤ x ↔ y = x :=
⟨λ h', h'.antisymm h, eq.le⟩
lemma lt_or_le [linear_order α] {a b : α} (h : a ≤ b) (c : α) : a < c ∨ c ≤ b :=
(lt_or_ge a c).imp id $ λ hc, le_trans hc h
lemma le_or_lt [linear_order α] {a b : α} (h : a ≤ b) (c : α) : a ≤ c ∨ c < b :=
(le_or_gt a c).imp id $ λ hc, lt_of_lt_of_le hc h
lemma le_or_le [linear_order α] {a b : α} (h : a ≤ b) (c : α) : a ≤ c ∨ c ≤ b :=
(h.le_or_lt c).elim or.inl (λ h, or.inr $ le_of_lt h)
end has_le.le
namespace has_lt.lt
@[nolint ge_or_gt] -- see Note [nolint_ge]
protected lemma gt [has_lt α] {x y : α} (h : x < y) : y > x := h
protected lemma false [preorder α] {x : α} : x < x → false := lt_irrefl x
lemma ne' [preorder α] {x y : α} (h : x < y) : y ≠ x := h.ne.symm
lemma lt_or_lt [linear_order α] {x y : α} (h : x < y) (z : α) : x < z ∨ z < y :=
(lt_or_ge z y).elim or.inr (λ hz, or.inl $ h.trans_le hz)
end has_lt.lt
namespace ge
@[nolint ge_or_gt] -- see Note [nolint_ge]
protected lemma le [has_le α] {x y : α} (h : x ≥ y) : y ≤ x := h
end ge
namespace gt
@[nolint ge_or_gt] -- see Note [nolint_ge]
protected lemma lt [has_lt α] {x y : α} (h : x > y) : y < x := h
end gt
@[nolint ge_or_gt] -- see Note [nolint_ge]
theorem ge_of_eq [preorder α] {a b : α} (h : a = b) : a ≥ b :=
h.ge
@[simp, nolint ge_or_gt] -- see Note [nolint_ge]
lemma ge_iff_le [preorder α] {a b : α} : a ≥ b ↔ b ≤ a := iff.rfl
@[simp, nolint ge_or_gt] -- see Note [nolint_ge]
lemma gt_iff_lt [preorder α] {a b : α} : a > b ↔ b < a := iff.rfl
lemma not_le_of_lt [preorder α] {a b : α} (h : a < b) : ¬ b ≤ a :=
(le_not_le_of_lt h).right
alias not_le_of_lt ← has_lt.lt.not_le
lemma not_lt_of_le [preorder α] {a b : α} (h : a ≤ b) : ¬ b < a
| hab := hab.not_le h
alias not_lt_of_le ← has_le.le.not_lt
lemma le_iff_eq_or_lt [partial_order α] {a b : α} : a ≤ b ↔ a = b ∨ a < b :=
le_iff_lt_or_eq.trans or.comm
lemma lt_iff_le_and_ne [partial_order α] {a b : α} : a < b ↔ a ≤ b ∧ a ≠ b :=
⟨λ h, ⟨le_of_lt h, ne_of_lt h⟩, λ ⟨h1, h2⟩, h1.lt_of_ne h2⟩
lemma eq_iff_le_not_lt [partial_order α] {a b : α} : a = b ↔ a ≤ b ∧ ¬ a < b :=
⟨λ h, ⟨h.le, h ▸ lt_irrefl _⟩, λ ⟨h₁, h₂⟩, h₁.antisymm $
classical.by_contradiction $ λ h₃, h₂ (h₁.lt_of_not_le h₃)⟩
lemma eq_or_lt_of_le [partial_order α] {a b : α} (h : a ≤ b) : a = b ∨ a < b :=
h.lt_or_eq.symm
alias eq_or_lt_of_le ← has_le.le.eq_or_lt
lemma ne.le_iff_lt [partial_order α] {a b : α} (h : a ≠ b) : a ≤ b ↔ a < b :=
⟨λ h', lt_of_le_of_ne h' h, λ h, h.le⟩
@[simp] lemma ne_iff_lt_iff_le [partial_order α] {a b : α} : (a ≠ b ↔ a < b) ↔ a ≤ b :=
⟨λ h, classical.by_cases le_of_eq (le_of_lt ∘ h.mp), λ h, ⟨lt_of_le_of_ne h, ne_of_lt⟩⟩
lemma lt_of_not_ge' [linear_order α] {a b : α} (h : ¬ b ≤ a) : a < b :=
((le_total _ _).resolve_right h).lt_of_not_le h
lemma lt_iff_not_ge' [linear_order α] {x y : α} : x < y ↔ ¬ y ≤ x :=
⟨not_le_of_gt, lt_of_not_ge'⟩
lemma le_of_not_lt [linear_order α] {a b : α} : ¬ a < b → b ≤ a := not_lt.1
lemma lt_or_le [linear_order α] : ∀ a b : α, a < b ∨ b ≤ a := lt_or_ge
lemma le_or_lt [linear_order α] : ∀ a b : α, a ≤ b ∨ b < a := le_or_gt
lemma ne.lt_or_lt [linear_order α] {a b : α} (h : a ≠ b) : a < b ∨ b < a :=
lt_or_gt_of_ne h
lemma not_lt_iff_eq_or_lt [linear_order α] {a b : α} : ¬ a < b ↔ a = b ∨ b < a :=
not_lt.trans $ le_iff_eq_or_lt.trans $ or_congr eq_comm iff.rfl
lemma exists_ge_of_linear [linear_order α] (a b : α) : ∃ c, a ≤ c ∧ b ≤ c :=
match le_total a b with
| or.inl h := ⟨_, h, le_rfl⟩
| or.inr h := ⟨_, le_rfl, h⟩
end
lemma lt_imp_lt_of_le_imp_le {β} [linear_order α] [preorder β] {a b : α} {c d : β}
(H : a ≤ b → c ≤ d) (h : d < c) : b < a :=
lt_of_not_ge' $ λ h', (H h').not_lt h
lemma le_imp_le_of_lt_imp_lt {β} [preorder α] [linear_order β] {a b : α} {c d : β}
(H : d < c → b < a) (h : a ≤ b) : c ≤ d :=
le_of_not_gt $ λ h', (H h').not_le h
lemma le_imp_le_iff_lt_imp_lt {β} [linear_order α] [linear_order β] {a b : α} {c d : β} :
(a ≤ b → c ≤ d) ↔ (d < c → b < a) :=
⟨lt_imp_lt_of_le_imp_le, le_imp_le_of_lt_imp_lt⟩
lemma lt_iff_lt_of_le_iff_le' {β} [preorder α] [preorder β] {a b : α} {c d : β}
(H : a ≤ b ↔ c ≤ d) (H' : b ≤ a ↔ d ≤ c) : b < a ↔ d < c :=
lt_iff_le_not_le.trans $ (and_congr H' (not_congr H)).trans lt_iff_le_not_le.symm
lemma lt_iff_lt_of_le_iff_le {β} [linear_order α] [linear_order β] {a b : α} {c d : β}
(H : a ≤ b ↔ c ≤ d) : b < a ↔ d < c :=
not_le.symm.trans $ (not_congr H).trans $ not_le
lemma le_iff_le_iff_lt_iff_lt {β} [linear_order α] [linear_order β] {a b : α} {c d : β} :
(a ≤ b ↔ c ≤ d) ↔ (b < a ↔ d < c) :=
⟨lt_iff_lt_of_le_iff_le, λ H, not_lt.symm.trans $ (not_congr H).trans $ not_lt⟩
lemma eq_of_forall_le_iff [partial_order α] {a b : α}
(H : ∀ c, c ≤ a ↔ c ≤ b) : a = b :=
le_antisymm ((H _).1 (le_refl _)) ((H _).2 (le_refl _))
lemma le_of_forall_le [preorder α] {a b : α}
(H : ∀ c, c ≤ a → c ≤ b) : a ≤ b :=
H _ (le_refl _)
lemma le_of_forall_le' [preorder α] {a b : α}
(H : ∀ c, a ≤ c → b ≤ c) : b ≤ a :=
H _ (le_refl _)
lemma le_of_forall_lt [linear_order α] {a b : α}
(H : ∀ c, c < a → c < b) : a ≤ b :=
le_of_not_lt $ λ h, lt_irrefl _ (H _ h)
lemma forall_lt_iff_le [linear_order α] {a b : α} :
(∀ ⦃c⦄, c < a → c < b) ↔ a ≤ b :=
⟨le_of_forall_lt, λ h c hca, lt_of_lt_of_le hca h⟩
lemma le_of_forall_lt' [linear_order α] {a b : α}
(H : ∀ c, a < c → b < c) : b ≤ a :=
le_of_not_lt $ λ h, lt_irrefl _ (H _ h)
lemma forall_lt_iff_le' [linear_order α] {a b : α} :
(∀ ⦃c⦄, a < c → b < c) ↔ b ≤ a :=
⟨le_of_forall_lt', λ h c hac, lt_of_le_of_lt h hac⟩
lemma eq_of_forall_ge_iff [partial_order α] {a b : α}
(H : ∀ c, a ≤ c ↔ b ≤ c) : a = b :=
le_antisymm ((H _).2 (le_refl _)) ((H _).1 (le_refl _))
/-- monotonicity of `≤` with respect to `→` -/
lemma le_implies_le_of_le_of_le {a b c d : α} [preorder α] (h₀ : c ≤ a) (h₁ : b ≤ d) :
a ≤ b → c ≤ d :=
assume h₂ : a ≤ b,
calc c
≤ a : h₀
... ≤ b : h₂
... ≤ d : h₁
namespace decidable
-- See Note [decidable namespace]
lemma le_imp_le_iff_lt_imp_lt {β} [linear_order α] [linear_order β]
{a b : α} {c d : β} : (a ≤ b → c ≤ d) ↔ (d < c → b < a) :=
⟨lt_imp_lt_of_le_imp_le, le_imp_le_of_lt_imp_lt⟩
-- See Note [decidable namespace]
lemma le_iff_le_iff_lt_iff_lt {β} [linear_order α] [linear_order β]
{a b : α} {c d : β} : (a ≤ b ↔ c ≤ d) ↔ (b < a ↔ d < c) :=
⟨lt_iff_lt_of_le_iff_le, λ H, not_lt.symm.trans $ (not_congr H).trans $ not_lt⟩
end decidable
/-- Like `cmp`, but uses a `≤` on the type instead of `<`. Given two elements
`x` and `y`, returns a three-way comparison result `ordering`. -/
def cmp_le {α} [has_le α] [@decidable_rel α (≤)] (x y : α) : ordering :=
if x ≤ y then
if y ≤ x then ordering.eq else ordering.lt
else ordering.gt
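-- Added sanity checks (not part of the original file): `cmp_le` evaluated on small
-- natural number literals.
example : cmp_le (2 : ℕ) 5 = ordering.lt := rfl
example : cmp_le (7 : ℕ) 7 = ordering.eq := rfl
example : cmp_le (5 : ℕ) 2 = ordering.gt := rfl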
theorem cmp_le_swap {α} [has_le α] [is_total α (≤)] [@decidable_rel α (≤)] (x y : α) :
(cmp_le x y).swap = cmp_le y x :=
begin
by_cases xy : x ≤ y; by_cases yx : y ≤ x; simp [cmp_le, *, ordering.swap],
cases not_or xy yx (total_of _ _ _)
end
theorem cmp_le_eq_cmp {α} [preorder α] [is_total α (≤)]
[@decidable_rel α (≤)] [@decidable_rel α (<)] (x y : α) : cmp_le x y = cmp x y :=
begin
by_cases xy : x ≤ y; by_cases yx : y ≤ x;
simp [cmp_le, lt_iff_le_not_le, *, cmp, cmp_using],
cases not_or xy yx (total_of _ _ _)
end
namespace ordering
/-- `compares o a b` means that `a` and `b` have the ordering relation
`o` between them, assuming that the relation `a < b` is defined -/
@[simp] def compares [has_lt α] : ordering → α → α → Prop
| lt a b := a < b
| eq a b := a = b
| gt a b := a > b
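-- Added illustration (not part of the original file): once the `ordering` argument is a
-- constructor, `compares` unfolds to the corresponding relation.
example [has_lt α] (a b : α) : compares ordering.lt a b ↔ a < b := iff.rfl
example [has_lt α] (a b : α) : compares ordering.eq a b ↔ a = b := iff.rfl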
theorem compares_swap [has_lt α] {a b : α} {o : ordering} :
o.swap.compares a b ↔ o.compares b a :=
by { cases o, exacts [iff.rfl, eq_comm, iff.rfl] }
alias compares_swap ↔ ordering.compares.of_swap ordering.compares.swap
theorem swap_eq_iff_eq_swap {o o' : ordering} : o.swap = o' ↔ o = o'.swap :=
⟨λ h, by rw [← swap_swap o, h], λ h, by rw [← swap_swap o', h]⟩
theorem compares.eq_lt [preorder α] :
∀ {o} {a b : α}, compares o a b → (o = lt ↔ a < b)
| lt a b h := ⟨λ _, h, λ _, rfl⟩
| eq a b h := ⟨λ h, by injection h, λ h', (ne_of_lt h' h).elim⟩
| gt a b h := ⟨λ h, by injection h, λ h', (lt_asymm h h').elim⟩
theorem compares.ne_lt [preorder α] :
∀ {o} {a b : α}, compares o a b → (o ≠ lt ↔ b ≤ a)
| lt a b h := ⟨absurd rfl, λ h', (not_le_of_lt h h').elim⟩
| eq a b h := ⟨λ _, ge_of_eq h, λ _ h, by injection h⟩
| gt a b h := ⟨λ _, le_of_lt h, λ _ h, by injection h⟩
theorem compares.eq_eq [preorder α] :
∀ {o} {a b : α}, compares o a b → (o = eq ↔ a = b)
| lt a b h := ⟨λ h, by injection h, λ h', (ne_of_lt h h').elim⟩
| eq a b h := ⟨λ _, h, λ _, rfl⟩
| gt a b h := ⟨λ h, by injection h, λ h', (ne_of_gt h h').elim⟩
theorem compares.eq_gt [preorder α] {o} {a b : α} (h : compares o a b) : (o = gt ↔ b < a) :=
swap_eq_iff_eq_swap.symm.trans h.swap.eq_lt
theorem compares.ne_gt [preorder α] {o} {a b : α} (h : compares o a b) : (o ≠ gt ↔ a ≤ b) :=
(not_congr swap_eq_iff_eq_swap.symm).trans h.swap.ne_lt
theorem compares.le_total [preorder α] {a b : α} :
∀ {o}, compares o a b → a ≤ b ∨ b ≤ a
| lt h := or.inl (le_of_lt h)
| eq h := or.inl (le_of_eq h)
| gt h := or.inr (le_of_lt h)
theorem compares.le_antisymm [preorder α] {a b : α} :
∀ {o}, compares o a b → a ≤ b → b ≤ a → a = b
| lt h _ hba := (not_le_of_lt h hba).elim
| eq h _ _ := h
| gt h hab _ := (not_le_of_lt h hab).elim
theorem compares.inj [preorder α] {o₁} :
∀ {o₂} {a b : α}, compares o₁ a b → compares o₂ a b → o₁ = o₂
| lt a b h₁ h₂ := h₁.eq_lt.2 h₂
| eq a b h₁ h₂ := h₁.eq_eq.2 h₂
| gt a b h₁ h₂ := h₁.eq_gt.2 h₂
theorem compares_iff_of_compares_impl {β : Type*} [linear_order α] [preorder β] {a b : α}
{a' b' : β} (h : ∀ {o}, compares o a b → compares o a' b') (o) :
compares o a b ↔ compares o a' b' :=
begin
refine ⟨h, λ ho, _⟩,
cases lt_trichotomy a b with hab hab,
{ change compares ordering.lt a b at hab,
rwa [ho.inj (h hab)] },
{ cases hab with hab hab,
{ change compares ordering.eq a b at hab,
rwa [ho.inj (h hab)] },
{ change compares ordering.gt a b at hab,
rwa [ho.inj (h hab)] } }
end
theorem swap_or_else (o₁ o₂) : (or_else o₁ o₂).swap = or_else o₁.swap o₂.swap :=
by cases o₁; try {refl}; cases o₂; refl
theorem or_else_eq_lt (o₁ o₂) : or_else o₁ o₂ = lt ↔ o₁ = lt ∨ (o₁ = eq ∧ o₂ = lt) :=
by cases o₁; cases o₂; exact dec_trivial
end ordering
theorem cmp_compares [linear_order α] (a b : α) : (cmp a b).compares a b :=
begin
unfold cmp cmp_using,
by_cases a < b; simp [h],
by_cases h₂ : b < a; simp [h₂, gt],
exact (lt_or_eq_of_le (le_of_not_gt h₂)).resolve_left h
end
theorem cmp_swap [preorder α] [@decidable_rel α (<)] (a b : α) : (cmp a b).swap = cmp b a :=
begin
unfold cmp cmp_using,
by_cases a < b; by_cases h₂ : b < a; simp [h, h₂, gt, ordering.swap],
exact lt_asymm h h₂
end
/-- Generate a linear order structure from a preorder and `cmp` function. -/
def linear_order_of_compares [preorder α] (cmp : α → α → ordering)
(h : ∀ a b, (cmp a b).compares a b) :
linear_order α :=
{ le_antisymm := λ a b, (h a b).le_antisymm,
le_total := λ a b, (h a b).le_total,
decidable_le := λ a b, decidable_of_iff _ (h a b).ne_gt,
decidable_lt := λ a b, decidable_of_iff _ (h a b).eq_lt,
decidable_eq := λ a b, decidable_of_iff _ (h a b).eq_eq,
.. ‹preorder α› }
variables [linear_order α] (x y : α)
@[simp] lemma cmp_eq_lt_iff : cmp x y = ordering.lt ↔ x < y :=
ordering.compares.eq_lt (cmp_compares x y)
@[simp] lemma cmp_eq_eq_iff : cmp x y = ordering.eq ↔ x = y :=
ordering.compares.eq_eq (cmp_compares x y)
@[simp] lemma cmp_eq_gt_iff : cmp x y = ordering.gt ↔ y < x :=
ordering.compares.eq_gt (cmp_compares x y)
@[simp] lemma cmp_self_eq_eq : cmp x x = ordering.eq :=
by rw cmp_eq_eq_iff
variables {x y} {β : Type*} [linear_order β] {x' y' : β}
lemma cmp_eq_cmp_symm : cmp x y = cmp x' y' ↔ cmp y x = cmp y' x' :=
by { split, rw [←cmp_swap _ y, ←cmp_swap _ y'], cc,
rw [←cmp_swap _ x, ←cmp_swap _ x'], cc, }
lemma lt_iff_lt_of_cmp_eq_cmp (h : cmp x y = cmp x' y') : x < y ↔ x' < y' :=
by rw [←cmp_eq_lt_iff, ←cmp_eq_lt_iff, h]
lemma le_iff_le_of_cmp_eq_cmp (h : cmp x y = cmp x' y') : x ≤ y ↔ x' ≤ y' :=
by { rw [←not_lt, ←not_lt, not_iff_not],
apply lt_iff_lt_of_cmp_eq_cmp, rwa cmp_eq_cmp_symm }
blob_id: a2f8b4867faee7dd1586091bdaf5fc7f6b35e7f6 | directory_id: e19b506b59d98b1469dd0023680087d321d0293d
path: /src/test/test.lean | content_id: 808bc022e6ce25c7ace10652fc0791a4f56f5102
detected_licenses: [] | license_type: no_license | repo_name: khoek/lean-where
snapshot_id: ea28c2a63b0c28159f9363ee5f3101348b2b4744 | revision_id: d2550de8655ece1cafbb9bf1beafe4d1c4481913
branch_name: refs/heads/master | visit_date: 1,586,277,232,465 | revision_date: 1,542,797,636,000 | committer_date: 1,542,797,636,000
github_id: 158,533,733 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Lean | is_vendor: false | is_generated: false | length_bytes: 244 | extension: lean
content:
import command.where
section
end
#where
namespace hi
variables (C : ℕ)
include C
#where
end hi
namespace hi.yo
variables (C : ℕ)
def b_fn (k : ℤ) : unit := let C := C in ()
def a_fn : unit := let C := C in ()
#where
end hi.yo
blob_id: 8b2b9110d6c428485638ad6348bb6cef0529f46d | directory_id: 4727251e0cd73359b15b664c3170e5d754078599
path: /src/data/list/basic.lean | content_id: 60c909ffd5c53fd5417f3161c57ddb3c2f5fb217
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: Vierkantor/mathlib
snapshot_id: 0ea59ac32a3a43c93c44d70f441c4ee810ccceca | revision_id: 83bc3b9ce9b13910b57bda6b56222495ebd31c2f
branch_name: refs/heads/master | visit_date: 1,658,323,012,449 | revision_date: 1,652,256,003,000 | committer_date: 1,652,256,003,000
github_id: 209,296,341 | star_events_count: 0 | fork_events_count: 1
gha_license_id: Apache-2.0 | gha_event_created_at: 1,568,807,655,000 | gha_created_at: 1,568,807,655,000 | gha_language: null
src_encoding: UTF-8 | language: Lean | is_vendor: false | is_generated: false | length_bytes: 149,345 | extension: lean
content:
/-
Copyright (c) 2014 Parikshit Khanna. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Parikshit Khanna, Jeremy Avigad, Leonardo de Moura, Floris van Doorn, Mario Carneiro
-/
import data.nat.basic
/-!
# Basic properties of lists
-/
open function nat (hiding one_pos)
namespace list
universes u v w x
variables {ι : Type*} {α : Type u} {β : Type v} {γ : Type w} {δ : Type x}
attribute [inline] list.head
-- TODO[gh-6025]: make this an instance once safe to do so
/-- There is only one list of an empty type -/
def unique_of_is_empty [is_empty α] : unique (list α) :=
{ uniq := λ l, match l with
| [] := rfl
| (a :: l) := is_empty_elim a
end,
..list.inhabited α }
instance : is_left_id (list α) has_append.append [] :=
⟨ nil_append ⟩
instance : is_right_id (list α) has_append.append [] :=
⟨ append_nil ⟩
instance : is_associative (list α) has_append.append :=
⟨ append_assoc ⟩
theorem cons_ne_nil (a : α) (l : list α) : a::l ≠ [].
theorem cons_ne_self (a : α) (l : list α) : a::l ≠ l :=
mt (congr_arg length) (nat.succ_ne_self _)
theorem head_eq_of_cons_eq {h₁ h₂ : α} {t₁ t₂ : list α} :
(h₁::t₁) = (h₂::t₂) → h₁ = h₂ :=
assume Peq, list.no_confusion Peq (assume Pheq Pteq, Pheq)
theorem tail_eq_of_cons_eq {h₁ h₂ : α} {t₁ t₂ : list α} :
(h₁::t₁) = (h₂::t₂) → t₁ = t₂ :=
assume Peq, list.no_confusion Peq (assume Pheq Pteq, Pteq)
@[simp] theorem cons_injective {a : α} : injective (cons a) :=
assume l₁ l₂, assume Pe, tail_eq_of_cons_eq Pe
theorem cons_inj (a : α) {l l' : list α} : a::l = a::l' ↔ l = l' :=
cons_injective.eq_iff
theorem exists_cons_of_ne_nil {l : list α} (h : l ≠ nil) : ∃ b L, l = b :: L :=
by { induction l with c l', contradiction, use [c,l'], }
lemma set_of_mem_cons (l : list α) (a : α) : {x | x ∈ a :: l} = insert a {x | x ∈ l} := rfl
/-! ### mem -/
theorem mem_singleton_self (a : α) : a ∈ [a] := mem_cons_self _ _
theorem eq_of_mem_singleton {a b : α} : a ∈ [b] → a = b :=
assume : a ∈ [b], or.elim (eq_or_mem_of_mem_cons this)
(assume : a = b, this)
(assume : a ∈ [], absurd this (not_mem_nil a))
@[simp] theorem mem_singleton {a b : α} : a ∈ [b] ↔ a = b :=
⟨eq_of_mem_singleton, or.inl⟩
theorem mem_of_mem_cons_of_mem {a b : α} {l : list α} : a ∈ b::l → b ∈ l → a ∈ l :=
assume ainbl binl, or.elim (eq_or_mem_of_mem_cons ainbl)
(assume : a = b, begin subst a, exact binl end)
(assume : a ∈ l, this)
theorem _root_.decidable.list.eq_or_ne_mem_of_mem [decidable_eq α]
{a b : α} {l : list α} (h : a ∈ b :: l) : a = b ∨ (a ≠ b ∧ a ∈ l) :=
decidable.by_cases or.inl $ assume : a ≠ b, h.elim or.inl $ assume h, or.inr ⟨this, h⟩
theorem eq_or_ne_mem_of_mem {a b : α} {l : list α} : a ∈ b :: l → a = b ∨ (a ≠ b ∧ a ∈ l) :=
by classical; exact decidable.list.eq_or_ne_mem_of_mem
theorem not_mem_append {a : α} {s t : list α} (h₁ : a ∉ s) (h₂ : a ∉ t) : a ∉ s ++ t :=
mt mem_append.1 $ not_or_distrib.2 ⟨h₁, h₂⟩
theorem ne_nil_of_mem {a : α} {l : list α} (h : a ∈ l) : l ≠ [] :=
by intro e; rw e at h; cases h
theorem mem_split {a : α} {l : list α} (h : a ∈ l) : ∃ s t : list α, l = s ++ a :: t :=
begin
induction l with b l ih, {cases h}, rcases h with rfl | h,
{ exact ⟨[], l, rfl⟩ },
{ rcases ih h with ⟨s, t, rfl⟩,
exact ⟨b::s, t, rfl⟩ }
end
theorem mem_of_ne_of_mem {a y : α} {l : list α} (h₁ : a ≠ y) (h₂ : a ∈ y :: l) : a ∈ l :=
or.elim (eq_or_mem_of_mem_cons h₂) (λe, absurd e h₁) (λr, r)
theorem ne_of_not_mem_cons {a b : α} {l : list α} : a ∉ b::l → a ≠ b :=
assume nin aeqb, absurd (or.inl aeqb) nin
theorem not_mem_of_not_mem_cons {a b : α} {l : list α} : a ∉ b::l → a ∉ l :=
assume nin nainl, absurd (or.inr nainl) nin
theorem not_mem_cons_of_ne_of_not_mem {a y : α} {l : list α} : a ≠ y → a ∉ l → a ∉ y::l :=
assume p1 p2, not.intro (assume Pain, absurd (eq_or_mem_of_mem_cons Pain) (not_or p1 p2))
theorem ne_and_not_mem_of_not_mem_cons {a y : α} {l : list α} : a ∉ y::l → a ≠ y ∧ a ∉ l :=
assume p, and.intro (ne_of_not_mem_cons p) (not_mem_of_not_mem_cons p)
@[simp] theorem mem_map {f : α → β} {b : β} {l : list α} : b ∈ map f l ↔ ∃ a, a ∈ l ∧ f a = b :=
begin
-- This proof uses no axioms; that's why it's longer than `induction l; simp [...]`.
induction l with a l ihl,
{ split, { rintro ⟨_⟩ }, { rintro ⟨a, ⟨_⟩, _⟩ } },
{ refine (or_congr eq_comm ihl).trans _,
split,
{ rintro (h|⟨c, hcl, h⟩),
exacts [⟨a, or.inl rfl, h⟩, ⟨c, or.inr hcl, h⟩] },
{ rintro ⟨c, (hc|hc), h⟩,
exacts [or.inl $ (congr_arg f hc.symm).trans h, or.inr ⟨c, hc, h⟩] } }
end
alias mem_map ↔ list.exists_of_mem_map _
theorem mem_map_of_mem (f : α → β) {a : α} {l : list α} (h : a ∈ l) : f a ∈ map f l :=
mem_map.2 ⟨a, h, rfl⟩
theorem mem_map_of_injective {f : α → β} (H : injective f) {a : α} {l : list α} :
f a ∈ map f l ↔ a ∈ l :=
⟨λ m, let ⟨a', m', e⟩ := exists_of_mem_map m in H e ▸ m', mem_map_of_mem _⟩
lemma forall_mem_map_iff {f : α → β} {l : list α} {P : β → Prop} :
(∀ i ∈ l.map f, P i) ↔ ∀ j ∈ l, P (f j) :=
begin
split,
{ assume H j hj,
exact H (f j) (mem_map_of_mem f hj) },
{ assume H i hi,
rcases mem_map.1 hi with ⟨j, hj, ji⟩,
rw ← ji,
exact H j hj }
end
@[simp] lemma map_eq_nil {f : α → β} {l : list α} : list.map f l = [] ↔ l = [] :=
⟨by cases l; simp only [forall_prop_of_true, map, forall_prop_of_false, not_false_iff],
λ h, h.symm ▸ rfl⟩
@[simp] theorem mem_join {a : α} : ∀ {L : list (list α)}, a ∈ join L ↔ ∃ l, l ∈ L ∧ a ∈ l
| [] := ⟨false.elim, λ⟨_, h, _⟩, false.elim h⟩
| (c :: L) := by simp only [join, mem_append, @mem_join L, mem_cons_iff, or_and_distrib_right,
exists_or_distrib, exists_eq_left]
theorem exists_of_mem_join {a : α} {L : list (list α)} : a ∈ join L → ∃ l, l ∈ L ∧ a ∈ l :=
mem_join.1
theorem mem_join_of_mem {a : α} {L : list (list α)} {l} (lL : l ∈ L) (al : a ∈ l) : a ∈ join L :=
mem_join.2 ⟨l, lL, al⟩
@[simp]
theorem mem_bind {b : β} {l : list α} {f : α → list β} : b ∈ list.bind l f ↔ ∃ a ∈ l, b ∈ f a :=
iff.trans mem_join
⟨λ ⟨l', h1, h2⟩, let ⟨a, al, fa⟩ := exists_of_mem_map h1 in ⟨a, al, fa.symm ▸ h2⟩,
λ ⟨a, al, bfa⟩, ⟨f a, mem_map_of_mem _ al, bfa⟩⟩
theorem exists_of_mem_bind {b : β} {l : list α} {f : α → list β} :
b ∈ list.bind l f → ∃ a ∈ l, b ∈ f a :=
mem_bind.1
theorem mem_bind_of_mem {b : β} {l : list α} {f : α → list β} {a} (al : a ∈ l) (h : b ∈ f a) :
b ∈ list.bind l f :=
mem_bind.2 ⟨a, al, h⟩
lemma bind_map {g : α → list β} {f : β → γ} :
∀(l : list α), list.map f (l.bind g) = l.bind (λa, (g a).map f)
| [] := rfl
| (a::l) := by simp only [cons_bind, map_append, bind_map l]
lemma map_bind (g : β → list γ) (f : α → β) :
∀ l : list α, (list.map f l).bind g = l.bind (λ a, g (f a))
| [] := rfl
| (a::l) := by simp only [cons_bind, map_cons, map_bind l]
lemma range_map (f : α → β) : set.range (map f) = {l | ∀ x ∈ l, x ∈ set.range f} :=
begin
refine set.subset.antisymm (set.range_subset_iff.2 $
λ l, forall_mem_map_iff.2 $ λ y _, set.mem_range_self _) (λ l hl, _),
induction l with a l ihl, { exact ⟨[], rfl⟩ },
rcases ihl (λ x hx, hl x $ subset_cons _ _ hx) with ⟨l, rfl⟩,
rcases hl a (mem_cons_self _ _) with ⟨a, rfl⟩,
exact ⟨a :: l, map_cons _ _ _⟩
end
lemma range_map_coe (s : set α) : set.range (map (coe : s → α)) = {l | ∀ x ∈ l, x ∈ s} :=
by rw [range_map, subtype.range_coe]
/-- If each element of a list can be lifted to some type, then the whole list can be lifted to this
type. -/
instance [h : can_lift α β] : can_lift (list α) (list β) :=
{ coe := list.map h.coe,
cond := λ l, ∀ x ∈ l, can_lift.cond β x,
prf := λ l H,
begin
rw [← set.mem_range, range_map],
exact λ a ha, can_lift.prf a (H a ha),
end}
/-! ### length -/
theorem length_eq_zero {l : list α} : length l = 0 ↔ l = [] :=
⟨eq_nil_of_length_eq_zero, λ h, h.symm ▸ rfl⟩
@[simp] lemma length_singleton (a : α) : length [a] = 1 := rfl
theorem length_pos_of_mem {a : α} : ∀ {l : list α}, a ∈ l → 0 < length l
| (b::l) _ := zero_lt_succ _
theorem exists_mem_of_length_pos : ∀ {l : list α}, 0 < length l → ∃ a, a ∈ l
| (b::l) _ := ⟨b, mem_cons_self _ _⟩
theorem length_pos_iff_exists_mem {l : list α} : 0 < length l ↔ ∃ a, a ∈ l :=
⟨exists_mem_of_length_pos, λ ⟨a, h⟩, length_pos_of_mem h⟩
theorem ne_nil_of_length_pos {l : list α} : 0 < length l → l ≠ [] :=
λ h1 h2, lt_irrefl 0 ((length_eq_zero.2 h2).subst h1)
theorem length_pos_of_ne_nil {l : list α} : l ≠ [] → 0 < length l :=
λ h, pos_iff_ne_zero.2 $ λ h0, h $ length_eq_zero.1 h0
theorem length_pos_iff_ne_nil {l : list α} : 0 < length l ↔ l ≠ [] :=
⟨ne_nil_of_length_pos, length_pos_of_ne_nil⟩
lemma exists_mem_of_ne_nil (l : list α) (h : l ≠ []) : ∃ x, x ∈ l :=
exists_mem_of_length_pos (length_pos_of_ne_nil h)
theorem length_eq_one {l : list α} : length l = 1 ↔ ∃ a, l = [a] :=
⟨match l with [a], _ := ⟨a, rfl⟩ end, λ ⟨a, e⟩, e.symm ▸ rfl⟩
lemma exists_of_length_succ {n} :
∀ l : list α, l.length = n + 1 → ∃ h t, l = h :: t
| [] H := absurd H.symm $ succ_ne_zero n
| (h :: t) H := ⟨h, t, rfl⟩
@[simp] lemma length_injective_iff : injective (list.length : list α → ℕ) ↔ subsingleton α :=
begin
split,
{ intro h, refine ⟨λ x y, _⟩, suffices : [x] = [y], { simpa using this }, apply h, refl },
{ intros hα l1 l2 hl, induction l1 generalizing l2; cases l2,
{ refl }, { cases hl }, { cases hl },
congr, exactI subsingleton.elim _ _, apply l1_ih, simpa using hl }
end
@[simp] lemma length_injective [subsingleton α] : injective (length : list α → ℕ) :=
length_injective_iff.mpr $ by apply_instance
lemma length_eq_two {l : list α} : l.length = 2 ↔ ∃ a b, l = [a, b] :=
⟨match l with [a, b], _ := ⟨a, b, rfl⟩ end, λ ⟨a, b, e⟩, e.symm ▸ rfl⟩
lemma length_eq_three {l : list α} : l.length = 3 ↔ ∃ a b c, l = [a, b, c] :=
⟨match l with [a, b, c], _ := ⟨a, b, c, rfl⟩ end, λ ⟨a, b, c, e⟩, e.symm ▸ rfl⟩
/-! ### set-theoretic notation of lists -/
lemma empty_eq : (∅ : list α) = [] := by refl
lemma singleton_eq (x : α) : ({x} : list α) = [x] := rfl
lemma insert_neg [decidable_eq α] {x : α} {l : list α} (h : x ∉ l) :
has_insert.insert x l = x :: l :=
if_neg h
lemma insert_pos [decidable_eq α] {x : α} {l : list α} (h : x ∈ l) :
has_insert.insert x l = l :=
if_pos h
lemma doubleton_eq [decidable_eq α] {x y : α} (h : x ≠ y) : ({x, y} : list α) = [x, y] :=
by { rw [insert_neg, singleton_eq], rwa [singleton_eq, mem_singleton] }
/-! ### bounded quantifiers over lists -/
theorem forall_mem_nil (p : α → Prop) : ∀ x ∈ @nil α, p x.
theorem forall_mem_cons : ∀ {p : α → Prop} {a : α} {l : list α},
(∀ x ∈ a :: l, p x) ↔ p a ∧ ∀ x ∈ l, p x :=
ball_cons
theorem forall_mem_of_forall_mem_cons {p : α → Prop} {a : α} {l : list α}
(h : ∀ x ∈ a :: l, p x) :
∀ x ∈ l, p x :=
(forall_mem_cons.1 h).2
theorem forall_mem_singleton {p : α → Prop} {a : α} : (∀ x ∈ [a], p x) ↔ p a :=
by simp only [mem_singleton, forall_eq]
theorem forall_mem_append {p : α → Prop} {l₁ l₂ : list α} :
(∀ x ∈ l₁ ++ l₂, p x) ↔ (∀ x ∈ l₁, p x) ∧ (∀ x ∈ l₂, p x) :=
by simp only [mem_append, or_imp_distrib, forall_and_distrib]
theorem not_exists_mem_nil (p : α → Prop) : ¬ ∃ x ∈ @nil α, p x.
theorem exists_mem_cons_of {p : α → Prop} {a : α} (l : list α) (h : p a) :
∃ x ∈ a :: l, p x :=
bex.intro a (mem_cons_self _ _) h
theorem exists_mem_cons_of_exists {p : α → Prop} {a : α} {l : list α} (h : ∃ x ∈ l, p x) :
∃ x ∈ a :: l, p x :=
bex.elim h (λ x xl px, bex.intro x (mem_cons_of_mem _ xl) px)
theorem or_exists_of_exists_mem_cons {p : α → Prop} {a : α} {l : list α} (h : ∃ x ∈ a :: l, p x) :
p a ∨ ∃ x ∈ l, p x :=
bex.elim h (λ x xal px,
or.elim (eq_or_mem_of_mem_cons xal)
(assume : x = a, begin rw ←this, left, exact px end)
(assume : x ∈ l, or.inr (bex.intro x this px)))
theorem exists_mem_cons_iff (p : α → Prop) (a : α) (l : list α) :
(∃ x ∈ a :: l, p x) ↔ p a ∨ ∃ x ∈ l, p x :=
iff.intro or_exists_of_exists_mem_cons
(assume h, or.elim h (exists_mem_cons_of l) exists_mem_cons_of_exists)
/-! ### list subset -/
theorem subset_def {l₁ l₂ : list α} : l₁ ⊆ l₂ ↔ ∀ ⦃a : α⦄, a ∈ l₁ → a ∈ l₂ := iff.rfl
theorem subset_append_of_subset_left (l l₁ l₂ : list α) : l ⊆ l₁ → l ⊆ l₁++l₂ :=
λ s, subset.trans s $ subset_append_left _ _
theorem subset_append_of_subset_right (l l₁ l₂ : list α) : l ⊆ l₂ → l ⊆ l₁++l₂ :=
λ s, subset.trans s $ subset_append_right _ _
@[simp] theorem cons_subset {a : α} {l m : list α} :
a::l ⊆ m ↔ a ∈ m ∧ l ⊆ m :=
by simp only [subset_def, mem_cons_iff, or_imp_distrib, forall_and_distrib, forall_eq]
theorem cons_subset_of_subset_of_mem {a : α} {l m : list α}
(ainm : a ∈ m) (lsubm : l ⊆ m) : a::l ⊆ m :=
cons_subset.2 ⟨ainm, lsubm⟩
theorem append_subset_of_subset_of_subset {l₁ l₂ l : list α} (l₁subl : l₁ ⊆ l) (l₂subl : l₂ ⊆ l) :
l₁ ++ l₂ ⊆ l :=
λ a h, (mem_append.1 h).elim (@l₁subl _) (@l₂subl _)
@[simp] theorem append_subset_iff {l₁ l₂ l : list α} :
l₁ ++ l₂ ⊆ l ↔ l₁ ⊆ l ∧ l₂ ⊆ l :=
begin
split,
{ intro h, simp only [subset_def] at *, split; intros; simp* },
{ rintro ⟨h1, h2⟩, apply append_subset_of_subset_of_subset h1 h2 }
end
theorem eq_nil_of_subset_nil : ∀ {l : list α}, l ⊆ [] → l = []
| [] s := rfl
| (a::l) s := false.elim $ s $ mem_cons_self a l
theorem eq_nil_iff_forall_not_mem {l : list α} : l = [] ↔ ∀ a, a ∉ l :=
show l = [] ↔ l ⊆ [], from ⟨λ e, e ▸ subset.refl _, eq_nil_of_subset_nil⟩
theorem map_subset {l₁ l₂ : list α} (f : α → β) (H : l₁ ⊆ l₂) : map f l₁ ⊆ map f l₂ :=
λ x, by simp only [mem_map, not_and, exists_imp_distrib, and_imp]; exact λ a h e, ⟨a, H h, e⟩
theorem map_subset_iff {l₁ l₂ : list α} (f : α → β) (h : injective f) :
map f l₁ ⊆ map f l₂ ↔ l₁ ⊆ l₂ :=
begin
refine ⟨_, map_subset f⟩, intros h2 x hx,
rcases mem_map.1 (h2 (mem_map_of_mem f hx)) with ⟨x', hx', hxx'⟩,
cases h hxx', exact hx'
end
/-! ### append -/
lemma append_eq_has_append {L₁ L₂ : list α} : list.append L₁ L₂ = L₁ ++ L₂ := rfl
@[simp] lemma singleton_append {x : α} {l : list α} : [x] ++ l = x :: l := rfl
theorem append_ne_nil_of_ne_nil_left (s t : list α) : s ≠ [] → s ++ t ≠ [] :=
by induction s; intros; contradiction
theorem append_ne_nil_of_ne_nil_right (s t : list α) : t ≠ [] → s ++ t ≠ [] :=
by induction s; intros; contradiction
@[simp] lemma append_eq_nil {p q : list α} : (p ++ q) = [] ↔ p = [] ∧ q = [] :=
by cases p; simp only [nil_append, cons_append, eq_self_iff_true, true_and, false_and]
@[simp] lemma nil_eq_append_iff {a b : list α} : [] = a ++ b ↔ a = [] ∧ b = [] :=
by rw [eq_comm, append_eq_nil]
lemma append_eq_cons_iff {a b c : list α} {x : α} :
a ++ b = x :: c ↔ (a = [] ∧ b = x :: c) ∨ (∃a', a = x :: a' ∧ c = a' ++ b) :=
by cases a; simp only [and_assoc, @eq_comm _ c, nil_append, cons_append, eq_self_iff_true,
true_and, false_and, exists_false, false_or, or_false, exists_and_distrib_left, exists_eq_left']
lemma cons_eq_append_iff {a b c : list α} {x : α} :
(x :: c : list α) = a ++ b ↔ (a = [] ∧ b = x :: c) ∨ (∃a', a = x :: a' ∧ c = a' ++ b) :=
by rw [eq_comm, append_eq_cons_iff]
lemma append_eq_append_iff {a b c d : list α} :
a ++ b = c ++ d ↔ (∃a', c = a ++ a' ∧ b = a' ++ d) ∨ (∃c', a = c ++ c' ∧ d = c' ++ b) :=
begin
induction a generalizing c,
case nil { rw nil_append, split,
{ rintro rfl, left, exact ⟨_, rfl, rfl⟩ },
{ rintro (⟨a', rfl, rfl⟩ | ⟨a', H, rfl⟩), {refl}, {rw [← append_assoc, ← H], refl} } },
case cons : a as ih
{ cases c,
{ simp only [cons_append, nil_append, false_and, exists_false, false_or, exists_eq_left'],
exact eq_comm },
{ simp only [cons_append, @eq_comm _ a, ih, and_assoc, and_or_distrib_left,
exists_and_distrib_left] } }
end
@[simp] theorem take_append_drop : ∀ (n : ℕ) (l : list α), take n l ++ drop n l = l
| 0 a := rfl
| (succ n) [] := rfl
| (succ n) (x :: xs) := congr_arg (cons x) $ take_append_drop n xs
-- TODO(Leo): cleanup proof after arith dec proc
theorem append_inj :
∀ {s₁ s₂ t₁ t₂ : list α}, s₁ ++ t₁ = s₂ ++ t₂ → length s₁ = length s₂ → s₁ = s₂ ∧ t₁ = t₂
| [] [] t₁ t₂ h hl := ⟨rfl, h⟩
| (a::s₁) [] t₁ t₂ h hl := list.no_confusion $ eq_nil_of_length_eq_zero hl
| [] (b::s₂) t₁ t₂ h hl := list.no_confusion $ eq_nil_of_length_eq_zero hl.symm
| (a::s₁) (b::s₂) t₁ t₂ h hl := list.no_confusion h $ λab hap,
let ⟨e1, e2⟩ := @append_inj s₁ s₂ t₁ t₂ hap (succ.inj hl) in
by rw [ab, e1, e2]; exact ⟨rfl, rfl⟩
theorem append_inj_right {s₁ s₂ t₁ t₂ : list α} (h : s₁ ++ t₁ = s₂ ++ t₂)
(hl : length s₁ = length s₂) : t₁ = t₂ :=
(append_inj h hl).right
theorem append_inj_left {s₁ s₂ t₁ t₂ : list α} (h : s₁ ++ t₁ = s₂ ++ t₂)
(hl : length s₁ = length s₂) : s₁ = s₂ :=
(append_inj h hl).left
theorem append_inj' {s₁ s₂ t₁ t₂ : list α} (h : s₁ ++ t₁ = s₂ ++ t₂) (hl : length t₁ = length t₂) :
s₁ = s₂ ∧ t₁ = t₂ :=
append_inj h $ @nat.add_right_cancel _ (length t₁) _ $
let hap := congr_arg length h in by simp only [length_append] at hap; rwa [← hl] at hap
theorem append_inj_right' {s₁ s₂ t₁ t₂ : list α} (h : s₁ ++ t₁ = s₂ ++ t₂)
(hl : length t₁ = length t₂) : t₁ = t₂ :=
(append_inj' h hl).right
theorem append_inj_left' {s₁ s₂ t₁ t₂ : list α} (h : s₁ ++ t₁ = s₂ ++ t₂)
(hl : length t₁ = length t₂) : s₁ = s₂ :=
(append_inj' h hl).left
theorem append_left_cancel {s t₁ t₂ : list α} (h : s ++ t₁ = s ++ t₂) : t₁ = t₂ :=
append_inj_right h rfl
theorem append_right_cancel {s₁ s₂ t : list α} (h : s₁ ++ t = s₂ ++ t) : s₁ = s₂ :=
append_inj_left' h rfl
theorem append_right_injective (s : list α) : function.injective (λ t, s ++ t) :=
λ t₁ t₂, append_left_cancel
theorem append_right_inj {t₁ t₂ : list α} (s) : s ++ t₁ = s ++ t₂ ↔ t₁ = t₂ :=
(append_right_injective s).eq_iff
theorem append_left_injective (t : list α) : function.injective (λ s, s ++ t) :=
λ s₁ s₂, append_right_cancel
theorem append_left_inj {s₁ s₂ : list α} (t) : s₁ ++ t = s₂ ++ t ↔ s₁ = s₂ :=
(append_left_injective t).eq_iff
theorem map_eq_append_split {f : α → β} {l : list α} {s₁ s₂ : list β}
(h : map f l = s₁ ++ s₂) : ∃ l₁ l₂, l = l₁ ++ l₂ ∧ map f l₁ = s₁ ∧ map f l₂ = s₂ :=
begin
have := h, rw [← take_append_drop (length s₁) l] at this ⊢,
rw map_append at this,
refine ⟨_, _, rfl, append_inj this _⟩,
rw [length_map, length_take, min_eq_left],
rw [← length_map f l, h, length_append],
apply nat.le_add_right
end
/-! ### repeat -/
@[simp] theorem repeat_succ (a : α) (n) : repeat a (n + 1) = a :: repeat a n := rfl
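-- Added illustration (not part of the original file): small `repeat`s compute to
-- literal lists.
example (a : α) : repeat a 2 = [a, a] := rfl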
theorem mem_repeat {a b : α} : ∀ {n}, b ∈ repeat a n ↔ n ≠ 0 ∧ b = a
| 0 := by simp
| (n + 1) := by simp [mem_repeat]
theorem eq_of_mem_repeat {a b : α} {n} (h : b ∈ repeat a n) : b = a :=
(mem_repeat.1 h).2
theorem eq_repeat_of_mem {a : α} : ∀ {l : list α}, (∀ b ∈ l, b = a) → l = repeat a l.length
| [] H := rfl
| (b::l) H := by cases forall_mem_cons.1 H with H₁ H₂;
unfold length repeat; congr; [exact H₁, exact eq_repeat_of_mem H₂]
theorem eq_repeat' {a : α} {l : list α} : l = repeat a l.length ↔ ∀ b ∈ l, b = a :=
⟨λ h, h.symm ▸ λ b, eq_of_mem_repeat, eq_repeat_of_mem⟩
theorem eq_repeat {a : α} {n} {l : list α} : l = repeat a n ↔ length l = n ∧ ∀ b ∈ l, b = a :=
⟨λ h, h.symm ▸ ⟨length_repeat _ _, λ b, eq_of_mem_repeat⟩,
λ ⟨e, al⟩, e ▸ eq_repeat_of_mem al⟩
theorem repeat_add (a : α) (m n) : repeat a (m + n) = repeat a m ++ repeat a n :=
by induction m; simp only [*, zero_add, succ_add, repeat]; split; refl
theorem repeat_subset_singleton (a : α) (n) : repeat a n ⊆ [a] :=
λ b h, mem_singleton.2 (eq_of_mem_repeat h)
@[simp] theorem map_const (l : list α) (b : β) : map (function.const α b) l = repeat b l.length :=
by induction l; [refl, simp only [*, map]]; split; refl
theorem eq_of_mem_map_const {b₁ b₂ : β} {l : list α} (h : b₁ ∈ map (function.const α b₂) l) :
b₁ = b₂ :=
by rw map_const at h; exact eq_of_mem_repeat h
@[simp] theorem map_repeat (f : α → β) (a : α) (n) : map f (repeat a n) = repeat (f a) n :=
by induction n; [refl, simp only [*, repeat, map]]; split; refl
@[simp] theorem tail_repeat (a : α) (n) : tail (repeat a n) = repeat a n.pred :=
by cases n; refl
@[simp] theorem join_repeat_nil (n : ℕ) : join (repeat [] n) = @nil α :=
by induction n; [refl, simp only [*, repeat, join, append_nil]]
lemma repeat_left_injective {n : ℕ} (hn : n ≠ 0) :
function.injective (λ a : α, repeat a n) :=
λ a b h, (eq_repeat.1 h).2 _ $ mem_repeat.2 ⟨hn, rfl⟩
lemma repeat_left_inj {a b : α} {n : ℕ} (hn : n ≠ 0) :
repeat a n = repeat b n ↔ a = b :=
(repeat_left_injective hn).eq_iff
@[simp] lemma repeat_left_inj' {a b : α} :
∀ {n}, repeat a n = repeat b n ↔ n = 0 ∨ a = b
| 0 := by simp
| (n + 1) := (repeat_left_inj n.succ_ne_zero).trans $ by simp only [n.succ_ne_zero, false_or]
lemma repeat_right_injective (a : α) : function.injective (repeat a) :=
function.left_inverse.injective (length_repeat a)
@[simp] lemma repeat_right_inj {a : α} {n m : ℕ} :
repeat a n = repeat a m ↔ n = m :=
(repeat_right_injective a).eq_iff
/-! ### pure -/
@[simp] theorem mem_pure {α} (x y : α) :
x ∈ (pure y : list α) ↔ x = y := by simp! [pure,list.ret]
/-! ### bind -/
@[simp] theorem bind_eq_bind {α β} (f : α → list β) (l : list α) :
l >>= f = l.bind f := rfl
-- TODO: duplicate of a lemma in core
theorem bind_append (f : α → list β) (l₁ l₂ : list α) :
(l₁ ++ l₂).bind f = l₁.bind f ++ l₂.bind f :=
append_bind _ _ _
@[simp] theorem bind_singleton (f : α → list β) (x : α) : [x].bind f = f x :=
append_nil (f x)
@[simp] theorem bind_singleton' (l : list α) : l.bind (λ x, [x]) = l := bind_pure l
theorem map_eq_bind {α β} (f : α → β) (l : list α) : map f l = l.bind (λ x, [f x]) :=
by { transitivity, rw [← bind_singleton' l, bind_map], refl }
theorem bind_assoc {α β} (l : list α) (f : α → list β) (g : β → list γ) :
(l.bind f).bind g = l.bind (λ x, (f x).bind g) :=
by induction l; simp *
/-! ### concat -/
theorem concat_nil (a : α) : concat [] a = [a] := rfl
theorem concat_cons (a b : α) (l : list α) : concat (a :: l) b = a :: concat l b := rfl
@[simp] theorem concat_eq_append (a : α) (l : list α) : concat l a = l ++ [a] :=
by induction l; simp only [*, concat]; split; refl
theorem init_eq_of_concat_eq {a : α} {l₁ l₂ : list α} : concat l₁ a = concat l₂ a → l₁ = l₂ :=
begin
intro h,
rw [concat_eq_append, concat_eq_append] at h,
exact append_right_cancel h
end
theorem last_eq_of_concat_eq {a b : α} {l : list α} : concat l a = concat l b → a = b :=
begin
intro h,
rw [concat_eq_append, concat_eq_append] at h,
exact head_eq_of_cons_eq (append_left_cancel h)
end
theorem concat_ne_nil (a : α) (l : list α) : concat l a ≠ [] :=
by simp
theorem concat_append (a : α) (l₁ l₂ : list α) : concat l₁ a ++ l₂ = l₁ ++ a :: l₂ :=
by simp
theorem length_concat (a : α) (l : list α) : length (concat l a) = succ (length l) :=
by simp only [concat_eq_append, length_append, length]
theorem append_concat (a : α) (l₁ l₂ : list α) : l₁ ++ concat l₂ a = concat (l₁ ++ l₂) a :=
by simp
/-! ### reverse -/
@[simp] theorem reverse_nil : reverse (@nil α) = [] := rfl
local attribute [simp] reverse_core
@[simp] theorem reverse_cons (a : α) (l : list α) : reverse (a::l) = reverse l ++ [a] :=
have aux : ∀ l₁ l₂, reverse_core l₁ l₂ ++ [a] = reverse_core l₁ (l₂ ++ [a]),
by intro l₁; induction l₁; intros; [refl, simp only [*, reverse_core, cons_append]],
(aux l nil).symm
theorem reverse_core_eq (l₁ l₂ : list α) : reverse_core l₁ l₂ = reverse l₁ ++ l₂ :=
by induction l₁ generalizing l₂; [refl, simp only [*, reverse_core, reverse_cons, append_assoc]];
refl
theorem reverse_cons' (a : α) (l : list α) : reverse (a::l) = concat (reverse l) a :=
by simp only [reverse_cons, concat_eq_append]
@[simp] theorem reverse_singleton (a : α) : reverse [a] = [a] := rfl
@[simp] theorem reverse_append (s t : list α) : reverse (s ++ t) = (reverse t) ++ (reverse s) :=
by induction s; [rw [nil_append, reverse_nil, append_nil],
simp only [*, cons_append, reverse_cons, append_assoc]]
theorem reverse_concat (l : list α) (a : α) : reverse (concat l a) = a :: reverse l :=
by rw [concat_eq_append, reverse_append, reverse_singleton, singleton_append]
@[simp] theorem reverse_reverse (l : list α) : reverse (reverse l) = l :=
by induction l; [refl, simp only [*, reverse_cons, reverse_append]]; refl
@[simp] theorem reverse_involutive : involutive (@reverse α) := reverse_reverse
@[simp] theorem reverse_injective : injective (@reverse α) := reverse_involutive.injective
theorem reverse_surjective : surjective (@reverse α) := reverse_involutive.surjective
theorem reverse_bijective : bijective (@reverse α) := reverse_involutive.bijective
@[simp] theorem reverse_inj {l₁ l₂ : list α} : reverse l₁ = reverse l₂ ↔ l₁ = l₂ :=
reverse_injective.eq_iff
lemma reverse_eq_iff {l l' : list α} :
l.reverse = l' ↔ l = l'.reverse :=
reverse_involutive.eq_iff
@[simp] theorem reverse_eq_nil {l : list α} : reverse l = [] ↔ l = [] :=
@reverse_inj _ l []
theorem concat_eq_reverse_cons (a : α) (l : list α) : concat l a = reverse (a :: reverse l) :=
by simp only [concat_eq_append, reverse_cons, reverse_reverse]
@[simp] theorem length_reverse (l : list α) : length (reverse l) = length l :=
by induction l; [refl, simp only [*, reverse_cons, length_append, length]]
@[simp] theorem map_reverse (f : α → β) (l : list α) : map f (reverse l) = reverse (map f l) :=
by induction l; [refl, simp only [*, map, reverse_cons, map_append]]
theorem map_reverse_core (f : α → β) (l₁ l₂ : list α) :
map f (reverse_core l₁ l₂) = reverse_core (map f l₁) (map f l₂) :=
by simp only [reverse_core_eq, map_append, map_reverse]
@[simp] theorem mem_reverse {a : α} {l : list α} : a ∈ reverse l ↔ a ∈ l :=
by induction l; [refl, simp only [*, reverse_cons, mem_append, mem_singleton, mem_cons_iff,
not_mem_nil, false_or, or_false, or_comm]]
@[simp] theorem reverse_repeat (a : α) (n) : reverse (repeat a n) = repeat a n :=
eq_repeat.2 ⟨by simp only [length_reverse, length_repeat],
λ b h, eq_of_mem_repeat (mem_reverse.1 h)⟩
/-! ### empty -/
attribute [simp] list.empty
lemma empty_iff_eq_nil {l : list α} : l.empty ↔ l = [] :=
list.cases_on l (by simp) (by simp)
/-! ### init -/
@[simp] theorem length_init : ∀ (l : list α), length (init l) = length l - 1
| [] := rfl
| [a] := rfl
| (a :: b :: l) :=
begin
rw init,
simp only [add_left_inj, length, succ_add_sub_one],
exact length_init (b :: l)
end
/-! ### last -/
@[simp] theorem last_cons {a : α} {l : list α} :
∀ (h : l ≠ nil), last (a :: l) (cons_ne_nil a l) = last l h :=
by {induction l; intros, contradiction, reflexivity}
@[simp] theorem last_append_singleton {a : α} (l : list α) :
last (l ++ [a]) (append_ne_nil_of_ne_nil_right l _ (cons_ne_nil a _)) = a :=
by induction l;
[refl, simp only [cons_append, last_cons (λ H, cons_ne_nil _ _ (append_eq_nil.1 H).2), *]]
theorem last_append (l₁ l₂ : list α) (h : l₂ ≠ []) :
last (l₁ ++ l₂) (append_ne_nil_of_ne_nil_right l₁ l₂ h) = last l₂ h :=
begin
induction l₁ with _ _ ih,
{ simp },
{ simp only [cons_append], rw list.last_cons, exact ih },
end
theorem last_concat {a : α} (l : list α) : last (concat l a) (concat_ne_nil a l) = a :=
by simp only [concat_eq_append, last_append_singleton]
@[simp] theorem last_singleton (a : α) : last [a] (cons_ne_nil a []) = a := rfl
@[simp] theorem last_cons_cons (a₁ a₂ : α) (l : list α) :
last (a₁::a₂::l) (cons_ne_nil _ _) = last (a₂::l) (cons_ne_nil a₂ l) := rfl
theorem init_append_last : ∀ {l : list α} (h : l ≠ []), init l ++ [last l h] = l
| [] h := absurd rfl h
| [a] h := rfl
| (a::b::l) h :=
begin
rw [init, cons_append, last_cons (cons_ne_nil _ _)],
congr,
exact init_append_last (cons_ne_nil b l)
end
theorem last_congr {l₁ l₂ : list α} (h₁ : l₁ ≠ []) (h₂ : l₂ ≠ []) (h₃ : l₁ = l₂) :
last l₁ h₁ = last l₂ h₂ :=
by subst l₁
theorem last_mem : ∀ {l : list α} (h : l ≠ []), last l h ∈ l
| [] h := absurd rfl h
| [a] h := or.inl rfl
| (a::b::l) h := or.inr $ by { rw [last_cons_cons], exact last_mem (cons_ne_nil b l) }
lemma last_repeat_succ (a m : ℕ) :
(repeat a m.succ).last (ne_nil_of_length_eq_succ
(show (repeat a m.succ).length = m.succ, by rw length_repeat)) = a :=
begin
induction m with k IH,
{ simp },
{ simpa only [repeat_succ, last] }
end
/-! ### last' -/
@[simp] theorem last'_is_none :
∀ {l : list α}, (last' l).is_none ↔ l = []
| [] := by simp
| [a] := by simp
| (a::b::l) := by simp [@last'_is_none (b::l)]
@[simp] theorem last'_is_some : ∀ {l : list α}, l.last'.is_some ↔ l ≠ []
| [] := by simp
| [a] := by simp
| (a::b::l) := by simp [@last'_is_some (b::l)]
theorem mem_last'_eq_last : ∀ {l : list α} {x : α}, x ∈ l.last' → ∃ h, x = last l h
| [] x hx := false.elim $ by simpa using hx
| [a] x hx := have a = x, by simpa using hx, this ▸ ⟨cons_ne_nil a [], rfl⟩
| (a::b::l) x hx :=
begin
rw last' at hx,
rcases mem_last'_eq_last hx with ⟨h₁, h₂⟩,
use cons_ne_nil _ _,
rwa [last_cons]
end
theorem last'_eq_last_of_ne_nil : ∀ {l : list α} (h : l ≠ []), l.last' = some (l.last h)
| [] h := (h rfl).elim
| [a] _ := by {unfold last, unfold last'}
| (a::b::l) _ := @last'_eq_last_of_ne_nil (b::l) (cons_ne_nil _ _)
theorem mem_last'_cons {x y : α} : ∀ {l : list α} (h : x ∈ l.last'), x ∈ (y :: l).last'
| [] _ := by contradiction
| (a::l) h := h
theorem mem_of_mem_last' {l : list α} {a : α} (ha : a ∈ l.last') : a ∈ l :=
let ⟨h₁, h₂⟩ := mem_last'_eq_last ha in h₂.symm ▸ last_mem _
theorem init_append_last' : ∀ {l : list α} (a ∈ l.last'), init l ++ [a] = l
| [] a ha := (option.not_mem_none a ha).elim
| [a] _ rfl := rfl
| (a :: b :: l) c hc := by { rw [last'] at hc, rw [init, cons_append, init_append_last' _ hc] }
theorem ilast_eq_last' [inhabited α] : ∀ l : list α, l.ilast = l.last'.iget
| [] := by simp [ilast, arbitrary]
| [a] := rfl
| [a, b] := rfl
| [a, b, c] := rfl
| (a :: b :: c :: l) := by simp [ilast, ilast_eq_last' (c :: l)]
@[simp] theorem last'_append_cons : ∀ (l₁ : list α) (a : α) (l₂ : list α),
last' (l₁ ++ a :: l₂) = last' (a :: l₂)
| [] a l₂ := rfl
| [b] a l₂ := rfl
| (b::c::l₁) a l₂ := by rw [cons_append, cons_append, last', ← cons_append, last'_append_cons]
@[simp] theorem last'_cons_cons (x y : α) (l : list α) :
last' (x :: y :: l) = last' (y :: l) := rfl
theorem last'_append_of_ne_nil (l₁ : list α) : ∀ {l₂ : list α} (hl₂ : l₂ ≠ []),
last' (l₁ ++ l₂) = last' l₂
| [] hl₂ := by contradiction
| (b::l₂) _ := last'_append_cons l₁ b l₂
theorem last'_append {l₁ l₂ : list α} {x : α} (h : x ∈ l₂.last') :
x ∈ (l₁ ++ l₂).last' :=
by { cases l₂, { contradiction, }, { rw list.last'_append_cons, exact h } }
/-! ### head(') and tail -/
theorem head_eq_head' [inhabited α] (l : list α) : head l = (head' l).iget :=
by cases l; refl
theorem mem_of_mem_head' {x : α} : ∀ {l : list α}, x ∈ l.head' → x ∈ l
| [] h := (option.not_mem_none _ h).elim
| (a::l) h := by { simp only [head', option.mem_def] at h, exact h ▸ or.inl rfl }
@[simp] theorem head_cons [inhabited α] (a : α) (l : list α) : head (a::l) = a := rfl
@[simp] theorem tail_nil : tail (@nil α) = [] := rfl
@[simp] theorem tail_cons (a : α) (l : list α) : tail (a::l) = l := rfl
@[simp] theorem head_append [inhabited α] (t : list α) {s : list α} (h : s ≠ []) :
head (s ++ t) = head s :=
by {induction s, contradiction, refl}
theorem head'_append {s t : list α} {x : α} (h : x ∈ s.head') :
x ∈ (s ++ t).head' :=
by { cases s, contradiction, exact h }
theorem head'_append_of_ne_nil : ∀ (l₁ : list α) {l₂ : list α} (hl₁ : l₁ ≠ []),
head' (l₁ ++ l₂) = head' l₁
| [] _ hl₁ := by contradiction
| (x::l₁) _ _ := rfl
theorem tail_append_singleton_of_ne_nil {a : α} {l : list α} (h : l ≠ nil) :
tail (l ++ [a]) = tail l ++ [a] :=
by { induction l, contradiction, rw [tail,cons_append,tail], }
theorem cons_head'_tail : ∀ {l : list α} {a : α} (h : a ∈ head' l), a :: tail l = l
| [] a h := by contradiction
| (b::l) a h := by { simp at h, simp [h] }
theorem head_mem_head' [inhabited α] : ∀ {l : list α} (h : l ≠ []), head l ∈ head' l
| [] h := by contradiction
| (a::l) h := rfl
theorem cons_head_tail [inhabited α] {l : list α} (h : l ≠ []) : (head l)::(tail l) = l :=
cons_head'_tail (head_mem_head' h)
lemma head_mem_self [inhabited α] {l : list α} (h : l ≠ nil) : l.head ∈ l :=
begin
have h' := mem_cons_self l.head l.tail,
rwa cons_head_tail h at h',
end
@[simp] theorem head'_map (f : α → β) (l) : head' (map f l) = (head' l).map f := by cases l; refl
lemma tail_append_of_ne_nil (l l' : list α) (h : l ≠ []) :
(l ++ l').tail = l.tail ++ l' :=
begin
cases l,
{ contradiction },
{ simp }
end
@[simp]
lemma nth_le_tail (l : list α) (i) (h : i < l.tail.length)
(h' : i + 1 < l.length := by simpa [←lt_tsub_iff_right] using h) :
l.tail.nth_le i h = l.nth_le (i + 1) h' :=
begin
cases l,
{ cases h, },
{ simpa }
end
lemma nth_le_cons_aux {l : list α} {a : α} {n} (hn : n ≠ 0) (h : n < (a :: l).length) :
n - 1 < l.length :=
begin
contrapose! h,
rw length_cons,
convert succ_le_succ h,
exact (nat.succ_pred_eq_of_pos hn.bot_lt).symm
end
lemma nth_le_cons {l : list α} {a : α} {n} (hl) :
(a :: l).nth_le n hl = if hn : n = 0 then a else l.nth_le (n - 1) (nth_le_cons_aux hn hl) :=
begin
split_ifs,
{ simp [nth_le, h] },
cases l,
{ rw [length_singleton, nat.lt_one_iff] at hl, contradiction },
cases n,
{ contradiction },
refl
end
@[simp] lemma modify_head_modify_head (l : list α) (f g : α → α) :
(l.modify_head f).modify_head g = l.modify_head (g ∘ f) :=
by cases l; simp
/-! ### Induction from the right -/
/-- Induction principle from the right for lists: if a property holds for the empty list, and
for `l ++ [a]` if it holds for `l`, then it holds for all lists. The principle is given for
a `Sort`-valued predicate, i.e., it can also be used to construct data. -/
@[elab_as_eliminator] def reverse_rec_on {C : list α → Sort*}
(l : list α) (H0 : C [])
(H1 : ∀ (l : list α) (a : α), C l → C (l ++ [a])) : C l :=
begin
rw ← reverse_reverse l,
induction reverse l,
{ exact H0 },
{ rw reverse_cons, exact H1 _ _ ih }
end
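-- Added illustration (not part of the original file): a typical use of `reverse_rec_on`,
-- splitting a nonempty list into an initial segment and its last element.
example (l : list α) : l = [] ∨ ∃ (l' : list α) (a : α), l = l' ++ [a] :=
reverse_rec_on l (or.inl rfl) (λ l' a _, or.inr ⟨l', a, rfl⟩)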
/-- Bidirectional induction principle for lists: if a property holds for the empty list, the
singleton list, and `a :: (l ++ [b])` from `l`, then it holds for all lists. This can be used to
prove statements about palindromes. The principle is given for a `Sort`-valued predicate, i.e., it
can also be used to construct data. -/
def bidirectional_rec {C : list α → Sort*}
(H0 : C []) (H1 : ∀ (a : α), C [a])
(Hn : ∀ (a : α) (l : list α) (b : α), C l → C (a :: (l ++ [b]))) : ∀ l, C l
| [] := H0
| [a] := H1 a
| (a :: b :: l) :=
let l' := init (b :: l), b' := last (b :: l) (cons_ne_nil _ _) in
have length l' < length (a :: b :: l), by { change _ < length l + 2, simp },
begin
rw ←init_append_last (cons_ne_nil b l),
have : C l', from bidirectional_rec l',
exact Hn a l' b' ‹C l'›
end
using_well_founded { rel_tac := λ _ _, `[exact ⟨_, measure_wf list.length⟩] }
/-- Like `bidirectional_rec`, but with the list parameter placed first. -/
@[elab_as_eliminator] def bidirectional_rec_on {C : list α → Sort*}
(l : list α) (H0 : C []) (H1 : ∀ (a : α), C [a])
(Hn : ∀ (a : α) (l : list α) (b : α), C l → C (a :: (l ++ [b]))) : C l :=
bidirectional_rec H0 H1 Hn l
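-- Added illustration (not part of the original file): the shape of a proof by
-- bidirectional induction (the statement itself is of course just `length_reverse`).
example (l : list α) : length (reverse l) = length l :=
bidirectional_rec_on l rfl (λ a, rfl) (λ a l' b ih, by simp)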
/-! ### sublists -/
@[simp] theorem nil_sublist : Π (l : list α), [] <+ l
| [] := sublist.slnil
| (a :: l) := sublist.cons _ _ a (nil_sublist l)
@[refl, simp] theorem sublist.refl : Π (l : list α), l <+ l
| [] := sublist.slnil
| (a :: l) := sublist.cons2 _ _ a (sublist.refl l)
@[trans] theorem sublist.trans {l₁ l₂ l₃ : list α} (h₁ : l₁ <+ l₂) (h₂ : l₂ <+ l₃) : l₁ <+ l₃ :=
sublist.rec_on h₂ (λ_ s, s)
(λl₂ l₃ a h₂ IH l₁ h₁, sublist.cons _ _ _ (IH l₁ h₁))
(λl₂ l₃ a h₂ IH l₁ h₁, @sublist.cases_on _ (λl₁ l₂', l₂' = a :: l₂ → l₁ <+ a :: l₃) _ _ h₁
(λ_, nil_sublist _)
(λl₁ l₂' a' h₁' e, match a', l₂', e, h₁' with ._, ._, rfl, h₁ :=
sublist.cons _ _ _ (IH _ h₁) end)
(λl₁ l₂' a' h₁' e, match a', l₂', e, h₁' with ._, ._, rfl, h₁ :=
sublist.cons2 _ _ _ (IH _ h₁) end) rfl)
l₁ h₁
@[simp] theorem sublist_cons (a : α) (l : list α) : l <+ a::l :=
sublist.cons _ _ _ (sublist.refl l)
theorem sublist_of_cons_sublist {a : α} {l₁ l₂ : list α} : a::l₁ <+ l₂ → l₁ <+ l₂ :=
sublist.trans (sublist_cons a l₁)
theorem sublist.cons_cons {l₁ l₂ : list α} (a : α) (s : l₁ <+ l₂) : a::l₁ <+ a::l₂ :=
sublist.cons2 _ _ _ s
@[simp] theorem sublist_append_left : Π (l₁ l₂ : list α), l₁ <+ l₁++l₂
| [] l₂ := nil_sublist _
| (a::l₁) l₂ := (sublist_append_left l₁ l₂).cons_cons _
@[simp] theorem sublist_append_right : Π (l₁ l₂ : list α), l₂ <+ l₁++l₂
| [] l₂ := sublist.refl _
| (a::l₁) l₂ := sublist.cons _ _ _ (sublist_append_right l₁ l₂)
theorem sublist_cons_of_sublist (a : α) {l₁ l₂ : list α} : l₁ <+ l₂ → l₁ <+ a::l₂ :=
sublist.cons _ _ _
theorem sublist_append_of_sublist_left {l l₁ l₂ : list α} (s : l <+ l₁) : l <+ l₁++l₂ :=
s.trans $ sublist_append_left _ _
theorem sublist_append_of_sublist_right {l l₁ l₂ : list α} (s : l <+ l₂) : l <+ l₁++l₂ :=
s.trans $ sublist_append_right _ _
theorem sublist_of_cons_sublist_cons {l₁ l₂ : list α} : ∀ {a : α}, a::l₁ <+ a::l₂ → l₁ <+ l₂
| ._ (sublist.cons ._ ._ a s) := sublist_of_cons_sublist s
| ._ (sublist.cons2 ._ ._ a s) := s
theorem cons_sublist_cons_iff {l₁ l₂ : list α} {a : α} : a::l₁ <+ a::l₂ ↔ l₁ <+ l₂ :=
⟨sublist_of_cons_sublist_cons, sublist.cons_cons _⟩
@[simp] theorem append_sublist_append_left {l₁ l₂ : list α} : ∀ l, l++l₁ <+ l++l₂ ↔ l₁ <+ l₂
| [] := iff.rfl
| (a::l) := cons_sublist_cons_iff.trans (append_sublist_append_left l)
theorem sublist.append_right {l₁ l₂ : list α} (h : l₁ <+ l₂) (l) : l₁++l <+ l₂++l :=
begin
induction h with _ _ a _ ih _ _ a _ ih,
{ refl },
{ apply sublist_cons_of_sublist a ih },
{ apply ih.cons_cons a }
end
theorem sublist_or_mem_of_sublist {l l₁ l₂ : list α} {a : α} (h : l <+ l₁ ++ a::l₂) :
l <+ l₁ ++ l₂ ∨ a ∈ l :=
begin
induction l₁ with b l₁ IH generalizing l,
{ cases h, { left, exact ‹l <+ l₂› }, { right, apply mem_cons_self } },
{ cases h with _ _ _ h _ _ _ h,
{ exact or.imp_left (sublist_cons_of_sublist _) (IH h) },
{ exact (IH h).imp (sublist.cons_cons _) (mem_cons_of_mem _) } }
end
theorem sublist.reverse {l₁ l₂ : list α} (h : l₁ <+ l₂) : l₁.reverse <+ l₂.reverse :=
begin
induction h with _ _ _ _ ih _ _ a _ ih, {refl},
{ rw reverse_cons, exact sublist_append_of_sublist_left ih },
{ rw [reverse_cons, reverse_cons], exact ih.append_right [a] }
end
@[simp] theorem reverse_sublist_iff {l₁ l₂ : list α} : l₁.reverse <+ l₂.reverse ↔ l₁ <+ l₂ :=
⟨λ h, l₁.reverse_reverse ▸ l₂.reverse_reverse ▸ h.reverse, sublist.reverse⟩
@[simp] theorem append_sublist_append_right {l₁ l₂ : list α} (l) : l₁++l <+ l₂++l ↔ l₁ <+ l₂ :=
⟨λ h, by simpa only [reverse_append, append_sublist_append_left, reverse_sublist_iff]
using h.reverse,
λ h, h.append_right l⟩
theorem sublist.append {l₁ l₂ r₁ r₂ : list α}
(hl : l₁ <+ l₂) (hr : r₁ <+ r₂) : l₁ ++ r₁ <+ l₂ ++ r₂ :=
(hl.append_right _).trans ((append_sublist_append_left _).2 hr)
theorem sublist.subset : Π {l₁ l₂ : list α}, l₁ <+ l₂ → l₁ ⊆ l₂
| ._ ._ sublist.slnil b h := h
| ._ ._ (sublist.cons l₁ l₂ a s) b h := mem_cons_of_mem _ (sublist.subset s h)
| ._ ._ (sublist.cons2 l₁ l₂ a s) b h :=
match eq_or_mem_of_mem_cons h with
| or.inl h := h ▸ mem_cons_self _ _
| or.inr h := mem_cons_of_mem _ (sublist.subset s h)
end
@[simp] theorem singleton_sublist {a : α} {l} : [a] <+ l ↔ a ∈ l :=
⟨λ h, h.subset (mem_singleton_self _), λ h,
let ⟨s, t, e⟩ := mem_split h in e.symm ▸
((nil_sublist _).cons_cons _ ).trans (sublist_append_right _ _)⟩
theorem eq_nil_of_sublist_nil {l : list α} (s : l <+ []) : l = [] :=
eq_nil_of_subset_nil $ s.subset
@[simp] theorem sublist_nil_iff_eq_nil {l : list α} : l <+ [] ↔ l = [] :=
⟨eq_nil_of_sublist_nil, λ H, H ▸ sublist.refl _⟩
@[simp] theorem repeat_sublist_repeat (a : α) {m n} : repeat a m <+ repeat a n ↔ m ≤ n :=
⟨λ h, by simpa only [length_repeat] using length_le_of_sublist h,
λ h, by induction h; [refl, simp only [*, repeat_succ, sublist.cons]] ⟩
theorem eq_of_sublist_of_length_eq : ∀ {l₁ l₂ : list α}, l₁ <+ l₂ → length l₁ = length l₂ → l₁ = l₂
| ._ ._ sublist.slnil h := rfl
| ._ ._ (sublist.cons l₁ l₂ a s) h :=
absurd (length_le_of_sublist s) $ not_le_of_gt $ by rw h; apply lt_succ_self
| ._ ._ (sublist.cons2 l₁ l₂ a s) h :=
by rw [length, length] at h; injection h with h; rw eq_of_sublist_of_length_eq s h
theorem eq_of_sublist_of_length_le {l₁ l₂ : list α} (s : l₁ <+ l₂) (h : length l₂ ≤ length l₁) :
l₁ = l₂ :=
eq_of_sublist_of_length_eq s (le_antisymm (length_le_of_sublist s) h)
theorem sublist.antisymm {l₁ l₂ : list α} (s₁ : l₁ <+ l₂) (s₂ : l₂ <+ l₁) : l₁ = l₂ :=
eq_of_sublist_of_length_le s₁ (length_le_of_sublist s₂)
instance decidable_sublist [decidable_eq α] : ∀ (l₁ l₂ : list α), decidable (l₁ <+ l₂)
| [] l₂ := is_true $ nil_sublist _
| (a::l₁) [] := is_false $ λh, list.no_confusion $ eq_nil_of_sublist_nil h
| (a::l₁) (b::l₂) :=
if h : a = b then
decidable_of_decidable_of_iff (decidable_sublist l₁ l₂) $
by rw [← h]; exact ⟨sublist.cons_cons _, sublist_of_cons_sublist_cons⟩
else decidable_of_decidable_of_iff (decidable_sublist (a::l₁) l₂)
⟨sublist_cons_of_sublist _, λs, match a, l₁, s, h with
| a, l₁, sublist.cons ._ ._ ._ s', h := s'
| ._, ._, sublist.cons2 t ._ ._ s', h := absurd rfl h
end⟩
/-! ### index_of -/
section index_of
variable [decidable_eq α]
@[simp] theorem index_of_nil (a : α) : index_of a [] = 0 := rfl
theorem index_of_cons (a b : α) (l : list α) :
index_of a (b::l) = if a = b then 0 else succ (index_of a l) := rfl
theorem index_of_cons_eq {a b : α} (l : list α) : a = b → index_of a (b::l) = 0 :=
assume e, if_pos e
@[simp] theorem index_of_cons_self (a : α) (l : list α) : index_of a (a::l) = 0 :=
index_of_cons_eq _ rfl
@[simp, priority 990]
theorem index_of_cons_ne {a b : α} (l : list α) : a ≠ b → index_of a (b::l) = succ (index_of a l) :=
assume n, if_neg n
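-- For concreteness, `index_of` returns the position of the first occurrence
-- (a small sanity check; it reduces by `rfl`):
example : index_of 3 ([1, 3, 2, 3] : list ℕ) = 1 := rfl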
theorem index_of_eq_length {a : α} {l : list α} : index_of a l = length l ↔ a ∉ l :=
begin
induction l with b l ih,
{ exact iff_of_true rfl (not_mem_nil _) },
simp only [length, mem_cons_iff, index_of_cons], split_ifs,
{ exact iff_of_false (by rintro ⟨⟩) (λ H, H $ or.inl h) },
{ simp only [h, false_or], rw ← ih, exact succ_inj' }
end
@[simp, priority 980]
theorem index_of_of_not_mem {l : list α} {a : α} : a ∉ l → index_of a l = length l :=
index_of_eq_length.2
theorem index_of_le_length {a : α} {l : list α} : index_of a l ≤ length l :=
begin
induction l with b l ih, {refl},
simp only [length, index_of_cons],
by_cases h : a = b, {rw if_pos h, exact nat.zero_le _},
rw if_neg h, exact succ_le_succ ih
end
theorem index_of_lt_length {a} {l : list α} : index_of a l < length l ↔ a ∈ l :=
⟨λh, decidable.by_contradiction $ λ al, ne_of_lt h $ index_of_eq_length.2 al,
λal, lt_of_le_of_ne index_of_le_length $ λ h, index_of_eq_length.1 h al⟩
end index_of
/-! ### nth element -/
theorem nth_le_of_mem : ∀ {a} {l : list α}, a ∈ l → ∃ n h, nth_le l n h = a
| a (_ :: l) (or.inl rfl) := ⟨0, succ_pos _, rfl⟩
| a (b :: l) (or.inr m) :=
let ⟨n, h, e⟩ := nth_le_of_mem m in ⟨n+1, succ_lt_succ h, e⟩
theorem nth_le_nth : ∀ {l : list α} {n} h, nth l n = some (nth_le l n h)
| (a :: l) 0 h := rfl
| (a :: l) (n+1) h := @nth_le_nth l n _
theorem nth_len_le : ∀ {l : list α} {n}, length l ≤ n → nth l n = none
| [] n h := rfl
| (a :: l) (n+1) h := nth_len_le (le_of_succ_le_succ h)
theorem nth_eq_some {l : list α} {n a} : nth l n = some a ↔ ∃ h, nth_le l n h = a :=
⟨λ e,
have h : n < length l, from lt_of_not_ge $ λ hn,
by rw nth_len_le hn at e; contradiction,
⟨h, by rw nth_le_nth h at e;
injection e with e; apply nth_le_mem⟩,
λ ⟨h, e⟩, e ▸ nth_le_nth _⟩
@[simp]
theorem nth_eq_none_iff : ∀ {l : list α} {n}, nth l n = none ↔ length l ≤ n :=
begin
intros, split,
{ intro h, by_contradiction h',
have h₂ : ∃ h, l.nth_le n h = l.nth_le n (lt_of_not_ge h') := ⟨lt_of_not_ge h', rfl⟩,
rw [← nth_eq_some, h] at h₂, cases h₂ },
{ solve_by_elim [nth_len_le] },
end
theorem nth_of_mem {a} {l : list α} (h : a ∈ l) : ∃ n, nth l n = some a :=
let ⟨n, h, e⟩ := nth_le_of_mem h in ⟨n, by rw [nth_le_nth, e]⟩
theorem nth_le_mem : ∀ (l : list α) n h, nth_le l n h ∈ l
| (a :: l) 0 h := mem_cons_self _ _
| (a :: l) (n+1) h := mem_cons_of_mem _ (nth_le_mem l _ _)
theorem nth_mem {l : list α} {n a} (e : nth l n = some a) : a ∈ l :=
let ⟨h, e⟩ := nth_eq_some.1 e in e ▸ nth_le_mem _ _ _
theorem mem_iff_nth_le {a} {l : list α} : a ∈ l ↔ ∃ n h, nth_le l n h = a :=
⟨nth_le_of_mem, λ ⟨n, h, e⟩, e ▸ nth_le_mem _ _ _⟩
theorem mem_iff_nth {a} {l : list α} : a ∈ l ↔ ∃ n, nth l n = some a :=
mem_iff_nth_le.trans $ exists_congr $ λ n, nth_eq_some.symm
lemma nth_zero (l : list α) : l.nth 0 = l.head' := by cases l; refl
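-- Small sanity checks: `nth` returns `some` for an index in range and `none` otherwise.
example : ([1, 2, 3] : list ℕ).nth 1 = some 2 := rfl
example : ([1, 2, 3] : list ℕ).nth 5 = none := rfl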
lemma nth_injective {α : Type u} {xs : list α} {i j : ℕ}
(h₀ : i < xs.length)
(h₁ : nodup xs)
(h₂ : xs.nth i = xs.nth j) : i = j :=
begin
induction xs with x xs generalizing i j,
{ cases h₀ },
{ cases i; cases j,
case nat.zero nat.zero
{ refl },
case nat.succ nat.succ
{ congr, cases h₁,
apply xs_ih;
solve_by_elim [lt_of_succ_lt_succ] },
iterate 2
{ dsimp at h₂,
cases h₁ with _ _ h h',
cases h x _ rfl,
rw mem_iff_nth,
exact ⟨_, h₂.symm⟩ <|>
exact ⟨_, h₂⟩ } },
end
@[simp] theorem nth_map (f : α → β) : ∀ l n, nth (map f l) n = (nth l n).map f
| [] n := rfl
| (a :: l) 0 := rfl
| (a :: l) (n+1) := nth_map l n
theorem nth_le_map (f : α → β) {l n} (H1 H2) : nth_le (map f l) n H1 = f (nth_le l n H2) :=
option.some.inj $ by rw [← nth_le_nth, nth_map, nth_le_nth]; refl
/-- A version of `nth_le_map` that can be used for rewriting. -/
theorem nth_le_map_rev (f : α → β) {l n} (H) :
f (nth_le l n H) = nth_le (map f l) n ((length_map f l).symm ▸ H) :=
(nth_le_map f _ _).symm
@[simp] theorem nth_le_map' (f : α → β) {l n} (H) :
nth_le (map f l) n H = f (nth_le l n (length_map f l ▸ H)) :=
nth_le_map f _ _
/-- If one has `nth_le L i hi` in a formula and `h : L = L'`, one cannot `rw h` in the formula
directly, because `hi` proves `i < L.length` and not `i < L'.length`. The lemma `nth_le_of_eq`
can be used to make such a rewrite, with `rw (nth_le_of_eq h)`. -/
lemma nth_le_of_eq {L L' : list α} (h : L = L') {i : ℕ} (hi : i < L.length) :
nth_le L i hi = nth_le L' i (h ▸ hi) :=
by { congr, exact h}
@[simp] lemma nth_le_singleton (a : α) {n : ℕ} (hn : n < 1) :
nth_le [a] n hn = a :=
have hn0 : n = 0 := le_zero_iff.1 (le_of_lt_succ hn),
by subst hn0; refl
lemma nth_le_zero [inhabited α] {L : list α} (h : 0 < L.length) :
L.nth_le 0 h = L.head :=
by { cases L, cases h, simp, }
lemma nth_le_append : ∀ {l₁ l₂ : list α} {n : ℕ} (hn₁) (hn₂),
(l₁ ++ l₂).nth_le n hn₁ = l₁.nth_le n hn₂
| [] _ n hn₁ hn₂ := (nat.not_lt_zero _ hn₂).elim
| (a::l) _ 0 hn₁ hn₂ := rfl
| (a::l) _ (n+1) hn₁ hn₂ := by simp only [nth_le, cons_append];
exact nth_le_append _ _
lemma nth_le_append_right_aux {l₁ l₂ : list α} {n : ℕ}
(h₁ : l₁.length ≤ n) (h₂ : n < (l₁ ++ l₂).length) : n - l₁.length < l₂.length :=
begin
rw list.length_append at h₂,
apply lt_of_add_lt_add_right,
rwa [nat.sub_add_cancel h₁, nat.add_comm],
end
lemma nth_le_append_right : ∀ {l₁ l₂ : list α} {n : ℕ} (h₁ : l₁.length ≤ n) (h₂),
(l₁ ++ l₂).nth_le n h₂ = l₂.nth_le (n - l₁.length) (nth_le_append_right_aux h₁ h₂)
| [] _ n h₁ h₂ := rfl
| (a :: l) _ (n+1) h₁ h₂ :=
begin
dsimp,
conv { to_rhs, congr, skip, rw [nat.add_sub_add_right], },
rw nth_le_append_right (nat.lt_succ_iff.mp h₁),
end
@[simp] lemma nth_le_repeat (a : α) {n m : ℕ} (h : m < (list.repeat a n).length) :
(list.repeat a n).nth_le m h = a :=
eq_of_mem_repeat (nth_le_mem _ _ _)
lemma nth_append {l₁ l₂ : list α} {n : ℕ} (hn : n < l₁.length) :
(l₁ ++ l₂).nth n = l₁.nth n :=
have hn' : n < (l₁ ++ l₂).length := lt_of_lt_of_le hn
(by rw length_append; exact nat.le_add_right _ _),
by rw [nth_le_nth hn, nth_le_nth hn', nth_le_append]
lemma nth_append_right {l₁ l₂ : list α} {n : ℕ} (hn : l₁.length ≤ n) :
(l₁ ++ l₂).nth n = l₂.nth (n - l₁.length) :=
begin
by_cases hl : n < (l₁ ++ l₂).length,
{ rw [nth_le_nth hl, nth_le_nth, nth_le_append_right hn] },
{ rw [nth_len_le (le_of_not_lt hl), nth_len_le],
rw [not_lt, length_append] at hl,
exact le_tsub_of_add_le_left hl }
end
lemma last_eq_nth_le : ∀ (l : list α) (h : l ≠ []),
last l h = l.nth_le (l.length - 1) (nat.sub_lt (length_pos_of_ne_nil h) one_pos)
| [] h := rfl
| [a] h := by rw [last_singleton, nth_le_singleton]
| (a :: b :: l) h := by { rw [last_cons, last_eq_nth_le (b :: l)],
refl, exact cons_ne_nil b l }
@[simp] lemma nth_concat_length : ∀ (l : list α) (a : α), (l ++ [a]).nth l.length = some a
| [] a := rfl
| (b::l) a := by rw [cons_append, length_cons, nth, nth_concat_length]
lemma nth_le_cons_length (x : α) (xs : list α) (n : ℕ) (h : n = xs.length) :
(x :: xs).nth_le n (by simp [h]) = (x :: xs).last (cons_ne_nil x xs) :=
begin
rw last_eq_nth_le,
congr,
simp [h]
end
@[ext]
theorem ext : ∀ {l₁ l₂ : list α}, (∀n, nth l₁ n = nth l₂ n) → l₁ = l₂
| [] [] h := rfl
| (a::l₁) [] h := by have h0 := h 0; contradiction
| [] (a'::l₂) h := by have h0 := h 0; contradiction
| (a::l₁) (a'::l₂) h := by have h0 : some a = some a' := h 0; injection h0 with aa;
simp only [aa, ext (λn, h (n+1))]; split; refl
theorem ext_le {l₁ l₂ : list α} (hl : length l₁ = length l₂)
(h : ∀n h₁ h₂, nth_le l₁ n h₁ = nth_le l₂ n h₂) : l₁ = l₂ :=
ext $ λn, if h₁ : n < length l₁
then by rw [nth_le_nth, nth_le_nth, h n h₁ (by rwa [← hl])]
else let h₁ := le_of_not_gt h₁ in by { rw [nth_len_le h₁, nth_len_le], rwa [←hl], }
@[simp] theorem index_of_nth_le [decidable_eq α] {a : α} :
∀ {l : list α} h, nth_le l (index_of a l) h = a
| (b::l) h := by by_cases h' : a = b;
simp only [h', if_pos, if_false, index_of_cons, nth_le, @index_of_nth_le l]
@[simp] theorem index_of_nth [decidable_eq α] {a : α} {l : list α} (h : a ∈ l) :
nth l (index_of a l) = some a :=
by rw [nth_le_nth, index_of_nth_le (index_of_lt_length.2 h)]
theorem nth_le_reverse_aux1 :
∀ (l r : list α) (i h1 h2), nth_le (reverse_core l r) (i + length l) h1 = nth_le r i h2
| [] r i := λh1 h2, rfl
| (a :: l) r i :=
by rw (show i + length (a :: l) = i + 1 + length l, from add_right_comm i (length l) 1);
exact λh1 h2, nth_le_reverse_aux1 l (a :: r) (i+1) h1 (succ_lt_succ h2)
lemma index_of_inj [decidable_eq α] {l : list α} {x y : α}
(hx : x ∈ l) (hy : y ∈ l) : index_of x l = index_of y l ↔ x = y :=
⟨λ h, have nth_le l (index_of x l) (index_of_lt_length.2 hx) =
nth_le l (index_of y l) (index_of_lt_length.2 hy),
by simp only [h],
by simpa only [index_of_nth_le],
λ h, by subst h⟩
theorem nth_le_reverse_aux2 : ∀ (l r : list α) (i : nat) (h1) (h2),
nth_le (reverse_core l r) (length l - 1 - i) h1 = nth_le l i h2
| [] r i h1 h2 := absurd h2 (nat.not_lt_zero _)
| (a :: l) r 0 h1 h2 := begin
have aux := nth_le_reverse_aux1 l (a :: r) 0,
rw zero_add at aux,
exact aux _ (zero_lt_succ _)
end
| (a :: l) r (i+1) h1 h2 := begin
have aux := nth_le_reverse_aux2 l (a :: r) i,
have heq := calc length (a :: l) - 1 - (i + 1)
= length l - (1 + i) : by rw add_comm; refl
... = length l - 1 - i : by rw ← tsub_add_eq_tsub_tsub,
rw [← heq] at aux,
apply aux
end
@[simp] theorem nth_le_reverse (l : list α) (i : nat) (h1 h2) :
nth_le (reverse l) (length l - 1 - i) h1 = nth_le l i h2 :=
nth_le_reverse_aux2 _ _ _ _ _
lemma nth_le_reverse' (l : list α) (n : ℕ) (hn : n < l.reverse.length) (hn') :
l.reverse.nth_le n hn = l.nth_le (l.length - 1 - n) hn' :=
begin
rw eq_comm,
convert nth_le_reverse l.reverse _ _ _ using 1,
{ simp },
{ simpa }
end
lemma eq_cons_of_length_one {l : list α} (h : l.length = 1) :
l = [l.nth_le 0 (h.symm ▸ zero_lt_one)] :=
begin
refine ext_le (by convert h) (λ n h₁ h₂, _),
simp only [nth_le_singleton],
congr,
exact eq_bot_iff.mpr (nat.lt_succ_iff.mp h₂)
end
lemma modify_nth_tail_modify_nth_tail {f g : list α → list α} (m : ℕ) :
∀n (l:list α), (l.modify_nth_tail f n).modify_nth_tail g (m + n) =
l.modify_nth_tail (λl, (f l).modify_nth_tail g m) n
| 0 l := rfl
| (n+1) [] := rfl
| (n+1) (a::l) := congr_arg (list.cons a) (modify_nth_tail_modify_nth_tail n l)
lemma modify_nth_tail_modify_nth_tail_le
{f g : list α → list α} (m n : ℕ) (l : list α) (h : n ≤ m) :
(l.modify_nth_tail f n).modify_nth_tail g m =
l.modify_nth_tail (λl, (f l).modify_nth_tail g (m - n)) n :=
begin
rcases le_iff_exists_add.1 h with ⟨m, rfl⟩,
rw [add_tsub_cancel_left, add_comm, modify_nth_tail_modify_nth_tail]
end
lemma modify_nth_tail_modify_nth_tail_same {f g : list α → list α} (n : ℕ) (l:list α) :
(l.modify_nth_tail f n).modify_nth_tail g n = l.modify_nth_tail (g ∘ f) n :=
by rw [modify_nth_tail_modify_nth_tail_le n n l (le_refl n), tsub_self]; refl
lemma modify_nth_tail_id :
∀n (l:list α), l.modify_nth_tail id n = l
| 0 l := rfl
| (n+1) [] := rfl
| (n+1) (a::l) := congr_arg (list.cons a) (modify_nth_tail_id n l)
theorem remove_nth_eq_nth_tail : ∀ n (l : list α), remove_nth l n = modify_nth_tail tail n l
| 0 l := by cases l; refl
| (n+1) [] := rfl
| (n+1) (a::l) := congr_arg (cons _) (remove_nth_eq_nth_tail _ _)
theorem update_nth_eq_modify_nth (a : α) : ∀ n (l : list α),
update_nth l n a = modify_nth (λ _, a) n l
| 0 l := by cases l; refl
| (n+1) [] := rfl
| (n+1) (b::l) := congr_arg (cons _) (update_nth_eq_modify_nth _ _)
theorem modify_nth_eq_update_nth (f : α → α) : ∀ n (l : list α),
modify_nth f n l = ((λ a, update_nth l n (f a)) <$> nth l n).get_or_else l
| 0 l := by cases l; refl
| (n+1) [] := rfl
| (n+1) (b::l) := (congr_arg (cons b)
(modify_nth_eq_update_nth n l)).trans $ by cases nth l n; refl
theorem nth_modify_nth (f : α → α) : ∀ n (l : list α) m,
nth (modify_nth f n l) m = (λ a, if n = m then f a else a) <$> nth l m
| n l 0 := by cases l; cases n; refl
| n [] (m+1) := by cases n; refl
| 0 (a::l) (m+1) := by cases nth l m; refl
| (n+1) (a::l) (m+1) := (nth_modify_nth n l m).trans $
by cases nth l m with b; by_cases n = m;
simp only [h, if_pos, if_true, if_false, option.map_none, option.map_some, mt succ.inj,
not_false_iff]
theorem modify_nth_tail_length (f : list α → list α) (H : ∀ l, length (f l) = length l) :
∀ n l, length (modify_nth_tail f n l) = length l
| 0 l := H _
| (n+1) [] := rfl
| (n+1) (a::l) := @congr_arg _ _ _ _ (+1) (modify_nth_tail_length _ _)
@[simp] theorem modify_nth_length (f : α → α) :
∀ n l, length (modify_nth f n l) = length l :=
modify_nth_tail_length _ (λ l, by cases l; refl)
@[simp] theorem update_nth_length (l : list α) (n) (a : α) :
length (update_nth l n a) = length l :=
by simp only [update_nth_eq_modify_nth, modify_nth_length]
@[simp] theorem nth_modify_nth_eq (f : α → α) (n) (l : list α) :
nth (modify_nth f n l) n = f <$> nth l n :=
by simp only [nth_modify_nth, if_pos]
@[simp] theorem nth_modify_nth_ne (f : α → α) {m n} (l : list α) (h : m ≠ n) :
nth (modify_nth f m l) n = nth l n :=
by simp only [nth_modify_nth, if_neg h, id_map']
theorem nth_update_nth_eq (a : α) (n) (l : list α) :
nth (update_nth l n a) n = (λ _, a) <$> nth l n :=
by simp only [update_nth_eq_modify_nth, nth_modify_nth_eq]
theorem nth_update_nth_of_lt (a : α) {n} {l : list α} (h : n < length l) :
nth (update_nth l n a) n = some a :=
by rw [nth_update_nth_eq, nth_le_nth h]; refl
theorem nth_update_nth_ne (a : α) {m n} (l : list α) (h : m ≠ n) :
nth (update_nth l m a) n = nth l n :=
by simp only [update_nth_eq_modify_nth, nth_modify_nth_ne _ _ h]
@[simp] lemma update_nth_nil (n : ℕ) (a : α) : [].update_nth n a = [] := rfl
@[simp] lemma update_nth_succ (x : α) (xs : list α) (n : ℕ) (a : α) :
(x :: xs).update_nth n.succ a = x :: xs.update_nth n a := rfl
lemma update_nth_comm (a b : α) : Π {n m : ℕ} (l : list α) (h : n ≠ m),
(l.update_nth n a).update_nth m b = (l.update_nth m b).update_nth n a
| _ _ [] _ := by simp
| 0 0 (x :: t) h := absurd rfl h
| (n + 1) 0 (x :: t) h := by simp [list.update_nth]
| 0 (m + 1) (x :: t) h := by simp [list.update_nth]
| (n + 1) (m + 1) (x :: t) h := by { simp only [update_nth, true_and, eq_self_iff_true],
exact update_nth_comm t (λ h', h $ nat.succ_inj'.mpr h'), }
@[simp] lemma nth_le_update_nth_eq (l : list α) (i : ℕ) (a : α)
(h : i < (l.update_nth i a).length) : (l.update_nth i a).nth_le i h = a :=
by rw [← option.some_inj, ← nth_le_nth, nth_update_nth_eq, nth_le_nth]; simp * at *
@[simp] lemma nth_le_update_nth_of_ne {l : list α} {i j : ℕ} (h : i ≠ j) (a : α)
(hj : j < (l.update_nth i a).length) :
(l.update_nth i a).nth_le j hj = l.nth_le j (by simpa using hj) :=
by rw [← option.some_inj, ← list.nth_le_nth, list.nth_update_nth_ne _ _ h, list.nth_le_nth]
lemma mem_or_eq_of_mem_update_nth : ∀ {l : list α} {n : ℕ} {a b : α}
(h : a ∈ l.update_nth n b), a ∈ l ∨ a = b
| [] n a b h := false.elim h
| (c::l) 0 a b h := ((mem_cons_iff _ _ _).1 h).elim
or.inr (or.inl ∘ mem_cons_of_mem _)
| (c::l) (n+1) a b h := ((mem_cons_iff _ _ _).1 h).elim
(λ h, h ▸ or.inl (mem_cons_self _ _))
(λ h, (mem_or_eq_of_mem_update_nth h).elim
(or.inl ∘ mem_cons_of_mem _) or.inr)
section insert_nth
variable {a : α}
@[simp] lemma insert_nth_zero (s : list α) (x : α) : insert_nth 0 x s = x :: s := rfl
@[simp] lemma insert_nth_succ_nil (n : ℕ) (a : α) : insert_nth (n + 1) a [] = [] := rfl
@[simp] lemma insert_nth_succ_cons (s : list α) (hd x : α) (n : ℕ) :
insert_nth (n + 1) x (hd :: s) = hd :: (insert_nth n x s) := rfl
lemma length_insert_nth : ∀n as, n ≤ length as → length (insert_nth n a as) = length as + 1
| 0 as h := rfl
| (n+1) [] h := (nat.not_succ_le_zero _ h).elim
| (n+1) (a'::as) h := congr_arg nat.succ $ length_insert_nth n as (nat.le_of_succ_le_succ h)
lemma remove_nth_insert_nth (n:ℕ) (l : list α) : (l.insert_nth n a).remove_nth n = l :=
by rw [remove_nth_eq_nth_tail, insert_nth, modify_nth_tail_modify_nth_tail_same];
from modify_nth_tail_id _ _
lemma insert_nth_remove_nth_of_ge : ∀n m as, n < length as → n ≤ m →
insert_nth m a (as.remove_nth n) = (as.insert_nth (m + 1) a).remove_nth n
| 0 0 [] has _ := (lt_irrefl _ has).elim
| 0 0 (a::as) has hmn := by simp [remove_nth, insert_nth]
| 0 (m+1) (a::as) has hmn := rfl
| (n+1) (m+1) (a::as) has hmn :=
congr_arg (cons a) $
insert_nth_remove_nth_of_ge n m as (nat.lt_of_succ_lt_succ has) (nat.le_of_succ_le_succ hmn)
lemma insert_nth_remove_nth_of_le : ∀n m as, n < length as → m ≤ n →
insert_nth m a (as.remove_nth n) = (as.insert_nth m a).remove_nth (n + 1)
| n 0 (a :: as) has hmn := rfl
| (n + 1) (m + 1) (a :: as) has hmn :=
congr_arg (cons a) $
insert_nth_remove_nth_of_le n m as (nat.lt_of_succ_lt_succ has) (nat.le_of_succ_le_succ hmn)
lemma insert_nth_comm (a b : α) :
∀(i j : ℕ) (l : list α) (h : i ≤ j) (hj : j ≤ length l),
(l.insert_nth i a).insert_nth (j + 1) b = (l.insert_nth j b).insert_nth i a
| 0 j l := by simp [insert_nth]
| (i + 1) 0 l := assume h, (nat.not_lt_zero _ h).elim
| (i + 1) (j+1) [] := by simp
| (i + 1) (j+1) (c::l) :=
assume h₀ h₁,
by simp [insert_nth];
exact insert_nth_comm i j l (nat.le_of_succ_le_succ h₀) (nat.le_of_succ_le_succ h₁)
lemma mem_insert_nth {a b : α} : ∀ {n : ℕ} {l : list α} (hi : n ≤ l.length),
a ∈ l.insert_nth n b ↔ a = b ∨ a ∈ l
| 0 as h := iff.rfl
| (n+1) [] h := (nat.not_succ_le_zero _ h).elim
| (n+1) (a'::as) h := begin
dsimp [list.insert_nth],
erw [list.mem_cons_iff, mem_insert_nth (nat.le_of_succ_le_succ h), list.mem_cons_iff,
← or.assoc, or_comm (a = a'), or.assoc]
end
lemma inj_on_insert_nth_index_of_not_mem (l : list α) (x : α) (hx : x ∉ l) :
set.inj_on (λ k, insert_nth k x l) {n | n ≤ l.length} :=
begin
induction l with hd tl IH,
{ intros n hn m hm h,
simp only [set.mem_singleton_iff, set.set_of_eq_eq_singleton, length, nonpos_iff_eq_zero]
at hn hm,
simp [hn, hm] },
{ intros n hn m hm h,
simp only [length, set.mem_set_of_eq] at hn hm,
simp only [mem_cons_iff, not_or_distrib] at hx,
cases n;
cases m,
{ refl },
{ simpa [hx.left] using h },
{ simpa [ne.symm hx.left] using h },
{ simp only [true_and, eq_self_iff_true, insert_nth_succ_cons] at h,
rw nat.succ_inj',
refine IH hx.right _ _ h,
{ simpa [nat.succ_le_succ_iff] using hn },
{ simpa [nat.succ_le_succ_iff] using hm } } }
end
lemma insert_nth_of_length_lt (l : list α) (x : α) (n : ℕ) (h : l.length < n) :
insert_nth n x l = l :=
begin
induction l with hd tl IH generalizing n,
{ cases n,
{ simpa using h },
{ simp } },
{ cases n,
{ simpa using h },
{ simp only [nat.succ_lt_succ_iff, length] at h,
simpa using IH _ h } }
end
@[simp] lemma insert_nth_length_self (l : list α) (x : α) :
insert_nth l.length x l = l ++ [x] :=
begin
induction l with hd tl IH,
{ simp },
{ simpa using IH }
end
lemma length_le_length_insert_nth (l : list α) (x : α) (n : ℕ) :
l.length ≤ (insert_nth n x l).length :=
begin
cases le_or_lt n l.length with hn hn,
{ rw length_insert_nth _ _ hn,
exact (nat.lt_succ_self _).le },
{ rw insert_nth_of_length_lt _ _ _ hn }
end
lemma length_insert_nth_le_succ (l : list α) (x : α) (n : ℕ) :
(insert_nth n x l).length ≤ l.length + 1 :=
begin
cases le_or_lt n l.length with hn hn,
{ rw length_insert_nth _ _ hn },
{ rw insert_nth_of_length_lt _ _ _ hn,
exact (nat.lt_succ_self _).le }
end
lemma nth_le_insert_nth_of_lt (l : list α) (x : α) (n k : ℕ) (hn : k < n)
(hk : k < l.length)
(hk' : k < (insert_nth n x l).length := hk.trans_le (length_le_length_insert_nth _ _ _)):
(insert_nth n x l).nth_le k hk' = l.nth_le k hk :=
begin
induction n with n IH generalizing k l,
{ simpa using hn },
{ cases l with hd tl,
{ simp },
{ cases k,
{ simp },
{ rw nat.succ_lt_succ_iff at hn,
simpa using IH _ _ hn _ } } }
end
@[simp] lemma nth_le_insert_nth_self (l : list α) (x : α) (n : ℕ)
(hn : n ≤ l.length) (hn' : n < (insert_nth n x l).length :=
by rwa [length_insert_nth _ _ hn, nat.lt_succ_iff]) :
(insert_nth n x l).nth_le n hn' = x :=
begin
induction l with hd tl IH generalizing n,
{ simp only [length, nonpos_iff_eq_zero] at hn,
simp [hn] },
{ cases n,
{ simp },
{ simp only [nat.succ_le_succ_iff, length] at hn,
simpa using IH _ hn } }
end
lemma nth_le_insert_nth_add_succ (l : list α) (x : α) (n k : ℕ)
(hk' : n + k < l.length)
(hk : n + k + 1 < (insert_nth n x l).length :=
by rwa [length_insert_nth _ _ (le_self_add.trans hk'.le), nat.succ_lt_succ_iff]) :
(insert_nth n x l).nth_le (n + k + 1) hk = nth_le l (n + k) hk' :=
begin
induction l with hd tl IH generalizing n k,
{ simpa using hk' },
{ cases n,
{ simpa },
{ simpa [succ_add] using IH _ _ _ } }
end
lemma insert_nth_injective (n : ℕ) (x : α) : function.injective (insert_nth n x) :=
begin
induction n with n IH,
{ have : insert_nth 0 x = cons x := funext (λ _, rfl),
simp [this] },
{ rintros (_|⟨a, as⟩) (_|⟨b, bs⟩) h;
simpa [IH.eq_iff] using h <|> refl }
end
end insert_nth
/-! ### map -/
@[simp] lemma map_nil (f : α → β) : map f [] = [] := rfl
theorem map_eq_foldr (f : α → β) (l : list α) :
map f l = foldr (λ a bs, f a :: bs) [] l :=
by induction l; simp *
lemma map_congr {f g : α → β} : ∀ {l : list α}, (∀ x ∈ l, f x = g x) → map f l = map g l
| [] _ := rfl
| (a::l) h := let ⟨h₁, h₂⟩ := forall_mem_cons.1 h in
by rw [map, map, h₁, map_congr h₂]
lemma map_eq_map_iff {f g : α → β} {l : list α} : map f l = map g l ↔ (∀ x ∈ l, f x = g x) :=
begin
refine ⟨_, map_congr⟩, intros h x hx,
rw [mem_iff_nth_le] at hx, rcases hx with ⟨n, hn, rfl⟩,
rw [nth_le_map_rev f, nth_le_map_rev g], congr, exact h
end
theorem map_concat (f : α → β) (a : α) (l : list α) : map f (concat l a) = concat (map f l) (f a) :=
by induction l; [refl, simp only [*, concat_eq_append, cons_append, map, map_append]]; split; refl
@[simp] theorem map_id'' (l : list α) : map (λ x, x) l = l :=
map_id _
theorem map_id' {f : α → α} (h : ∀ x, f x = x) (l : list α) : map f l = l :=
by simp [show f = id, from funext h]
theorem eq_nil_of_map_eq_nil {f : α → β} {l : list α} (h : map f l = nil) : l = nil :=
eq_nil_of_length_eq_zero $ by rw [← length_map f l, h]; refl
@[simp] theorem map_join (f : α → β) (L : list (list α)) :
map f (join L) = join (map (map f) L) :=
by induction L; [refl, simp only [*, join, map, map_append]]
theorem bind_ret_eq_map (f : α → β) (l : list α) :
l.bind (list.ret ∘ f) = map f l :=
by unfold list.bind; induction l; simp only [map, join, list.ret, cons_append, nil_append, *];
split; refl
lemma bind_congr {l : list α} {f g : α → list β} (h : ∀ x ∈ l, f x = g x) :
list.bind l f = list.bind l g :=
(congr_arg list.join $ map_congr h : _)
@[simp] theorem map_eq_map {α β} (f : α → β) (l : list α) : f <$> l = map f l := rfl
@[simp] theorem map_tail (f : α → β) (l) : map f (tail l) = tail (map f l) :=
by cases l; refl
@[simp] theorem map_injective_iff {f : α → β} : injective (map f) ↔ injective f :=
begin
split; intros h x y hxy,
{ suffices : [x] = [y], { simpa using this }, apply h, simp [hxy] },
{ induction y generalizing x, simpa using hxy,
cases x, simpa using hxy, simp at hxy, simp [y_ih hxy.2, h hxy.1] }
end
/--
A single `list.map` of a composition of functions is equal to
composing a `list.map` with another `list.map`, fully applied.
This is the reverse direction of `list.map_map`.
-/
lemma comp_map (h : β → γ) (g : α → β) (l : list α) :
map (h ∘ g) l = map h (map g l) := (map_map _ _ _).symm
/--
Composing a `list.map` with another `list.map` is equal to
a single `list.map` of composed functions.
-/
@[simp] lemma map_comp_map (g : β → γ) (f : α → β) :
map g ∘ map f = map (g ∘ f) :=
by { ext l, rw comp_map }
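-- A concrete instance of `comp_map` on a small list (it reduces by `rfl`):
example :
  map ((λ n : ℕ, n * 2) ∘ (λ n : ℕ, n + 1)) [1, 2] =
    map (λ n : ℕ, n * 2) (map (λ n : ℕ, n + 1) [1, 2]) :=
rfl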
theorem map_filter_eq_foldr (f : α → β) (p : α → Prop) [decidable_pred p] (as : list α) :
map f (filter p as) = foldr (λ a bs, if p a then f a :: bs else bs) [] as :=
by { induction as, { refl }, { simp! [*, apply_ite (map f)] } }
lemma last_map (f : α → β) {l : list α} (hl : l ≠ []) :
(l.map f).last (mt eq_nil_of_map_eq_nil hl) = f (l.last hl) :=
begin
induction l with l_hd l_tl l_ih,
{ apply (hl rfl).elim },
{ cases l_tl,
{ simp },
{ simpa using l_ih } }
end
/-! ### map₂ -/
theorem nil_map₂ (f : α → β → γ) (l : list β) : map₂ f [] l = [] :=
by cases l; refl
theorem map₂_nil (f : α → β → γ) (l : list α) : map₂ f l [] = [] :=
by cases l; refl
@[simp] theorem map₂_flip (f : α → β → γ) :
∀ as bs, map₂ (flip f) bs as = map₂ f as bs
| [] [] := rfl
| [] (b :: bs) := rfl
| (a :: as) [] := rfl
| (a :: as) (b :: bs) := by { simp! [map₂_flip], refl }
/-! ### take, drop -/
@[simp] theorem take_zero (l : list α) : take 0 l = [] := rfl
@[simp] theorem take_nil : ∀ n, take n [] = ([] : list α)
| 0 := rfl
| (n+1) := rfl
theorem take_cons (n) (a : α) (l : list α) : take (succ n) (a::l) = a :: take n l := rfl
@[simp] theorem take_length : ∀ (l : list α), take (length l) l = l
| [] := rfl
| (a::l) := begin change a :: (take (length l) l) = a :: l, rw take_length end
theorem take_all_of_le : ∀ {n} {l : list α}, length l ≤ n → take n l = l
| 0 [] h := rfl
| 0 (a::l) h := absurd h (not_le_of_gt (zero_lt_succ _))
| (n+1) [] h := rfl
| (n+1) (a::l) h :=
begin
change a :: take n l = a :: l,
rw [take_all_of_le (le_of_succ_le_succ h)]
end
@[simp] theorem take_left : ∀ l₁ l₂ : list α, take (length l₁) (l₁ ++ l₂) = l₁
| [] l₂ := rfl
| (a::l₁) l₂ := congr_arg (cons a) (take_left l₁ l₂)
theorem take_left' {l₁ l₂ : list α} {n} (h : length l₁ = n) :
take n (l₁ ++ l₂) = l₁ :=
by rw ← h; apply take_left
theorem take_take : ∀ (n m) (l : list α), take n (take m l) = take (min n m) l
| n 0 l := by rw [min_zero, take_zero, take_nil]
| 0 m l := by rw [zero_min, take_zero, take_zero]
| (succ n) (succ m) nil := by simp only [take_nil]
| (succ n) (succ m) (a::l) := by simp only [take, min_succ_succ, take_take n m l]; split; refl
theorem take_repeat (a : α) : ∀ (n m : ℕ), take n (repeat a m) = repeat a (min n m)
| n 0 := by simp
| 0 m := by simp
| (succ n) (succ m) := by simp [min_succ_succ, take_repeat]
lemma map_take {α β : Type*} (f : α → β) :
∀ (L : list α) (i : ℕ), (L.take i).map f = (L.map f).take i
| [] i := by simp
| L 0 := by simp
| (h :: t) (n+1) := by { dsimp, rw [map_take], }
/-- Taking the first `n` elements in `l₁ ++ l₂` is the same as appending the first `n` elements
of `l₁` to the first `n - l₁.length` elements of `l₂`. -/
lemma take_append_eq_append_take {l₁ l₂ : list α} {n : ℕ} :
take n (l₁ ++ l₂) = take n l₁ ++ take (n - l₁.length) l₂ :=
begin
induction l₁ generalizing n, { simp },
cases n, { simp }, simp *
end
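-- For example, with `n = 3` and `l₁.length = 2`, the second `take` takes `3 - 2 = 1` element
-- (a small sanity check; it reduces by `rfl`):
example : take 3 ([1, 2] ++ [3, 4, 5] : list ℕ) = take 3 [1, 2] ++ take 1 [3, 4, 5] := rfl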
lemma take_append_of_le_length {l₁ l₂ : list α} {n : ℕ} (h : n ≤ l₁.length) :
(l₁ ++ l₂).take n = l₁.take n :=
by simp [take_append_eq_append_take, tsub_eq_zero_iff_le.mpr h]
/-- Taking the first `l₁.length + i` elements in `l₁ ++ l₂` is the same as appending the first
`i` elements of `l₂` to `l₁`. -/
lemma take_append {l₁ l₂ : list α} (i : ℕ) :
take (l₁.length + i) (l₁ ++ l₂) = l₁ ++ (take i l₂) :=
by simp [take_append_eq_append_take, take_all_of_le le_self_add]
/-- The `i`-th element of a list coincides with the `i`-th element of any of its prefixes of
length `> i`. Version designed to rewrite from the big list to the small list. -/
lemma nth_le_take (L : list α) {i j : ℕ} (hi : i < L.length) (hj : i < j) :
nth_le L i hi = nth_le (L.take j) i (by { rw length_take, exact lt_min hj hi }) :=
by { rw nth_le_of_eq (take_append_drop j L).symm hi, exact nth_le_append _ _ }
/-- The `i`-th element of a list coincides with the `i`-th element of any of its prefixes of
length `> i`. Version designed to rewrite from the small list to the big list. -/
lemma nth_le_take' (L : list α) {i j : ℕ} (hi : i < (L.take j).length) :
nth_le (L.take j) i hi = nth_le L i (lt_of_lt_of_le hi (by simp [le_refl])) :=
by { simp at hi, rw nth_le_take L _ hi.1 }
lemma nth_take {l : list α} {n m : ℕ} (h : m < n) :
(l.take n).nth m = l.nth m :=
begin
induction n with n hn generalizing l m,
{ simp only [nat.nat_zero_eq_zero] at h,
exact absurd h (not_lt_of_le m.zero_le) },
{ cases l with hd tl,
{ simp only [take_nil] },
{ cases m,
{ simp only [nth, take] },
{ simpa only using hn (nat.lt_of_succ_lt_succ h) } } },
end
@[simp] lemma nth_take_of_succ {l : list α} {n : ℕ} :
(l.take (n + 1)).nth n = l.nth n :=
nth_take (nat.lt_succ_self n)
lemma take_succ {l : list α} {n : ℕ} :
l.take (n + 1) = l.take n ++ (l.nth n).to_list :=
begin
induction l with hd tl hl generalizing n,
{ simp only [option.to_list, nth, take_nil, append_nil]},
{ cases n,
{ simp only [option.to_list, nth, eq_self_iff_true, and_self, take, nil_append] },
{ simp only [hl, cons_append, nth, eq_self_iff_true, and_self, take] } }
end
@[simp] lemma take_eq_nil_iff {l : list α} {k : ℕ} :
l.take k = [] ↔ l = [] ∨ k = 0 :=
by { cases l; cases k; simp [nat.succ_ne_zero] }
lemma init_eq_take (l : list α) : l.init = l.take l.length.pred :=
begin
cases l with x l,
{ simp [init] },
{ induction l with hd tl hl generalizing x,
{ simp [init], },
{ simp [init, hl] } }
end
lemma init_take {n : ℕ} {l : list α} (h : n < l.length) :
(l.take n).init = l.take n.pred :=
by simp [init_eq_take, min_eq_left_of_lt h, take_take, pred_le]
@[simp] lemma init_cons_of_ne_nil {α : Type*} {x : α} :
∀ {l : list α} (h : l ≠ []), (x :: l).init = x :: l.init
| [] h := false.elim (h rfl)
| (a :: l) _ := by simp [init]
@[simp] lemma init_append_of_ne_nil {α : Type*} {l : list α} :
∀ (l' : list α) (h : l ≠ []), (l' ++ l).init = l' ++ l.init
| [] _ := by simp only [nil_append]
| (a :: l') h := by simp [append_ne_nil_of_ne_nil_right l' l h, init_append_of_ne_nil l' h]
@[simp] lemma drop_eq_nil_of_le {l : list α} {k : ℕ} (h : l.length ≤ k) :
l.drop k = [] :=
by simpa [←length_eq_zero] using tsub_eq_zero_iff_le.mpr h
lemma drop_eq_nil_iff_le {l : list α} {k : ℕ} :
l.drop k = [] ↔ l.length ≤ k :=
begin
refine ⟨λ h, _, drop_eq_nil_of_le⟩,
induction k with k hk generalizing l,
{ simp only [drop] at h,
simp [h] },
{ cases l,
{ simp },
{ simp only [drop] at h,
simpa [nat.succ_le_succ_iff] using hk h } }
end
lemma tail_drop (l : list α) (n : ℕ) : (l.drop n).tail = l.drop (n + 1) :=
begin
induction l with hd tl hl generalizing n,
{ simp },
{ cases n,
{ simp },
{ simp [hl] } }
end
lemma cons_nth_le_drop_succ {l : list α} {n : ℕ} (hn : n < l.length) :
l.nth_le n hn :: l.drop (n + 1) = l.drop n :=
begin
induction l with hd tl hl generalizing n,
{ exact absurd n.zero_le (not_le_of_lt (by simpa using hn)) },
{ cases n,
{ simp },
{ simp only [nat.succ_lt_succ_iff, list.length] at hn,
simpa [list.nth_le, list.drop] using hl hn } }
end
theorem drop_nil : ∀ n, drop n [] = ([] : list α) :=
λ _, drop_eq_nil_of_le (nat.zero_le _)
@[simp] theorem drop_one : ∀ l : list α, drop 1 l = tail l
| [] := rfl
| (a :: l) := rfl
theorem drop_add : ∀ m n (l : list α), drop (m + n) l = drop m (drop n l)
| m 0 l := rfl
| m (n+1) [] := (drop_nil _).symm
| m (n+1) (a::l) := drop_add m n _
@[simp] theorem drop_left : ∀ l₁ l₂ : list α, drop (length l₁) (l₁ ++ l₂) = l₂
| [] l₂ := rfl
| (a::l₁) l₂ := drop_left l₁ l₂
theorem drop_left' {l₁ l₂ : list α} {n} (h : length l₁ = n) :
drop n (l₁ ++ l₂) = l₂ :=
by rw ← h; apply drop_left
theorem drop_eq_nth_le_cons : ∀ {n} {l : list α} h,
drop n l = nth_le l n h :: drop (n+1) l
| 0 (a::l) h := rfl
| (n+1) (a::l) h := @drop_eq_nth_le_cons n _ _
@[simp] lemma drop_length (l : list α) : l.drop l.length = [] :=
calc l.drop l.length = (l ++ []).drop l.length : by simp
... = [] : drop_left _ _
/-- Dropping the elements up to `n` in `l₁ ++ l₂` is the same as dropping the elements up to `n`
in `l₁`, dropping the elements up to `n - l₁.length` in `l₂`, and appending them. -/
lemma drop_append_eq_append_drop {l₁ l₂ : list α} {n : ℕ} :
drop n (l₁ ++ l₂) = drop n l₁ ++ drop (n - l₁.length) l₂ :=
begin
induction l₁ generalizing n, { simp },
cases n, { simp }, simp *
end
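-- For example, with `n = 3` and `l₁.length = 2`, the second `drop` drops `3 - 2 = 1` element
-- (a small sanity check; it reduces by `rfl`):
example : drop 3 ([1, 2] ++ [3, 4, 5] : list ℕ) = drop 3 [1, 2] ++ drop 1 [3, 4, 5] := rfl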
lemma drop_append_of_le_length {l₁ l₂ : list α} {n : ℕ} (h : n ≤ l₁.length) :
(l₁ ++ l₂).drop n = l₁.drop n ++ l₂ :=
by simp [drop_append_eq_append_drop, tsub_eq_zero_iff_le.mpr h]
/-- Dropping the elements up to `l₁.length + i` in `l₁ ++ l₂` is the same as dropping the elements
up to `i` in `l₂`. -/
lemma drop_append {l₁ l₂ : list α} (i : ℕ) :
drop (l₁.length + i) (l₁ ++ l₂) = drop i l₂ :=
by simp [drop_append_eq_append_drop, take_all_of_le le_self_add]
lemma drop_sizeof_le [has_sizeof α] (l : list α) : ∀ (n : ℕ), (l.drop n).sizeof ≤ l.sizeof :=
begin
induction l with _ _ lih; intro n,
{ rw [drop_nil] },
{ induction n with n nih,
{ refl, },
{ exact trans (lih _) le_add_self } }
end
/-- The `i + j`-th element of a list coincides with the `j`-th element of the list obtained by
dropping the first `i` elements. Version designed to rewrite from the big list to the small list. -/
lemma nth_le_drop (L : list α) {i j : ℕ} (h : i + j < L.length) :
nth_le L (i + j) h = nth_le (L.drop i) j
begin
have A : i < L.length := lt_of_le_of_lt (nat.le.intro rfl) h,
rw (take_append_drop i L).symm at h,
simpa only [le_of_lt A, min_eq_left, add_lt_add_iff_left, length_take, length_append] using h
end :=
begin
have A : length (take i L) = i, by simp [le_of_lt (lt_of_le_of_lt (nat.le.intro rfl) h)],
rw [nth_le_of_eq (take_append_drop i L).symm h, nth_le_append_right];
simp [A]
end
/-- The `i + j`-th element of a list coincides with the `j`-th element of the list obtained by
dropping the first `i` elements. Version designed to rewrite from the small list to the big list. -/
lemma nth_le_drop' (L : list α) {i j : ℕ} (h : j < (L.drop i).length) :
nth_le (L.drop i) j h = nth_le L (i + j) (lt_tsub_iff_left.mp ((length_drop i L) ▸ h)) :=
by rw nth_le_drop
lemma nth_drop (L : list α) (i j : ℕ) :
nth (L.drop i) j = nth L (i + j) :=
begin
ext,
simp only [nth_eq_some, nth_le_drop', option.mem_def],
split;
exact λ ⟨h, ha⟩, ⟨by simpa [lt_tsub_iff_left] using h, ha⟩
end
@[simp] theorem drop_drop (n : ℕ) : ∀ (m) (l : list α), drop n (drop m l) = drop (n + m) l
| m [] := by simp
| 0 l := by simp
| (m+1) (a::l) :=
calc drop n (drop (m + 1) (a :: l)) = drop n (drop m l) : rfl
... = drop (n + m) l : drop_drop m l
... = drop (n + (m + 1)) (a :: l) : rfl
theorem drop_take : ∀ (m : ℕ) (n : ℕ) (l : list α),
drop m (take (m + n) l) = take n (drop m l)
| 0 n _ := by simp
| (m+1) n nil := by simp
| (m+1) n (_::l) :=
have h : m + 1 + n = (m + n) + 1, by ac_refl,
by simpa [take_cons, h] using drop_take m n l
lemma map_drop {α β : Type*} (f : α → β) :
∀ (L : list α) (i : ℕ), (L.drop i).map f = (L.map f).drop i
| [] i := by simp
| L 0 := by simp
| (h :: t) (n+1) := by { dsimp, rw [map_drop], }
theorem modify_nth_tail_eq_take_drop (f : list α → list α) (H : f [] = []) :
∀ n l, modify_nth_tail f n l = take n l ++ f (drop n l)
| 0 l := rfl
| (n+1) [] := H.symm
| (n+1) (b::l) := congr_arg (cons b) (modify_nth_tail_eq_take_drop n l)
theorem modify_nth_eq_take_drop (f : α → α) :
∀ n l, modify_nth f n l = take n l ++ modify_head f (drop n l) :=
modify_nth_tail_eq_take_drop _ rfl
theorem modify_nth_eq_take_cons_drop (f : α → α) {n l} (h) :
modify_nth f n l = take n l ++ f (nth_le l n h) :: drop (n+1) l :=
by rw [modify_nth_eq_take_drop, drop_eq_nth_le_cons h]; refl
theorem update_nth_eq_take_cons_drop (a : α) {n l} (h : n < length l) :
update_nth l n a = take n l ++ a :: drop (n+1) l :=
by rw [update_nth_eq_modify_nth, modify_nth_eq_take_cons_drop _ h]
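-- A concrete instance of the take/cons/drop description of `update_nth`:
example :
  update_nth ([1, 2, 3] : list ℕ) 1 5 = take 1 [1, 2, 3] ++ 5 :: drop 2 [1, 2, 3] := rfl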
lemma reverse_take {α} {xs : list α} (n : ℕ)
(h : n ≤ xs.length) :
xs.reverse.take n = (xs.drop (xs.length - n)).reverse :=
begin
induction xs generalizing n;
simp only [reverse_cons, drop, reverse_nil, zero_tsub, length, take_nil],
cases h.lt_or_eq_dec with h' h',
{ replace h' := le_of_succ_le_succ h',
rwa [take_append_of_le_length, xs_ih _ h'],
rw [show xs_tl.length + 1 - n = succ (xs_tl.length - n), from _, drop],
{ rwa [succ_eq_add_one, ← tsub_add_eq_add_tsub] },
{ rwa length_reverse } },
{ subst h', rw [length, tsub_self, drop],
suffices : xs_tl.length + 1 = (xs_tl.reverse ++ [xs_hd]).length,
by rw [this, take_length, reverse_cons],
rw [length_append, length_reverse], refl }
end
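-- e.g. taking two elements of the reverse is the reverse of dropping all but the last two:
example : ([1, 2, 3, 4] : list ℕ).reverse.take 2 = ([1, 2, 3, 4].drop 2).reverse := rfl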
@[simp] lemma update_nth_eq_nil (l : list α) (n : ℕ) (a : α) : l.update_nth n a = [] ↔ l = [] :=
by cases l; cases n; simp only [update_nth]
section take'
variable [inhabited α]
@[simp] theorem take'_length : ∀ n l, length (@take' α _ n l) = n
| 0 l := rfl
| (n+1) l := congr_arg succ (take'_length _ _)
@[simp] theorem take'_nil : ∀ n, take' n (@nil α) = repeat default n
| 0 := rfl
| (n+1) := congr_arg (cons _) (take'_nil _)
theorem take'_eq_take : ∀ {n} {l : list α},
n ≤ length l → take' n l = take n l
| 0 l h := rfl
| (n+1) (a::l) h := congr_arg (cons _) $
take'_eq_take $ le_of_succ_le_succ h
@[simp] theorem take'_left (l₁ l₂ : list α) : take' (length l₁) (l₁ ++ l₂) = l₁ :=
(take'_eq_take (by simp only [length_append, nat.le_add_right])).trans (take_left _ _)
theorem take'_left' {l₁ l₂ : list α} {n} (h : length l₁ = n) :
take' n (l₁ ++ l₂) = l₁ :=
by rw ← h; apply take'_left
end take'
/-! ### foldl, foldr -/
lemma foldl_ext (f g : α → β → α) (a : α)
{l : list β} (H : ∀ a : α, ∀ b ∈ l, f a b = g a b) :
foldl f a l = foldl g a l :=
begin
induction l with hd tl ih generalizing a, {refl},
unfold foldl,
rw [ih (λ a b bin, H a b $ mem_cons_of_mem _ bin), H a hd (mem_cons_self _ _)]
end
lemma foldr_ext (f g : α → β → β) (b : β)
{l : list α} (H : ∀ a ∈ l, ∀ b : β, f a b = g a b) :
foldr f b l = foldr g b l :=
begin
induction l with hd tl ih, {refl},
simp only [mem_cons_iff, or_imp_distrib, forall_and_distrib, forall_eq] at H,
simp only [foldr, ih H.2, H.1]
end
@[simp] theorem foldl_nil (f : α → β → α) (a : α) : foldl f a [] = a := rfl
@[simp] theorem foldl_cons (f : α → β → α) (a : α) (b : β) (l : list β) :
foldl f a (b::l) = foldl f (f a b) l := rfl
@[simp] theorem foldr_nil (f : α → β → β) (b : β) : foldr f b [] = b := rfl
@[simp] theorem foldr_cons (f : α → β → β) (b : β) (a : α) (l : list α) :
foldr f b (a::l) = f a (foldr f b l) := rfl
@[simp] theorem foldl_append (f : α → β → α) :
∀ (a : α) (l₁ l₂ : list β), foldl f a (l₁++l₂) = foldl f (foldl f a l₁) l₂
| a [] l₂ := rfl
| a (b::l₁) l₂ := by simp only [cons_append, foldl_cons, foldl_append (f a b) l₁ l₂]
@[simp] theorem foldr_append (f : α → β → β) :
∀ (b : β) (l₁ l₂ : list α), foldr f b (l₁++l₂) = foldr f (foldr f b l₂) l₁
| b [] l₂ := rfl
| b (a::l₁) l₂ := by simp only [cons_append, foldr_cons, foldr_append b l₁ l₂]
theorem foldl_fixed' {f : α → β → α} {a : α} (hf : ∀ b, f a b = a) :
Π l : list β, foldl f a l = a
| [] := rfl
| (b::l) := by rw [foldl_cons, hf b, foldl_fixed' l]
theorem foldr_fixed' {f : α → β → β} {b : β} (hf : ∀ a, f a b = b) :
Π l : list α, foldr f b l = b
| [] := rfl
| (a::l) := by rw [foldr_cons, foldr_fixed' l, hf a]
@[simp] theorem foldl_fixed {a : α} : Π l : list β, foldl (λ a b, a) a l = a :=
foldl_fixed' (λ _, rfl)
@[simp] theorem foldr_fixed {b : β} : Π l : list α, foldr (λ a b, b) b l = b :=
foldr_fixed' (λ _, rfl)
@[simp] theorem foldl_combinator_K {a : α} : Π l : list β, foldl combinator.K a l = a :=
foldl_fixed
@[simp] theorem foldl_join (f : α → β → α) :
∀ (a : α) (L : list (list β)), foldl f a (join L) = foldl (foldl f) a L
| a [] := rfl
| a (l::L) := by simp only [join, foldl_append, foldl_cons, foldl_join (foldl f a l) L]
@[simp] theorem foldr_join (f : α → β → β) :
∀ (b : β) (L : list (list α)), foldr f b (join L) = foldr (λ l b, foldr f b l) b L
| a [] := rfl
| a (l::L) := by simp only [join, foldr_append, foldr_join a L, foldr_cons]
theorem foldl_reverse (f : α → β → α) (a : α) (l : list β) :
foldl f a (reverse l) = foldr (λx y, f y x) a l :=
by induction l; [refl, simp only [*, reverse_cons, foldl_append, foldl_cons, foldl_nil, foldr]]
theorem foldr_reverse (f : α → β → β) (a : β) (l : list α) :
foldr f a (reverse l) = foldl (λx y, f y x) a l :=
let t := foldl_reverse (λx y, f y x) a (reverse l) in
by rw reverse_reverse l at t; rwa t
@[simp] theorem foldr_eta : ∀ (l : list α), foldr cons [] l = l
| [] := rfl
| (x::l) := by simp only [foldr_cons, foldr_eta l]; split; refl
@[simp] theorem reverse_foldl {l : list α} : reverse (foldl (λ t h, h :: t) [] l) = l :=
by rw ←foldr_reverse; simp
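-- Concretely, `foldr cons` rebuilds a list, while `foldl` with a flipped `cons` reverses it
-- (small sanity checks; both reduce by `rfl`):
example : foldr cons ([] : list ℕ) [1, 2, 3] = [1, 2, 3] := rfl
example : foldl (λ t h, h :: t) ([] : list ℕ) [1, 2, 3] = [3, 2, 1] := rfl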
@[simp] theorem foldl_map (g : β → γ) (f : α → γ → α) (a : α) (l : list β) :
foldl f a (map g l) = foldl (λx y, f x (g y)) a l :=
by revert a; induction l; intros; [refl, simp only [*, map, foldl]]
@[simp] theorem foldr_map (g : β → γ) (f : γ → α → α) (a : α) (l : list β) :
foldr f a (map g l) = foldr (f ∘ g) a l :=
by revert a; induction l; intros; [refl, simp only [*, map, foldr]]
theorem foldl_map' {α β : Type u} (g : α → β) (f : α → α → α) (f' : β → β → β)
(a : α) (l : list α) (h : ∀ x y, f' (g x) (g y) = g (f x y)) :
list.foldl f' (g a) (l.map g) = g (list.foldl f a l) :=
begin
induction l generalizing a,
{ simp }, { simp [l_ih, h] }
end
theorem foldr_map' {α β : Type u} (g : α → β) (f : α → α → α) (f' : β → β → β)
(a : α) (l : list α) (h : ∀ x y, f' (g x) (g y) = g (f x y)) :
list.foldr f' (g a) (l.map g) = g (list.foldr f a l) :=
begin
induction l generalizing a,
{ simp }, { simp [l_ih, h] }
end
theorem foldl_hom (l : list γ) (f : α → β) (op : α → γ → α) (op' : β → γ → β) (a : α)
(h : ∀a x, f (op a x) = op' (f a) x) : foldl op' (f a) l = f (foldl op a l) :=
eq.symm $ by { revert a, induction l; intros; [refl, simp only [*, foldl]] }
theorem foldr_hom (l : list γ) (f : α → β) (op : γ → α → α) (op' : γ → β → β) (a : α)
(h : ∀x a, f (op x a) = op' x (f a)) : foldr op' (f a) l = f (foldr op a l) :=
by { revert a, induction l; intros; [refl, simp only [*, foldr]] }
lemma foldl_hom₂ (l : list ι) (f : α → β → γ) (op₁ : α → ι → α) (op₂ : β → ι → β) (op₃ : γ → ι → γ)
(a : α) (b : β) (h : ∀ a b i, f (op₁ a i) (op₂ b i) = op₃ (f a b) i) :
foldl op₃ (f a b) l = f (foldl op₁ a l) (foldl op₂ b l) :=
eq.symm $ by { revert a b, induction l; intros; [refl, simp only [*, foldl]] }
lemma foldr_hom₂ (l : list ι) (f : α → β → γ) (op₁ : ι → α → α) (op₂ : ι → β → β) (op₃ : ι → γ → γ)
(a : α) (b : β) (h : ∀ a b i, f (op₁ i a) (op₂ i b) = op₃ i (f a b)) :
foldr op₃ (f a b) l = f (foldr op₁ a l) (foldr op₂ b l) :=
by { revert a, induction l; intros; [refl, simp only [*, foldr]] }
lemma injective_foldl_comp {α : Type*} {l : list (α → α)} {f : α → α}
(hl : ∀ f ∈ l, function.injective f) (hf : function.injective f):
function.injective (@list.foldl (α → α) (α → α) function.comp f l) :=
begin
induction l generalizing f,
{ exact hf },
{ apply l_ih (λ _ h, hl _ (list.mem_cons_of_mem _ h)),
apply function.injective.comp hf,
apply hl _ (list.mem_cons_self _ _) }
end
/-- Induction principle for values produced by a `foldr`: if a property holds
for the seed element `b : β` and is preserved by every step `op : α → β → β`
performed on the elements `(a : α) ∈ l`, then it holds for `foldr op b l`.
The principle is given for a `Sort`-valued predicate, i.e., it can also be used
to construct data. -/
def foldr_rec_on {C : β → Sort*} (l : list α) (op : α → β → β) (b : β) (hb : C b)
(hl : ∀ (b : β) (hb : C b) (a : α) (ha : a ∈ l), C (op a b)) :
C (foldr op b l) :=
begin
induction l with hd tl IH,
{ exact hb },
{ refine hl _ _ hd (mem_cons_self hd tl),
refine IH _,
intros y hy x hx,
exact hl y hy x (mem_cons_of_mem hd hx) }
end
/-- Induction principle for values produced by a `foldl`: if a property holds
for the seed element `b : β` and is preserved by every step `op : β → α → β`
performed on the elements `(a : α) ∈ l`, then it holds for `foldl op b l`.
The principle is given for a `Sort`-valued predicate, i.e., it can also be used
to construct data. -/
def foldl_rec_on {C : β → Sort*} (l : list α) (op : β → α → β) (b : β) (hb : C b)
(hl : ∀ (b : β) (hb : C b) (a : α) (ha : a ∈ l), C (op b a)) :
C (foldl op b l) :=
begin
induction l with hd tl IH generalizing b,
{ exact hb },
{ refine IH _ _ _,
{ intros y hy x hx,
exact hl y hy x (mem_cons_of_mem hd hx) },
{ exact hl b hb hd (mem_cons_self hd tl) } }
end
@[simp] lemma foldr_rec_on_nil {C : β → Sort*} (op : α → β → β) (b) (hb : C b) (hl) :
foldr_rec_on [] op b hb hl = hb := rfl
@[simp] lemma foldr_rec_on_cons {C : β → Sort*} (x : α) (l : list α)
(op : α → β → β) (b) (hb : C b)
(hl : ∀ (b : β) (hb : C b) (a : α) (ha : a ∈ (x :: l)), C (op a b)) :
foldr_rec_on (x :: l) op b hb hl = hl _ (foldr_rec_on l op b hb
(λ b hb a ha, hl b hb a (mem_cons_of_mem _ ha))) x (mem_cons_self _ _) := rfl
@[simp] lemma foldl_rec_on_nil {C : β → Sort*} (op : β → α → β) (b) (hb : C b) (hl) :
foldl_rec_on [] op b hb hl = hb := rfl
/-! ### scanl -/
section scanl
variables {f : β → α → β} {b : β} {a : α} {l : list α}
lemma length_scanl :
∀ a l, length (scanl f a l) = l.length + 1
| a [] := rfl
| a (x :: l) := by erw [length_cons, length_cons, length_scanl]
@[simp] lemma scanl_nil (b : β) : scanl f b nil = [b] := rfl
@[simp] lemma scanl_cons :
scanl f b (a :: l) = [b] ++ scanl f (f b a) l :=
by simp only [scanl, eq_self_iff_true, singleton_append, and_self]
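-- A concrete `scanl`: the partial folds of `(+)`, starting from the seed `0`
-- (a small sanity check; it reduces by `rfl`):
example : scanl (+) (0 : ℕ) [1, 2, 3] = [0, 1, 3, 6] := rfl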
@[simp] lemma nth_zero_scanl : (scanl f b l).nth 0 = some b :=
begin
cases l,
{ simp only [nth, scanl_nil] },
{ simp only [nth, scanl_cons, singleton_append] }
end
@[simp] lemma nth_le_zero_scanl {h : 0 < (scanl f b l).length} :
(scanl f b l).nth_le 0 h = b :=
begin
cases l,
{ simp only [nth_le, scanl_nil] },
{ simp only [nth_le, scanl_cons, singleton_append] }
end
lemma nth_succ_scanl {i : ℕ} :
(scanl f b l).nth (i + 1) = ((scanl f b l).nth i).bind (λ x, (l.nth i).map (λ y, f x y)) :=
begin
induction l with hd tl hl generalizing b i,
{ symmetry,
simp only [option.bind_eq_none', nth, forall_2_true_iff, not_false_iff, option.map_none',
scanl_nil, option.not_mem_none, forall_true_iff] },
{ simp only [nth, scanl_cons, singleton_append],
cases i,
{ simp only [option.map_some', nth_zero_scanl, nth, option.some_bind'] },
{ simp only [hl, nth] } }
end
lemma nth_le_succ_scanl {i : ℕ} {h : i + 1 < (scanl f b l).length} :
(scanl f b l).nth_le (i + 1) h =
f ((scanl f b l).nth_le i (nat.lt_of_succ_lt h))
(l.nth_le i (nat.lt_of_succ_lt_succ (lt_of_lt_of_le h (le_of_eq (length_scanl b l))))) :=
begin
induction i with i hi generalizing b l,
{ cases l,
{ simp only [length, zero_add, scanl_nil] at h,
exact absurd h (lt_irrefl 1) },
{ simp only [scanl_cons, singleton_append, nth_le_zero_scanl, nth_le] } },
{ cases l,
{ simp only [length, add_lt_iff_neg_right, scanl_nil] at h,
exact absurd h (not_lt_of_lt nat.succ_pos') },
{ simp_rw scanl_cons,
rw nth_le_append_right _,
{ simpa only [hi, length, succ_add_sub_one] },
{ simp only [length, nat.zero_le, le_add_iff_nonneg_left] } } }
end
end scanl
/-! ### scanr -/
@[simp] theorem scanr_nil (f : α → β → β) (b : β) : scanr f b [] = [b] := rfl
@[simp] theorem scanr_aux_cons (f : α → β → β) (b : β) : ∀ (a : α) (l : list α),
scanr_aux f b (a::l) = (foldr f b (a::l), scanr f b l)
| a [] := rfl
| a (x::l) := let t := scanr_aux_cons x l in
by simp only [scanr, scanr_aux, t, foldr_cons]
@[simp] theorem scanr_cons (f : α → β → β) (b : β) (a : α) (l : list α) :
scanr f b (a::l) = foldr f b (a::l) :: scanr f b l :=
by simp only [scanr, scanr_aux_cons, foldr_cons]; split; refl
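-- A concrete `scanr`: the folds of all suffixes, ending with the seed `0`
-- (a small sanity check; it reduces by `rfl`):
example : scanr (+) (0 : ℕ) [1, 2, 3] = [6, 5, 3, 0] := rfl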
section foldl_eq_foldr
-- foldl and foldr coincide when f is commutative and associative
variables {f : α → α → α} (hcomm : commutative f) (hassoc : associative f)
include hassoc
theorem foldl1_eq_foldr1 : ∀ a b l, foldl f a (l++[b]) = foldr f b (a::l)
| a b nil := rfl
| a b (c :: l) :=
by simp only [cons_append, foldl_cons, foldr_cons, foldl1_eq_foldr1 _ _ l]; rw hassoc
include hcomm
theorem foldl_eq_of_comm_of_assoc : ∀ a b l, foldl f a (b::l) = f b (foldl f a l)
| a b nil := hcomm a b
| a b (c::l) := by simp only [foldl_cons];
rw [← foldl_eq_of_comm_of_assoc, right_comm _ hcomm hassoc]; refl
theorem foldl_eq_foldr : ∀ a l, foldl f a l = foldr f a l
| a nil := rfl
| a (b :: l) :=
by simp only [foldr_cons, foldl_eq_of_comm_of_assoc hcomm hassoc]; rw (foldl_eq_foldr a l)
end foldl_eq_foldr
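-- For instance, with the commutative and associative operation `(+)` on `ℕ`:
example : foldl (+) (0 : ℕ) [1, 2, 3] = foldr (+) (0 : ℕ) [1, 2, 3] := rfl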
section foldl_eq_foldlr'
variables {f : α → β → α}
variables hf : ∀ a b c, f (f a b) c = f (f a c) b
include hf
theorem foldl_eq_of_comm' : ∀ a b l, foldl f a (b::l) = f (foldl f a l) b
| a b [] := rfl
| a b (c :: l) := by rw [foldl,foldl,foldl,← foldl_eq_of_comm',foldl,hf]
theorem foldl_eq_foldr' : ∀ a l, foldl f a l = foldr (flip f) a l
| a [] := rfl
| a (b :: l) := by rw [foldl_eq_of_comm' hf,foldr,foldl_eq_foldr']; refl
end foldl_eq_foldlr'
section foldl_eq_foldlr'
variables {f : α → β → β}
variables hf : ∀ a b c, f a (f b c) = f b (f a c)
include hf
theorem foldr_eq_of_comm' : ∀ a b l, foldr f a (b::l) = foldr f (f b a) l
| a b [] := rfl
| a b (c :: l) := by rw [foldr,foldr,foldr,hf,← foldr_eq_of_comm']; refl
end foldl_eq_foldlr'
section
variables {op : α → α → α} [ha : is_associative α op] [hc : is_commutative α op]
local notation a * b := op a b
local notation l <*> a := foldl op a l
include ha
lemma foldl_assoc : ∀ {l : list α} {a₁ a₂}, l <*> (a₁ * a₂) = a₁ * (l <*> a₂)
| [] a₁ a₂ := rfl
| (a :: l) a₁ a₂ :=
calc a::l <*> (a₁ * a₂) = l <*> (a₁ * (a₂ * a)) : by simp only [foldl_cons, ha.assoc]
... = a₁ * (a::l <*> a₂) : by rw [foldl_assoc, foldl_cons]
lemma foldl_op_eq_op_foldr_assoc : ∀{l : list α} {a₁ a₂}, (l <*> a₁) * a₂ = a₁ * l.foldr (*) a₂
| [] a₁ a₂ := rfl
| (a :: l) a₁ a₂ := by simp only [foldl_cons, foldr_cons, foldl_assoc, ha.assoc];
rw [foldl_op_eq_op_foldr_assoc]
include hc
lemma foldl_assoc_comm_cons {l : list α} {a₁ a₂} : (a₁ :: l) <*> a₂ = a₁ * (l <*> a₂) :=
by rw [foldl_cons, hc.comm, foldl_assoc]
end
/-! ### mfoldl, mfoldr, mmap -/
section mfoldl_mfoldr
variables {m : Type v → Type w} [monad m]
@[simp] theorem mfoldl_nil (f : β → α → m β) {b} : mfoldl f b [] = pure b := rfl
@[simp] theorem mfoldr_nil (f : α → β → m β) {b} : mfoldr f b [] = pure b := rfl
@[simp] theorem mfoldl_cons {f : β → α → m β} {b a l} :
mfoldl f b (a :: l) = f b a >>= λ b', mfoldl f b' l := rfl
@[simp] theorem mfoldr_cons {f : α → β → m β} {b a l} :
mfoldr f b (a :: l) = mfoldr f b l >>= f a := rfl
theorem mfoldr_eq_foldr (f : α → β → m β) (b l) :
mfoldr f b l = foldr (λ a mb, mb >>= f a) (pure b) l :=
by induction l; simp *
attribute [simp] mmap mmap'
variables [is_lawful_monad m]
theorem mfoldl_eq_foldl (f : β → α → m β) (b l) :
mfoldl f b l = foldl (λ mb a, mb >>= λ b, f b a) (pure b) l :=
begin
suffices h : ∀ (mb : m β),
(mb >>= λ b, mfoldl f b l) = foldl (λ mb a, mb >>= λ b, f b a) mb l,
by simp [←h (pure b)],
induction l; intro,
{ simp },
{ simp only [mfoldl, foldl, ←l_ih] with functor_norm }
end
@[simp] theorem mfoldl_append {f : β → α → m β} : ∀ {b l₁ l₂},
mfoldl f b (l₁ ++ l₂) = mfoldl f b l₁ >>= λ x, mfoldl f x l₂
| _ [] _ := by simp only [nil_append, mfoldl_nil, pure_bind]
| _ (_::_) _ := by simp only [cons_append, mfoldl_cons, mfoldl_append, is_lawful_monad.bind_assoc]
@[simp] theorem mfoldr_append {f : α → β → m β} : ∀ {b l₁ l₂},
mfoldr f b (l₁ ++ l₂) = mfoldr f b l₂ >>= λ x, mfoldr f x l₁
| _ [] _ := by simp only [nil_append, mfoldr_nil, bind_pure]
| _ (_::_) _ := by simp only [mfoldr_cons, cons_append, mfoldr_append, is_lawful_monad.bind_assoc]
end mfoldl_mfoldr
/-! ### intersperse -/
@[simp] lemma intersperse_nil {α : Type u} (a : α) : intersperse a [] = [] := rfl
@[simp] lemma intersperse_singleton {α : Type u} (a b : α) : intersperse a [b] = [b] := rfl
@[simp] lemma intersperse_cons_cons {α : Type u} (a b c : α) (tl : list α) :
intersperse a (b :: c :: tl) = b :: a :: intersperse a (c :: tl) := rfl
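-- e.g. `intersperse` places the separator between consecutive elements:
example : intersperse 0 ([1, 2, 3] : list ℕ) = [1, 0, 2, 0, 3] := rfl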
/-! ### split_at and split_on -/
section split_at_on
variables (p : α → Prop) [decidable_pred p] (xs ys : list α)
(ls : list (list α)) (f : list α → list α)
@[simp] theorem split_at_eq_take_drop : ∀ (n : ℕ) (l : list α), split_at n l = (take n l, drop n l)
| 0 a := rfl
| (succ n) [] := rfl
| (succ n) (x :: xs) := by simp only [split_at, split_at_eq_take_drop n xs, take, drop]
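-- A concrete check of the `take`/`drop` description of `split_at`:
example : split_at 2 ([1, 2, 3, 4] : list ℕ) = ([1, 2], [3, 4]) := rfl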
@[simp] lemma split_on_nil {α : Type u} [decidable_eq α] (a : α) : [].split_on a = [[]] := rfl
@[simp] lemma split_on_p_nil : [].split_on_p p = [[]] := rfl
/-- An auxiliary definition for proving a specification lemma for `split_on_p`.
`split_on_p_aux' P xs ys` splits the list `ys ++ xs` at every element satisfying `P`,
where `ys` is an accumulating parameter for the initial segment of elements not satisfying `P`.
-/
def split_on_p_aux' {α : Type u} (P : α → Prop) [decidable_pred P] : list α → list α → list (list α)
| [] xs := [xs]
| (h :: t) xs :=
if P h then xs :: split_on_p_aux' t []
else split_on_p_aux' t (xs ++ [h])
lemma split_on_p_aux_eq : split_on_p_aux' p xs ys = split_on_p_aux p xs ((++) ys) :=
begin
induction xs with a t ih generalizing ys; simp! only [append_nil, eq_self_iff_true, and_self],
split_ifs; rw ih,
{ refine ⟨rfl, rfl⟩ },
{ congr, ext, simp }
end
lemma split_on_p_aux_nil : split_on_p_aux p xs id = split_on_p_aux' p xs [] :=
by { rw split_on_p_aux_eq, refl }
/-- The original list `L` can be recovered by joining the lists produced by `split_on_p p L`,
interspersed with the elements `L.filter p`. -/
lemma split_on_p_spec (as : list α) :
join (zip_with (++) (split_on_p p as) ((as.filter p).map (λ x, [x]) ++ [[]])) = as :=
begin
rw [split_on_p, split_on_p_aux_nil],
suffices : ∀ xs,
join (zip_with (++) (split_on_p_aux' p as xs) ((as.filter p).map(λ x, [x]) ++ [[]])) = xs ++ as,
{ rw this, refl },
induction as; intro; simp! only [split_on_p_aux', append_nil],
split_ifs; simp [zip_with, join, *],
end
lemma split_on_p_aux_ne_nil : split_on_p_aux p xs f ≠ [] :=
begin
induction xs with _ _ ih generalizing f, { trivial, },
simp only [split_on_p_aux], split_ifs, { trivial, }, exact ih _,
end
lemma split_on_p_aux_spec : split_on_p_aux p xs f = (xs.split_on_p p).modify_head f :=
begin
simp only [split_on_p],
induction xs with hd tl ih generalizing f, { simp [split_on_p_aux], },
simp only [split_on_p_aux], split_ifs, { simp, },
rw [ih (λ l, f (hd :: l)), ih (λ l, id (hd :: l))],
simp,
end
lemma split_on_p_ne_nil : xs.split_on_p p ≠ [] := split_on_p_aux_ne_nil _ _ id
@[simp] lemma split_on_p_cons (x : α) (xs : list α) :
(x :: xs).split_on_p p =
if p x then [] :: xs.split_on_p p else (xs.split_on_p p).modify_head (cons x) :=
by { simp only [split_on_p, split_on_p_aux], split_ifs, { simp }, rw split_on_p_aux_spec, refl, }
/-- If no element satisfies `p` in the list `xs`, then `xs.split_on_p p = [xs]` -/
lemma split_on_p_eq_single (h : ∀ x ∈ xs, ¬p x) : xs.split_on_p p = [xs] :=
by { induction xs with hd tl ih, { refl, }, simp [h hd _, ih (λ t ht, h t (or.inr ht))], }
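-- e.g. `split_on` with a separator that does not occur returns the whole list:
example : ([1, 2, 3] : list ℕ).split_on 0 = [[1, 2, 3]] := rfl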
/-- When a list of the form `[...xs, sep, ...as]` is split on `p`, the first element of the
result is `xs`, assuming no element in `xs` satisfies `p` but `sep` does satisfy `p`. -/
lemma split_on_p_first (h : ∀ x ∈ xs, ¬p x) (sep : α) (hsep : p sep)
(as : list α) : (xs ++ sep :: as).split_on_p p = xs :: as.split_on_p p :=
by { induction xs with hd tl ih, { simp [hsep], }, simp [h hd _, ih (λ t ht, h t (or.inr ht))], }
/-- `intercalate [x]` is the left inverse of `split_on x` -/
lemma intercalate_split_on (x : α) [decidable_eq α] : [x].intercalate (xs.split_on x) = xs :=
begin
simp only [intercalate, split_on],
induction xs with hd tl ih, { simp [join], }, simp only [split_on_p_cons],
cases h' : split_on_p (=x) tl with hd' tl', { exact (split_on_p_ne_nil _ tl h').elim, },
rw h' at ih, split_ifs, { subst h, simp [ih, join], },
cases tl'; simpa [join] using ih,
end
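-- A concrete round trip: splitting on `0` and re-intercalating `[0]` recovers the list
-- (a small sanity check; it reduces by `rfl`):
example : [(0 : ℕ)].intercalate ([1, 0, 2, 3].split_on 0) = [1, 0, 2, 3] := rfl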
/-- `split_on x` is the left inverse of `intercalate [x]`, on the domain of
nonempty lists of lists `ls` none of whose elements contain `x`. -/
lemma split_on_intercalate [decidable_eq α] (x : α) (hx : ∀ l ∈ ls, x ∉ l) (hls : ls ≠ []) :
([x].intercalate ls).split_on x = ls :=
begin
simp only [intercalate],
induction ls with hd tl ih, { contradiction, },
cases tl,
{ suffices : hd.split_on x = [hd], { simpa [join], },
refine split_on_p_eq_single _ _ _, intros y hy H, rw H at hy,
refine hx hd _ hy, simp, },
{ simp only [intersperse_cons_cons, singleton_append, join],
specialize ih _ _, { intros l hl, apply hx l, simp at hl ⊢, tauto, }, { trivial, },
have := split_on_p_first (=x) hd _ x rfl _,
{ simp only [split_on] at ⊢ ih, rw this, rw ih, },
intros y hy H, rw H at hy, exact hx hd (or.inl rfl) hy, }
end
end split_at_on
/-! ### map for partial functions -/
/-- Partial map. If `f : Π a, p a → β` is a partial function defined on
`a : α` satisfying `p`, then `pmap f l h` is essentially the same as `map f l`
but is defined only when all members of `l` satisfy `p`, using the proof
to apply `f`. -/
@[simp] def pmap {p : α → Prop} (f : Π a, p a → β) : Π l : list α, (∀ a ∈ l, p a) → list β
| [] H := []
| (a::l) H := f a (forall_mem_cons.1 H).1 :: pmap l (forall_mem_cons.1 H).2
/-- "Attach" the proof that the elements of `l` are in `l` to produce a new list
with the same elements but in the type `{x // x ∈ l}`. -/
def attach (l : list α) : list {x // x ∈ l} := pmap subtype.mk l (λ a, id)
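-- A minimal concrete use of `pmap`: the proof obligation `∀ a ∈ l, p a` is supplied pointwise
-- (here `p` is the trivially satisfied predicate `∈ [1, 2]`, so the proofs are the memberships):
example :
  pmap (λ (n : ℕ) (h : n ∈ [1, 2]), n + 1) [1, 2] (λ a ha, ha) = [2, 3] := rfl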
theorem sizeof_lt_sizeof_of_mem [has_sizeof α] {x : α} {l : list α} (hx : x ∈ l) :
sizeof x < sizeof l :=
begin
induction l with h t ih; cases hx,
{ rw hx, exact lt_add_of_lt_of_nonneg (lt_one_add _) (nat.zero_le _) },
{ exact lt_add_of_pos_of_le (zero_lt_one_add _) (le_of_lt (ih hx)) }
end
@[simp] theorem pmap_eq_map (p : α → Prop) (f : α → β) (l : list α) (H) :
@pmap _ _ p (λ a _, f a) l H = map f l :=
by induction l; [refl, simp only [*, pmap, map]]; split; refl
theorem pmap_congr {p q : α → Prop} {f : Π a, p a → β} {g : Π a, q a → β}
(l : list α) {H₁ H₂} (h : ∀ a h₁ h₂, f a h₁ = g a h₂) :
pmap f l H₁ = pmap g l H₂ :=
by induction l with _ _ ih; [refl, rw [pmap, pmap, h, ih]]
theorem map_pmap {p : α → Prop} (g : β → γ) (f : Π a, p a → β)
(l H) : map g (pmap f l H) = pmap (λ a h, g (f a h)) l H :=
by induction l; [refl, simp only [*, pmap, map]]; split; refl
theorem pmap_map {p : β → Prop} (g : ∀ b, p b → γ) (f : α → β)
(l H) : pmap g (map f l) H = pmap (λ a h, g (f a) h) l (λ a h, H _ (mem_map_of_mem _ h)) :=
by induction l; [refl, simp only [*, pmap, map]]; split; refl
theorem pmap_eq_map_attach {p : α → Prop} (f : Π a, p a → β)
(l H) : pmap f l H = l.attach.map (λ x, f x.1 (H _ x.2)) :=
by rw [attach, map_pmap]; exact pmap_congr l (λ a h₁ h₂, rfl)
theorem attach_map_val (l : list α) : l.attach.map subtype.val = l :=
by rw [attach, map_pmap]; exact (pmap_eq_map _ _ _ _).trans (map_id l)
@[simp] theorem mem_attach (l : list α) : ∀ x, x ∈ l.attach | ⟨a, h⟩ :=
by have := mem_map.1 (by rw [attach_map_val]; exact h);
{ rcases this with ⟨⟨_, _⟩, m, rfl⟩, exact m }
@[simp] theorem mem_pmap {p : α → Prop} {f : Π a, p a → β}
{l H b} : b ∈ pmap f l H ↔ ∃ a (h : a ∈ l), f a (H a h) = b :=
by simp only [pmap_eq_map_attach, mem_map, mem_attach, true_and, subtype.exists]
@[simp] theorem length_pmap {p : α → Prop} {f : Π a, p a → β}
{l H} : length (pmap f l H) = length l :=
by induction l; [refl, simp only [*, pmap, length]]
@[simp] lemma length_attach (L : list α) : L.attach.length = L.length := length_pmap
@[simp] lemma pmap_eq_nil {p : α → Prop} {f : Π a, p a → β}
{l H} : pmap f l H = [] ↔ l = [] :=
by rw [← length_eq_zero, length_pmap, length_eq_zero]
@[simp] lemma attach_eq_nil (l : list α) : l.attach = [] ↔ l = [] := pmap_eq_nil
lemma last_pmap {α β : Type*} (p : α → Prop) (f : Π a, p a → β)
(l : list α) (hl₁ : ∀ a ∈ l, p a) (hl₂ : l ≠ []) :
(l.pmap f hl₁).last (mt list.pmap_eq_nil.1 hl₂) = f (l.last hl₂) (hl₁ _ (list.last_mem hl₂)) :=
begin
induction l with l_hd l_tl l_ih,
{ apply (hl₂ rfl).elim },
{ cases l_tl,
{ simp },
{ apply l_ih } }
end
lemma nth_pmap {p : α → Prop} (f : Π a, p a → β) {l : list α} (h : ∀ a ∈ l, p a) (n : ℕ) :
nth (pmap f l h) n = option.pmap f (nth l n) (λ x H, h x (nth_mem H)) :=
begin
induction l with hd tl hl generalizing n,
{ simp },
{ cases n; simp [hl] }
end
lemma nth_le_pmap {p : α → Prop} (f : Π a, p a → β) {l : list α} (h : ∀ a ∈ l, p a) {n : ℕ}
(hn : n < (pmap f l h).length) :
nth_le (pmap f l h) n hn = f (nth_le l n (@length_pmap _ _ p f l h ▸ hn))
(h _ (nth_le_mem l n (@length_pmap _ _ p f l h ▸ hn))) :=
begin
induction l with hd tl hl generalizing n,
{ simp only [length, pmap] at hn,
exact absurd hn (not_lt_of_le n.zero_le) },
{ cases n,
{ simp },
{ simpa [hl] } }
end
/-! ### find -/
section find
variables {p : α → Prop} [decidable_pred p] {l : list α} {a : α}
@[simp] theorem find_nil (p : α → Prop) [decidable_pred p] : find p [] = none :=
rfl
@[simp] theorem find_cons_of_pos (l) (h : p a) : find p (a::l) = some a :=
if_pos h
@[simp] theorem find_cons_of_neg (l) (h : ¬ p a) : find p (a::l) = find p l :=
if_neg h
@[simp] theorem find_eq_none : find p l = none ↔ ∀ x ∈ l, ¬ p x :=
begin
induction l with a l IH,
{ exact iff_of_true rfl (forall_mem_nil _) },
rw forall_mem_cons, by_cases h : p a,
{ simp only [find_cons_of_pos _ h, h, not_true, false_and] },
{ rwa [find_cons_of_neg _ h, iff_true_intro h, true_and] }
end
theorem find_some (H : find p l = some a) : p a :=
begin
induction l with b l IH, {contradiction},
by_cases h : p b,
{ rw find_cons_of_pos _ h at H, cases H, exact h },
{ rw find_cons_of_neg _ h at H, exact IH H }
end
@[simp] theorem find_mem (H : find p l = some a) : a ∈ l :=
begin
induction l with b l IH, {contradiction},
by_cases h : p b,
{ rw find_cons_of_pos _ h at H, cases H, apply mem_cons_self },
{ rw find_cons_of_neg _ h at H, exact mem_cons_of_mem _ (IH H) }
end
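-- `find` returns the first element satisfying `p`, if any: e.g.
-- `find (λ n, 2 < n) [1, 3, 5] = some 3` and `find (λ n, 9 < n) [1, 3, 5] = none`.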
end find
/-! ### lookmap -/
section lookmap
variables (f : α → option α)
@[simp] theorem lookmap_nil : [].lookmap f = [] := rfl
@[simp] theorem lookmap_cons_none {a : α} (l : list α) (h : f a = none) :
(a :: l).lookmap f = a :: l.lookmap f :=
by simp [lookmap, h]
@[simp] theorem lookmap_cons_some {a b : α} (l : list α) (h : f a = some b) :
(a :: l).lookmap f = b :: l :=
by simp [lookmap, h]
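-- `lookmap` rewrites only the first element on which `f` succeeds:
-- e.g. `[1, 2, 3].lookmap (λ n, if n = 2 then some 5 else none) = [1, 5, 3]`.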
theorem lookmap_some : ∀ l : list α, l.lookmap some = l
| [] := rfl
| (a::l) := rfl
theorem lookmap_none : ∀ l : list α, l.lookmap (λ _, none) = l
| [] := rfl
| (a::l) := congr_arg (cons a) (lookmap_none l)
theorem lookmap_congr {f g : α → option α} :
∀ {l : list α}, (∀ a ∈ l, f a = g a) → l.lookmap f = l.lookmap g
| [] H := rfl
| (a::l) H := begin
cases forall_mem_cons.1 H with H₁ H₂,
cases h : g a with b,
{ simp [h, H₁.trans h, lookmap_congr H₂] },
{ simp [lookmap_cons_some _ _ h, lookmap_cons_some _ _ (H₁.trans h)] }
end
theorem lookmap_of_forall_not {l : list α} (H : ∀ a ∈ l, f a = none) : l.lookmap f = l :=
(lookmap_congr H).trans (lookmap_none l)
theorem lookmap_map_eq (g : α → β) (h : ∀ a (b ∈ f a), g a = g b) :
∀ l : list α, map g (l.lookmap f) = map g l
| [] := rfl
| (a::l) := begin
cases h' : f a with b,
{ simp [h', lookmap_map_eq] },
{ simp [lookmap_cons_some _ _ h', h _ _ h'] }
end
theorem lookmap_id' (h : ∀ a (b ∈ f a), a = b) (l : list α) : l.lookmap f = l :=
by rw [← map_id (l.lookmap f), lookmap_map_eq, map_id]; exact h
theorem length_lookmap (l : list α) : length (l.lookmap f) = length l :=
by rw [← length_map, lookmap_map_eq _ (λ _, ()), length_map]; simp
end lookmap
/-! ### filter_map -/
@[simp] theorem filter_map_nil (f : α → option β) : filter_map f [] = [] := rfl
@[simp] theorem filter_map_cons_none {f : α → option β} (a : α) (l : list α) (h : f a = none) :
filter_map f (a :: l) = filter_map f l :=
by simp only [filter_map, h]
@[simp] theorem filter_map_cons_some (f : α → option β)
(a : α) (l : list α) {b : β} (h : f a = some b) :
filter_map f (a :: l) = b :: filter_map f l :=
by simp only [filter_map, h]; split; refl
theorem filter_map_cons (f : α → option β) (a : α) (l : list α) :
filter_map f (a :: l) = option.cases_on (f a) (filter_map f l) (λb, b :: filter_map f l) :=
begin
generalize eq : f a = b,
cases b,
{ rw filter_map_cons_none _ _ eq },
{ rw filter_map_cons_some _ _ _ eq },
end
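-- `filter_map` maps and drops `none` results in one pass: e.g.
-- `filter_map (λ n, if n % 2 = 0 then some (n / 2) else none) [1, 2, 3, 4] = [1, 2]`.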
lemma filter_map_append {α β : Type*} (l l' : list α) (f : α → option β) :
filter_map f (l ++ l') = filter_map f l ++ filter_map f l' :=
begin
induction l with hd tl hl generalizing l',
{ simp },
{ rw [cons_append, filter_map, filter_map],
cases f hd;
simp only [filter_map, hl, cons_append, eq_self_iff_true, and_self] }
end
theorem filter_map_eq_map (f : α → β) : filter_map (some ∘ f) = map f :=
begin
funext l,
induction l with a l IH, {refl},
simp only [filter_map_cons_some (some ∘ f) _ _ rfl, IH, map_cons], split; refl
end
theorem filter_map_eq_filter (p : α → Prop) [decidable_pred p] :
filter_map (option.guard p) = filter p :=
begin
funext l,
induction l with a l IH, {refl},
by_cases pa : p a,
{ simp only [filter_map, option.guard, IH, if_pos pa, filter_cons_of_pos _ pa], split; refl },
{ simp only [filter_map, option.guard, IH, if_neg pa, filter_cons_of_neg _ pa] }
end
theorem filter_map_filter_map (f : α → option β) (g : β → option γ) (l : list α) :
filter_map g (filter_map f l) = filter_map (λ x, (f x).bind g) l :=
begin
induction l with a l IH, {refl},
cases h : f a with b,
{ rw [filter_map_cons_none _ _ h, filter_map_cons_none, IH],
simp only [h, option.none_bind'] },
rw filter_map_cons_some _ _ _ h,
cases h' : g b with c;
[ rw [filter_map_cons_none _ _ h', filter_map_cons_none, IH],
rw [filter_map_cons_some _ _ _ h', filter_map_cons_some, IH] ];
simp only [h, h', option.some_bind']
end
theorem map_filter_map (f : α → option β) (g : β → γ) (l : list α) :
map g (filter_map f l) = filter_map (λ x, (f x).map g) l :=
by rw [← filter_map_eq_map, filter_map_filter_map]; refl
theorem filter_map_map (f : α → β) (g : β → option γ) (l : list α) :
filter_map g (map f l) = filter_map (g ∘ f) l :=
by rw [← filter_map_eq_map, filter_map_filter_map]; refl
theorem filter_filter_map (f : α → option β) (p : β → Prop) [decidable_pred p] (l : list α) :
filter p (filter_map f l) = filter_map (λ x, (f x).filter p) l :=
by rw [← filter_map_eq_filter, filter_map_filter_map]; refl
theorem filter_map_filter (p : α → Prop) [decidable_pred p] (f : α → option β) (l : list α) :
filter_map f (filter p l) = filter_map (λ x, if p x then f x else none) l :=
begin
rw [← filter_map_eq_filter, filter_map_filter_map], congr,
funext x,
show (option.guard p x).bind f = ite (p x) (f x) none,
by_cases h : p x,
{ simp only [option.guard, if_pos h, option.some_bind'] },
{ simp only [option.guard, if_neg h, option.none_bind'] }
end
@[simp] theorem filter_map_some (l : list α) : filter_map some l = l :=
by rw filter_map_eq_map; apply map_id
@[simp] theorem mem_filter_map (f : α → option β) (l : list α) {b : β} :
b ∈ filter_map f l ↔ ∃ a, a ∈ l ∧ f a = some b :=
begin
induction l with a l IH,
{ split, { intro H, cases H }, { rintro ⟨_, H, _⟩, cases H } },
cases h : f a with b',
{ have : f a ≠ some b, {rw h, intro, contradiction},
simp only [filter_map_cons_none _ _ h, IH, mem_cons_iff,
or_and_distrib_right, exists_or_distrib, exists_eq_left, this, false_or] },
{ have : f a = some b ↔ b = b',
{ split; intro t, {rw t at h; injection h}, {exact t.symm ▸ h} },
simp only [filter_map_cons_some _ _ _ h, IH, mem_cons_iff,
or_and_distrib_right, exists_or_distrib, this, exists_eq_left] }
end
theorem map_filter_map_of_inv (f : α → option β) (g : β → α)
(H : ∀ x : α, (f x).map g = some x) (l : list α) :
map g (filter_map f l) = l :=
by simp only [map_filter_map, H, filter_map_some]
theorem sublist.filter_map (f : α → option β) {l₁ l₂ : list α}
(s : l₁ <+ l₂) : filter_map f l₁ <+ filter_map f l₂ :=
by induction s with l₁ l₂ a s IH l₁ l₂ a s IH;
simp only [filter_map]; cases f a with b;
simp only [filter_map, IH, sublist.cons, sublist.cons2]
theorem sublist.map (f : α → β) {l₁ l₂ : list α}
(s : l₁ <+ l₂) : map f l₁ <+ map f l₂ :=
filter_map_eq_map f ▸ s.filter_map _
/-! ### reduce_option -/
@[simp] lemma reduce_option_cons_of_some (x : α) (l : list (option α)) :
reduce_option (some x :: l) = x :: l.reduce_option :=
by simp only [reduce_option, filter_map, id.def, eq_self_iff_true, and_self]
@[simp] lemma reduce_option_cons_of_none (l : list (option α)) :
reduce_option (none :: l) = l.reduce_option :=
by simp only [reduce_option, filter_map, id.def]
@[simp] lemma reduce_option_nil : @reduce_option α [] = [] := rfl
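-- e.g. `reduce_option [some 1, none, some 2] = [1, 2]`: the `none`s are dropped
-- and the remaining values are unwrapped.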
@[simp] lemma reduce_option_map {l : list (option α)} {f : α → β} :
reduce_option (map (option.map f) l) = map f (reduce_option l) :=
begin
induction l with hd tl hl,
{ simp only [reduce_option_nil, map_nil] },
{ cases hd;
simpa only [true_and, option.map_some', map, eq_self_iff_true,
reduce_option_cons_of_some] using hl },
end
lemma reduce_option_append (l l' : list (option α)) :
(l ++ l').reduce_option = l.reduce_option ++ l'.reduce_option :=
filter_map_append l l' id
lemma reduce_option_length_le (l : list (option α)) :
l.reduce_option.length ≤ l.length :=
begin
induction l with hd tl hl,
{ simp only [reduce_option_nil, length] },
{ cases hd,
{ exact nat.le_succ_of_le hl },
{ simpa only [length, add_le_add_iff_right, reduce_option_cons_of_some] using hl} }
end
lemma reduce_option_length_eq_iff {l : list (option α)} :
l.reduce_option.length = l.length ↔ ∀ x ∈ l, option.is_some x :=
begin
induction l with hd tl hl,
{ simp only [forall_const, reduce_option_nil, not_mem_nil,
forall_prop_of_false, eq_self_iff_true, length, not_false_iff] },
{ cases hd,
{ simp only [mem_cons_iff, forall_eq_or_imp, bool.coe_sort_ff, false_and,
reduce_option_cons_of_none, length, option.is_some_none, iff_false],
intro H,
have := reduce_option_length_le tl,
rw H at this,
exact absurd (nat.lt_succ_self _) (not_lt_of_le this) },
{ simp only [hl, true_and, mem_cons_iff, forall_eq_or_imp, add_left_inj,
bool.coe_sort_tt, length, option.is_some_some, reduce_option_cons_of_some] } }
end
lemma reduce_option_length_lt_iff {l : list (option α)} :
l.reduce_option.length < l.length ↔ none ∈ l :=
begin
rw [(reduce_option_length_le l).lt_iff_ne, ne, reduce_option_length_eq_iff],
induction l; simp *,
rw [eq_comm, ← option.not_is_some_iff_eq_none, decidable.imp_iff_not_or]
end
lemma reduce_option_singleton (x : option α) :
[x].reduce_option = x.to_list :=
by cases x; refl
lemma reduce_option_concat (l : list (option α)) (x : option α) :
(l.concat x).reduce_option = l.reduce_option ++ x.to_list :=
begin
induction l with hd tl hl generalizing x,
{ cases x;
simp [option.to_list] },
{ simp only [concat_eq_append, reduce_option_append] at hl,
cases hd;
simp [hl, reduce_option_append] }
end
lemma reduce_option_concat_of_some (l : list (option α)) (x : α) :
(l.concat (some x)).reduce_option = l.reduce_option.concat x :=
by simp only [reduce_option_nil, concat_eq_append, reduce_option_append, reduce_option_cons_of_some]
lemma reduce_option_mem_iff {l : list (option α)} {x : α} :
x ∈ l.reduce_option ↔ (some x) ∈ l :=
by simp only [reduce_option, id.def, mem_filter_map, exists_eq_right]
lemma reduce_option_nth_iff {l : list (option α)} {x : α} :
(∃ i, l.nth i = some (some x)) ↔ ∃ i, l.reduce_option.nth i = some x :=
by rw [←mem_iff_nth, ←mem_iff_nth, reduce_option_mem_iff]
/-! ### filter -/
section filter
variables {p : α → Prop} [decidable_pred p]
lemma filter_singleton {a : α} : [a].filter p = if p a then [a] else [] := rfl
theorem filter_eq_foldr (p : α → Prop) [decidable_pred p] (l : list α) :
filter p l = foldr (λ a out, if p a then a :: out else out) [] l :=
by induction l; simp [*, filter]
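-- `filter` keeps exactly the elements satisfying `p`, in order:
-- e.g. `[1, 2, 3, 4].filter (λ n, n % 2 = 0) = [2, 4]`.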
lemma filter_congr' {p q : α → Prop} [decidable_pred p] [decidable_pred q]
: ∀ {l : list α}, (∀ x ∈ l, p x ↔ q x) → filter p l = filter q l
| [] _ := rfl
| (a::l) h := by rw forall_mem_cons at h; by_cases pa : p a;
[simp only [filter_cons_of_pos _ pa, filter_cons_of_pos _ (h.1.1 pa), filter_congr' h.2],
simp only [filter_cons_of_neg _ pa, filter_cons_of_neg _ (mt h.1.2 pa), filter_congr' h.2]];
split; refl
@[simp] theorem filter_subset (l : list α) : filter p l ⊆ l :=
(filter_sublist l).subset
theorem of_mem_filter {a : α} : ∀ {l}, a ∈ filter p l → p a
| (b::l) ain :=
if pb : p b then
have a ∈ b :: filter p l, by simpa only [filter_cons_of_pos _ pb] using ain,
or.elim (eq_or_mem_of_mem_cons this)
(assume : a = b, begin rw [← this] at pb, exact pb end)
(assume : a ∈ filter p l, of_mem_filter this)
else
begin simp only [filter_cons_of_neg _ pb] at ain, exact (of_mem_filter ain) end
theorem mem_of_mem_filter {a : α} {l} (h : a ∈ filter p l) : a ∈ l :=
filter_subset l h
theorem mem_filter_of_mem {a : α} : ∀ {l}, a ∈ l → p a → a ∈ filter p l
| (_::l) (or.inl rfl) pa := by rw filter_cons_of_pos _ pa; apply mem_cons_self
| (b::l) (or.inr ain) pa := if pb : p b
then by rw [filter_cons_of_pos _ pb]; apply mem_cons_of_mem; apply mem_filter_of_mem ain pa
else by rw [filter_cons_of_neg _ pb]; apply mem_filter_of_mem ain pa
@[simp] theorem mem_filter {a : α} {l} : a ∈ filter p l ↔ a ∈ l ∧ p a :=
⟨λ h, ⟨mem_of_mem_filter h, of_mem_filter h⟩, λ ⟨h₁, h₂⟩, mem_filter_of_mem h₁ h₂⟩
lemma monotone_filter_left (p : α → Prop) [decidable_pred p]
⦃l l' : list α⦄ (h : l ⊆ l') : filter p l ⊆ filter p l' :=
begin
intros x hx,
rw [mem_filter] at hx ⊢,
exact ⟨h hx.left, hx.right⟩
end
theorem filter_eq_self {l} : filter p l = l ↔ ∀ a ∈ l, p a :=
begin
induction l with a l ih,
{ exact iff_of_true rfl (forall_mem_nil _) },
rw forall_mem_cons, by_cases p a,
{ rw [filter_cons_of_pos _ h, cons_inj, ih, and_iff_right h] },
{ rw [filter_cons_of_neg _ h],
refine iff_of_false _ (mt and.left h), intro e,
have := filter_sublist l, rw e at this,
exact not_lt_of_ge (length_le_of_sublist this) (lt_succ_self _) }
end
theorem filter_length_eq_length {l} : (filter p l).length = l.length ↔ ∀ a ∈ l, p a :=
iff.trans ⟨eq_of_sublist_of_length_eq l.filter_sublist, congr_arg list.length⟩ filter_eq_self
theorem filter_eq_nil {l} : filter p l = [] ↔ ∀ a ∈ l, ¬p a :=
by simp only [eq_nil_iff_forall_not_mem, mem_filter, not_and]
variable (p)
theorem sublist.filter {l₁ l₂} (s : l₁ <+ l₂) : filter p l₁ <+ filter p l₂ :=
filter_map_eq_filter p ▸ s.filter_map _
lemma monotone_filter_right (l : list α) ⦃p q : α → Prop⦄ [decidable_pred p] [decidable_pred q]
(h : p ≤ q) : l.filter p <+ l.filter q :=
begin
induction l with hd tl IH,
{ refl },
{ by_cases hp : p hd,
{ rw [filter_cons_of_pos _ hp, filter_cons_of_pos _ (h _ hp)],
exact IH.cons_cons hd },
{ rw filter_cons_of_neg _ hp,
by_cases hq : q hd,
{ rw filter_cons_of_pos _ hq,
exact sublist_cons_of_sublist hd IH },
{ rw filter_cons_of_neg _ hq,
exact IH } } }
end
theorem map_filter (f : β → α) (l : list β) :
filter p (map f l) = map f (filter (p ∘ f) l) :=
by rw [← filter_map_eq_map, filter_filter_map, filter_map_filter]; refl
@[simp] theorem filter_filter (q) [decidable_pred q] : ∀ l,
filter p (filter q l) = filter (λ a, p a ∧ q a) l
| [] := rfl
| (a :: l) := by by_cases hp : p a; by_cases hq : q a; simp only [hp, hq, filter, if_true, if_false,
true_and, false_and, filter_filter l, eq_self_iff_true]
@[simp] lemma filter_true {h : decidable_pred (λ a : α, true)} (l : list α) :
@filter α (λ _, true) h l = l :=
by convert filter_eq_self.2 (λ _ _, trivial)
@[simp] lemma filter_false {h : decidable_pred (λ a : α, false)} (l : list α) :
@filter α (λ _, false) h l = [] :=
by convert filter_eq_nil.2 (λ _ _, id)
@[simp] theorem span_eq_take_drop : ∀ (l : list α), span p l = (take_while p l, drop_while p l)
| [] := rfl
| (a::l) :=
if pa : p a then by simp only [span, if_pos pa, span_eq_take_drop l, take_while, drop_while]
else by simp only [span, take_while, drop_while, if_neg pa]
@[simp] theorem take_while_append_drop : ∀ (l : list α), take_while p l ++ drop_while p l = l
| [] := rfl
| (a::l) := if pa : p a then by rw [take_while, drop_while, if_pos pa, if_pos pa, cons_append,
take_while_append_drop l]
else by rw [take_while, drop_while, if_neg pa, if_neg pa, nil_append]
end filter
/-! ### erasep -/
section erasep
variables {p : α → Prop} [decidable_pred p]
@[simp] theorem erasep_nil : [].erasep p = [] := rfl
theorem erasep_cons (a : α) (l : list α) :
(a :: l).erasep p = if p a then l else a :: l.erasep p := rfl
@[simp] theorem erasep_cons_of_pos {a : α} {l : list α} (h : p a) : (a :: l).erasep p = l :=
by simp [erasep_cons, h]
@[simp] theorem erasep_cons_of_neg {a : α} {l : list α} (h : ¬ p a) :
(a::l).erasep p = a :: l.erasep p :=
by simp [erasep_cons, h]
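-- `erasep` removes only the first element satisfying `p`:
-- e.g. `[1, 2, 3, 4].erasep (λ n, 2 < n) = [1, 2, 4]`.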
theorem erasep_of_forall_not {l : list α}
(h : ∀ a ∈ l, ¬ p a) : l.erasep p = l :=
by induction l with _ _ ih; [refl,
simp [h _ (or.inl rfl), ih (forall_mem_of_forall_mem_cons h)]]
theorem exists_of_erasep {l : list α} {a} (al : a ∈ l) (pa : p a) :
∃ a l₁ l₂, (∀ b ∈ l₁, ¬ p b) ∧ p a ∧ l = l₁ ++ a :: l₂ ∧ l.erasep p = l₁ ++ l₂ :=
begin
induction l with b l IH, {cases al},
by_cases pb : p b,
{ exact ⟨b, [], l, forall_mem_nil _, pb, by simp [pb]⟩ },
{ rcases al with rfl | al, {exact pb.elim pa},
rcases IH al with ⟨c, l₁, l₂, h₁, h₂, h₃, h₄⟩,
exact ⟨c, b::l₁, l₂, forall_mem_cons.2 ⟨pb, h₁⟩,
h₂, by rw h₃; refl, by simp [pb, h₄]⟩ }
end
theorem exists_or_eq_self_of_erasep (p : α → Prop) [decidable_pred p] (l : list α) :
l.erasep p = l ∨ ∃ a l₁ l₂, (∀ b ∈ l₁, ¬ p b) ∧ p a ∧ l = l₁ ++ a :: l₂ ∧ l.erasep p = l₁ ++ l₂ :=
begin
by_cases h : ∃ a ∈ l, p a,
{ rcases h with ⟨a, ha, pa⟩,
exact or.inr (exists_of_erasep ha pa) },
{ simp at h, exact or.inl (erasep_of_forall_not h) }
end
@[simp] theorem length_erasep_of_mem {l : list α} {a} (al : a ∈ l) (pa : p a) :
length (l.erasep p) = pred (length l) :=
by rcases exists_of_erasep al pa with ⟨_, l₁, l₂, _, _, e₁, e₂⟩;
rw e₂; simp [-add_comm, e₁]; refl
@[simp] lemma length_erasep_add_one {l : list α} {a} (al : a ∈ l) (pa : p a) :
(l.erasep p).length + 1 = l.length :=
let ⟨_, l₁, l₂, _, _, h₁, h₂⟩ := exists_of_erasep al pa in
by { rw [h₂, h₁, length_append, length_append], refl }
theorem erasep_append_left {a : α} (pa : p a) :
∀ {l₁ : list α} (l₂), a ∈ l₁ → (l₁++l₂).erasep p = l₁.erasep p ++ l₂
| (x::xs) l₂ h := begin
by_cases h' : p x; simp [h'],
rw erasep_append_left l₂ (mem_of_ne_of_mem (mt _ h') h),
rintro rfl, exact pa
end
theorem erasep_append_right :
∀ {l₁ : list α} (l₂), (∀ b ∈ l₁, ¬ p b) → (l₁++l₂).erasep p = l₁ ++ l₂.erasep p
| [] l₂ h := rfl
| (x::xs) l₂ h := by simp [(forall_mem_cons.1 h).1,
erasep_append_right _ (forall_mem_cons.1 h).2]
theorem erasep_sublist (l : list α) : l.erasep p <+ l :=
by rcases exists_or_eq_self_of_erasep p l with h | ⟨c, l₁, l₂, h₁, h₂, h₃, h₄⟩;
[rw h, {rw [h₄, h₃], simp}]
theorem erasep_subset (l : list α) : l.erasep p ⊆ l :=
(erasep_sublist l).subset
theorem sublist.erasep {l₁ l₂ : list α} (s : l₁ <+ l₂) : l₁.erasep p <+ l₂.erasep p :=
begin
induction s,
case list.sublist.slnil { refl },
case list.sublist.cons : l₁ l₂ a s IH
{ by_cases h : p a; simp [h],
exacts [IH.trans (erasep_sublist _), IH.cons _ _ _] },
case list.sublist.cons2 : l₁ l₂ a s IH
{ by_cases h : p a; simp [h],
exacts [s, IH.cons2 _ _ _] }
end
theorem mem_of_mem_erasep {a : α} {l : list α} : a ∈ l.erasep p → a ∈ l :=
@erasep_subset _ _ _ _ _
@[simp] theorem mem_erasep_of_neg {a : α} {l : list α} (pa : ¬ p a) : a ∈ l.erasep p ↔ a ∈ l :=
⟨mem_of_mem_erasep, λ al, begin
rcases exists_or_eq_self_of_erasep p l with h | ⟨c, l₁, l₂, h₁, h₂, h₃, h₄⟩,
{ rwa h },
{ rw h₄, rw h₃ at al,
have : a ≠ c, {rintro rfl, exact pa.elim h₂},
simpa [this] using al }
end⟩
theorem erasep_map (f : β → α) :
∀ (l : list β), (map f l).erasep p = map f (l.erasep (p ∘ f))
| [] := rfl
| (b::l) := by by_cases p (f b); simp [h, erasep_map l]
@[simp] theorem extractp_eq_find_erasep :
∀ l : list α, extractp p l = (find p l, erasep p l)
| [] := rfl
| (a::l) := by by_cases pa : p a; simp [extractp, pa, extractp_eq_find_erasep l]
end erasep
/-! ### erase -/
section erase
variable [decidable_eq α]
@[simp] theorem erase_nil (a : α) : [].erase a = [] := rfl
theorem erase_cons (a b : α) (l : list α) :
(b :: l).erase a = if b = a then l else b :: l.erase a := rfl
@[simp] theorem erase_cons_head (a : α) (l : list α) : (a :: l).erase a = l :=
by simp only [erase_cons, if_pos rfl]
@[simp] theorem erase_cons_tail {a b : α} (l : list α) (h : b ≠ a) :
(b::l).erase a = b :: l.erase a :=
by simp only [erase_cons, if_neg h]; split; refl
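-- `erase` removes only the first occurrence: e.g. `[1, 2, 3, 2].erase 2 = [1, 3, 2]`.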
theorem erase_eq_erasep (a : α) (l : list α) : l.erase a = l.erasep (eq a) :=
by { induction l with b l, {refl},
by_cases a = b; [simp [h], simp [h, ne.symm h, *]] }
@[simp, priority 980]
theorem erase_of_not_mem {a : α} {l : list α} (h : a ∉ l) : l.erase a = l :=
by rw [erase_eq_erasep, erasep_of_forall_not]; rintro b h' rfl; exact h h'
theorem exists_erase_eq {a : α} {l : list α} (h : a ∈ l) :
∃ l₁ l₂, a ∉ l₁ ∧ l = l₁ ++ a :: l₂ ∧ l.erase a = l₁ ++ l₂ :=
by rcases exists_of_erasep h rfl with ⟨_, l₁, l₂, h₁, rfl, h₂, h₃⟩;
rw erase_eq_erasep; exact ⟨l₁, l₂, λ h, h₁ _ h rfl, h₂, h₃⟩
@[simp] theorem length_erase_of_mem {a : α} {l : list α} (h : a ∈ l) :
length (l.erase a) = pred (length l) :=
by rw erase_eq_erasep; exact length_erasep_of_mem h rfl
@[simp] lemma length_erase_add_one {a : α} {l : list α} (h : a ∈ l) :
(l.erase a).length + 1 = l.length :=
by rw [erase_eq_erasep, length_erasep_add_one h rfl]
theorem erase_append_left {a : α} {l₁ : list α} (l₂) (h : a ∈ l₁) :
(l₁++l₂).erase a = l₁.erase a ++ l₂ :=
by simp [erase_eq_erasep]; exact erasep_append_left (by refl) l₂ h
theorem erase_append_right {a : α} {l₁ : list α} (l₂) (h : a ∉ l₁) :
(l₁++l₂).erase a = l₁ ++ l₂.erase a :=
by rw [erase_eq_erasep, erase_eq_erasep, erasep_append_right];
rintro b h' rfl; exact h h'
theorem erase_sublist (a : α) (l : list α) : l.erase a <+ l :=
by rw erase_eq_erasep; apply erasep_sublist
theorem erase_subset (a : α) (l : list α) : l.erase a ⊆ l :=
(erase_sublist a l).subset
theorem sublist.erase (a : α) {l₁ l₂ : list α} (h : l₁ <+ l₂) : l₁.erase a <+ l₂.erase a :=
by simp [erase_eq_erasep]; exact sublist.erasep h
theorem mem_of_mem_erase {a b : α} {l : list α} : a ∈ l.erase b → a ∈ l :=
@erase_subset _ _ _ _ _
@[simp] theorem mem_erase_of_ne {a b : α} {l : list α} (ab : a ≠ b) : a ∈ l.erase b ↔ a ∈ l :=
by rw erase_eq_erasep; exact mem_erasep_of_neg ab.symm
theorem erase_comm (a b : α) (l : list α) : (l.erase a).erase b = (l.erase b).erase a :=
if ab : a = b then by rw ab else
if ha : a ∈ l then
if hb : b ∈ l then match l, l.erase a, exists_erase_eq ha, hb with
| ._, ._, ⟨l₁, l₂, ha', rfl, rfl⟩, hb :=
if h₁ : b ∈ l₁ then
by rw [erase_append_left _ h₁, erase_append_left _ h₁,
erase_append_right _ (mt mem_of_mem_erase ha'), erase_cons_head]
else
by rw [erase_append_right _ h₁, erase_append_right _ h₁, erase_append_right _ ha',
erase_cons_tail _ ab, erase_cons_head]
end
else by simp only [erase_of_not_mem hb, erase_of_not_mem (mt mem_of_mem_erase hb)]
else by simp only [erase_of_not_mem ha, erase_of_not_mem (mt mem_of_mem_erase ha)]
theorem map_erase [decidable_eq β] {f : α → β} (finj : injective f) {a : α}
(l : list α) : map f (l.erase a) = (map f l).erase (f a) :=
have this : eq a = eq (f a) ∘ f, { ext b, simp [finj.eq_iff] },
by simp [erase_eq_erasep, erase_eq_erasep, erasep_map, this]
theorem map_foldl_erase [decidable_eq β] {f : α → β} (finj : injective f) {l₁ l₂ : list α} :
map f (foldl list.erase l₁ l₂) = foldl (λ l a, l.erase (f a)) (map f l₁) l₂ :=
by induction l₂ generalizing l₁; [refl,
simp only [foldl_cons, map_erase finj, *]]
end erase
/-! ### diff -/
section diff
variable [decidable_eq α]
@[simp] theorem diff_nil (l : list α) : l.diff [] = l := rfl
@[simp] theorem diff_cons (l₁ l₂ : list α) (a : α) : l₁.diff (a::l₂) = (l₁.erase a).diff l₂ :=
if h : a ∈ l₁ then by simp only [list.diff, if_pos h]
else by simp only [list.diff, if_neg h, erase_of_not_mem h]
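-- `diff` erases one occurrence from `l₁` for each element of `l₂`:
-- e.g. `[1, 2, 2, 3].diff [2, 3] = [1, 2]`.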
lemma diff_cons_right (l₁ l₂ : list α) (a : α) : l₁.diff (a::l₂) = (l₁.diff l₂).erase a :=
begin
induction l₂ with b l₂ ih generalizing l₁ a,
{ simp_rw [diff_cons, diff_nil] },
{ rw [diff_cons, diff_cons, erase_comm, ← diff_cons, ih, ← diff_cons] }
end
lemma diff_erase (l₁ l₂ : list α) (a : α) : (l₁.diff l₂).erase a = (l₁.erase a).diff l₂ :=
by rw [← diff_cons_right, diff_cons]
@[simp] theorem nil_diff (l : list α) : [].diff l = [] :=
by induction l; [refl, simp only [*, diff_cons, erase_of_not_mem (not_mem_nil _)]]
lemma cons_diff (a : α) (l₁ l₂ : list α) :
(a :: l₁).diff l₂ = if a ∈ l₂ then l₁.diff (l₂.erase a) else a :: l₁.diff l₂ :=
begin
induction l₂ with b l₂ ih, { refl },
rcases eq_or_ne a b with rfl|hne,
{ simp },
{ simp only [mem_cons_iff, *, false_or, diff_cons_right],
split_ifs with h₂; simp [diff_erase, list.erase, hne, hne.symm] }
end
lemma cons_diff_of_mem {a : α} {l₂ : list α} (h : a ∈ l₂) (l₁ : list α) :
(a :: l₁).diff l₂ = l₁.diff (l₂.erase a) :=
by rw [cons_diff, if_pos h]
lemma cons_diff_of_not_mem {a : α} {l₂ : list α} (h : a ∉ l₂) (l₁ : list α) :
(a :: l₁).diff l₂ = a :: l₁.diff l₂ :=
by rw [cons_diff, if_neg h]
theorem diff_eq_foldl : ∀ (l₁ l₂ : list α), l₁.diff l₂ = foldl list.erase l₁ l₂
| l₁ [] := rfl
| l₁ (a::l₂) := (diff_cons l₁ l₂ a).trans (diff_eq_foldl _ _)
@[simp] theorem diff_append (l₁ l₂ l₃ : list α) : l₁.diff (l₂ ++ l₃) = (l₁.diff l₂).diff l₃ :=
by simp only [diff_eq_foldl, foldl_append]
@[simp] theorem map_diff [decidable_eq β] {f : α → β} (finj : injective f) {l₁ l₂ : list α} :
map f (l₁.diff l₂) = (map f l₁).diff (map f l₂) :=
by simp only [diff_eq_foldl, foldl_map, map_foldl_erase finj]
theorem diff_sublist : ∀ l₁ l₂ : list α, l₁.diff l₂ <+ l₁
| l₁ [] := sublist.refl _
| l₁ (a::l₂) := calc l₁.diff (a :: l₂) = (l₁.erase a).diff l₂ : diff_cons _ _ _
... <+ l₁.erase a : diff_sublist _ _
... <+ l₁ : list.erase_sublist _ _
theorem diff_subset (l₁ l₂ : list α) : l₁.diff l₂ ⊆ l₁ :=
(diff_sublist _ _).subset
theorem mem_diff_of_mem {a : α} : ∀ {l₁ l₂ : list α}, a ∈ l₁ → a ∉ l₂ → a ∈ l₁.diff l₂
| l₁ [] h₁ h₂ := h₁
| l₁ (b::l₂) h₁ h₂ := by rw diff_cons; exact
mem_diff_of_mem ((mem_erase_of_ne (ne_of_not_mem_cons h₂)).2 h₁) (not_mem_of_not_mem_cons h₂)
theorem sublist.diff_right : ∀ {l₁ l₂ l₃: list α}, l₁ <+ l₂ → l₁.diff l₃ <+ l₂.diff l₃
| l₁ l₂ [] h := h
| l₁ l₂ (a::l₃) h := by simp only
[diff_cons, (h.erase _).diff_right]
theorem erase_diff_erase_sublist_of_sublist {a : α} : ∀ {l₁ l₂ : list α},
l₁ <+ l₂ → (l₂.erase a).diff (l₁.erase a) <+ l₂.diff l₁
| [] l₂ h := erase_sublist _ _
| (b::l₁) l₂ h := if heq : b = a then by simp only [heq, erase_cons_head, diff_cons]
else by simpa only [erase_cons_head, erase_cons_tail _ heq, diff_cons,
erase_comm a b l₂]
using erase_diff_erase_sublist_of_sublist (h.erase b)
end diff
/-! ### enum -/
theorem length_enum_from : ∀ n (l : list α), length (enum_from n l) = length l
| n [] := rfl
| n (a::l) := congr_arg nat.succ (length_enum_from _ _)
theorem length_enum : ∀ (l : list α), length (enum l) = length l := length_enum_from _
@[simp] theorem enum_from_nth : ∀ n (l : list α) m,
nth (enum_from n l) m = (λ a, (n + m, a)) <$> nth l m
| n [] m := rfl
| n (a :: l) 0 := rfl
| n (a :: l) (m+1) := (enum_from_nth (n+1) l m).trans $
by rw [add_right_comm]; refl
@[simp] theorem enum_nth : ∀ (l : list α) n,
nth (enum l) n = (λ a, (n, a)) <$> nth l n :=
by simp only [enum, enum_from_nth, zero_add]; intros; refl
@[simp] theorem enum_from_map_snd : ∀ n (l : list α),
map prod.snd (enum_from n l) = l
| n [] := rfl
| n (a :: l) := congr_arg (cons _) (enum_from_map_snd _ _)
@[simp] theorem enum_map_snd : ∀ (l : list α),
map prod.snd (enum l) = l := enum_from_map_snd _
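-- e.g. `enum ['a', 'b'] = [(0, 'a'), (1, 'b')]` and
-- `enum_from 5 ['a', 'b'] = [(5, 'a'), (6, 'b')]`.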
theorem mem_enum_from {x : α} {i : ℕ} :
∀ {j : ℕ} (xs : list α), (i, x) ∈ xs.enum_from j → j ≤ i ∧ i < j + xs.length ∧ x ∈ xs
| j [] := by simp [enum_from]
| j (y :: ys) :=
suffices i = j ∧ x = y ∨ (i, x) ∈ enum_from (j + 1) ys →
j ≤ i ∧ i < j + (length ys + 1) ∧ (x = y ∨ x ∈ ys),
by simpa [enum_from, mem_enum_from ys],
begin
rintro (h|h),
{ refine ⟨le_of_eq h.1.symm,h.1 ▸ _,or.inl h.2⟩,
apply nat.lt_add_of_pos_right; simp },
{ obtain ⟨hji, hijlen, hmem⟩ := mem_enum_from _ h,
refine ⟨_, _, _⟩,
{ exact le_trans (nat.le_succ _) hji },
{ convert hijlen using 1, ac_refl },
{ simp [hmem] } }
end
section choose
variables (p : α → Prop) [decidable_pred p] (l : list α)
lemma choose_spec (hp : ∃ a, a ∈ l ∧ p a) : choose p l hp ∈ l ∧ p (choose p l hp) :=
(choose_x p l hp).property
lemma choose_mem (hp : ∃ a, a ∈ l ∧ p a) : choose p l hp ∈ l := (choose_spec _ _ _).1
lemma choose_property (hp : ∃ a, a ∈ l ∧ p a) : p (choose p l hp) := (choose_spec _ _ _).2
end choose
/-! ### map₂_left' -/
section map₂_left'
-- The definitional equalities for `map₂_left'` can already be used by the
-- simplifier because `map₂_left'` is marked `@[simp]`.
@[simp] theorem map₂_left'_nil_right (f : α → option β → γ) (as) :
map₂_left' f as [] = (as.map (λ a, f a none), []) :=
by cases as; refl
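-- e.g. `map₂_left' f [a, b] [] = ([f a none, f b none], [])`: with an empty right
-- list, every left element is paired with `none` and nothing is left over.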
end map₂_left'
/-! ### map₂_right' -/
section map₂_right'
variables (f : option α → β → γ) (a : α) (as : list α) (b : β) (bs : list β)
@[simp] theorem map₂_right'_nil_left :
map₂_right' f [] bs = (bs.map (f none), []) :=
by cases bs; refl
@[simp] theorem map₂_right'_nil_right :
map₂_right' f as [] = ([], as) :=
rfl
@[simp] theorem map₂_right'_nil_cons :
map₂_right' f [] (b :: bs) = (f none b :: bs.map (f none), []) :=
rfl
@[simp] theorem map₂_right'_cons_cons :
map₂_right' f (a :: as) (b :: bs) =
let rec := map₂_right' f as bs in
(f (some a) b :: rec.fst, rec.snd) :=
rfl
end map₂_right'
/-! ### zip_left' -/
section zip_left'
variables (a : α) (as : list α) (b : β) (bs : list β)
@[simp] theorem zip_left'_nil_right :
zip_left' as ([] : list β) = (as.map (λ a, (a, none)), []) :=
by cases as; refl
@[simp] theorem zip_left'_nil_left :
zip_left' ([] : list α) bs = ([], bs) :=
rfl
@[simp] theorem zip_left'_cons_nil :
zip_left' (a :: as) ([] : list β) = ((a, none) :: as.map (λ a, (a, none)), []) :=
rfl
@[simp] theorem zip_left'_cons_cons :
zip_left' (a :: as) (b :: bs) =
let rec := zip_left' as bs in
((a, some b) :: rec.fst, rec.snd) :=
rfl
end zip_left'
/-! ### zip_right' -/
section zip_right'
variables (a : α) (as : list α) (b : β) (bs : list β)
@[simp] theorem zip_right'_nil_left :
zip_right' ([] : list α) bs = (bs.map (λ b, (none, b)), []) :=
by cases bs; refl
@[simp] theorem zip_right'_nil_right :
zip_right' as ([] : list β) = ([], as) :=
rfl
@[simp] theorem zip_right'_nil_cons :
zip_right' ([] : list α) (b :: bs) = ((none, b) :: bs.map (λ b, (none, b)), []) :=
rfl
@[simp] theorem zip_right'_cons_cons :
zip_right' (a :: as) (b :: bs) =
let rec := zip_right' as bs in
((some a, b) :: rec.fst, rec.snd) :=
rfl
end zip_right'
/-! ### map₂_left -/
section map₂_left
variables (f : α → option β → γ) (as : list α)
-- The definitional equalities for `map₂_left` can already be used by the
-- simplifier because `map₂_left` is marked `@[simp]`.
@[simp] theorem map₂_left_nil_right :
map₂_left f as [] = as.map (λ a, f a none) :=
by cases as; refl
theorem map₂_left_eq_map₂_left' : ∀ as bs,
map₂_left f as bs = (map₂_left' f as bs).fst
| [] bs := by simp!
| (a :: as) [] := by simp!
| (a :: as) (b :: bs) := by simp! [*]
theorem map₂_left_eq_map₂ : ∀ as bs,
length as ≤ length bs →
map₂_left f as bs = map₂ (λ a b, f a (some b)) as bs
| [] [] h := by simp!
| [] (b :: bs) h := by simp!
| (a :: as) [] h := by { simp at h, contradiction }
| (a :: as) (b :: bs) h := by { simp at h, simp! [*] }
end map₂_left
/-! ### map₂_right -/
section map₂_right
variables (f : option α → β → γ) (a : α) (as : list α) (b : β) (bs : list β)
@[simp] theorem map₂_right_nil_left :
map₂_right f [] bs = bs.map (f none) :=
by cases bs; refl
@[simp] theorem map₂_right_nil_right :
map₂_right f as [] = [] :=
rfl
@[simp] theorem map₂_right_nil_cons :
map₂_right f [] (b :: bs) = f none b :: bs.map (f none) :=
rfl
@[simp] theorem map₂_right_cons_cons :
map₂_right f (a :: as) (b :: bs) = f (some a) b :: map₂_right f as bs :=
rfl
theorem map₂_right_eq_map₂_right' :
map₂_right f as bs = (map₂_right' f as bs).fst :=
by simp only [map₂_right, map₂_right', map₂_left_eq_map₂_left']
theorem map₂_right_eq_map₂ (h : length bs ≤ length as) :
map₂_right f as bs = map₂ (λ a b, f (some a) b) as bs :=
begin
have : (λ a b, flip f a (some b)) = (flip (λ a b, f (some a) b)) := rfl,
simp only [map₂_right, map₂_left_eq_map₂, map₂_flip, *]
end
end map₂_right
/-! ### zip_left -/
section zip_left
variables (a : α) (as : list α) (b : β) (bs : list β)
@[simp] theorem zip_left_nil_right :
zip_left as ([] : list β) = as.map (λ a, (a, none)) :=
by cases as; refl
@[simp] theorem zip_left_nil_left :
zip_left ([] : list α) bs = [] :=
rfl
@[simp] theorem zip_left_cons_nil :
zip_left (a :: as) ([] : list β) = (a, none) :: as.map (λ a, (a, none)) :=
rfl
@[simp] theorem zip_left_cons_cons :
zip_left (a :: as) (b :: bs) = (a, some b) :: zip_left as bs :=
rfl
theorem zip_left_eq_zip_left' :
zip_left as bs = (zip_left' as bs).fst :=
by simp only [zip_left, zip_left', map₂_left_eq_map₂_left']
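-- e.g. `zip_left [1, 2, 3] ["a"] = [(1, some "a"), (2, none), (3, none)]`:
-- the result has the length of the left list, padded with `none` on the right.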
end zip_left
/-! ### zip_right -/
section zip_right
variables (a : α) (as : list α) (b : β) (bs : list β)
@[simp] theorem zip_right_nil_left :
zip_right ([] : list α) bs = bs.map (λ b, (none, b)) :=
by cases bs; refl
@[simp] theorem zip_right_nil_right :
zip_right as ([] : list β) = [] :=
rfl
@[simp] theorem zip_right_nil_cons :
zip_right ([] : list α) (b :: bs) = (none, b) :: bs.map (λ b, (none, b)) :=
rfl
@[simp] theorem zip_right_cons_cons :
zip_right (a :: as) (b :: bs) = (some a, b) :: zip_right as bs :=
rfl
theorem zip_right_eq_zip_right' :
zip_right as bs = (zip_right' as bs).fst :=
by simp only [zip_right, zip_right', map₂_right_eq_map₂_right']
end zip_right
/-! ### to_chunks -/
section to_chunks
@[simp] theorem to_chunks_nil (n) : @to_chunks α n [] = [] := by cases n; refl
theorem to_chunks_aux_eq (n) : ∀ xs i,
@to_chunks_aux α n xs i = (xs.take i, (xs.drop i).to_chunks (n+1))
| [] i := by cases i; refl
| (x::xs) 0 := by rw [to_chunks_aux, drop, to_chunks]; cases to_chunks_aux n xs n; refl
| (x::xs) (i+1) := by rw [to_chunks_aux, to_chunks_aux_eq]; refl
theorem to_chunks_eq_cons' (n) : ∀ {xs : list α} (h : xs ≠ []),
xs.to_chunks (n+1) = xs.take (n+1) :: (xs.drop (n+1)).to_chunks (n+1)
| [] e := (e rfl).elim
| (x::xs) _ := by rw [to_chunks, to_chunks_aux_eq]; refl
theorem to_chunks_eq_cons : ∀ {n} {xs : list α} (n0 : n ≠ 0) (x0 : xs ≠ []),
xs.to_chunks n = xs.take n :: (xs.drop n).to_chunks n
| 0 _ e := (e rfl).elim
| (n+1) xs _ := to_chunks_eq_cons' _
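-- e.g. `[1, 2, 3, 4, 5].to_chunks 2 = [[1, 2], [3, 4], [5]]`: repeatedly take `n`
-- elements, with a shorter final chunk if `n` does not divide the length.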
theorem to_chunks_aux_join {n} : ∀ {xs i l L}, @to_chunks_aux α n xs i = (l, L) → l ++ L.join = xs
| [] _ _ _ rfl := rfl
| (x::xs) i l L e := begin
cases i; [
cases e' : to_chunks_aux n xs n with l L,
cases e' : to_chunks_aux n xs i with l L];
{ rw [to_chunks_aux, e', to_chunks_aux] at e, cases e,
exact (congr_arg (cons x) (to_chunks_aux_join e') : _) }
end
@[simp] theorem to_chunks_join : ∀ n xs, (@to_chunks α n xs).join = xs
| n [] := by cases n; refl
| 0 (x::xs) := by simp only [to_chunks, join]; rw append_nil
| (n+1) (x::xs) := begin
rw to_chunks,
cases e : to_chunks_aux n xs n with l L,
exact (congr_arg (cons x) (to_chunks_aux_join e) : _),
end
theorem to_chunks_length_le : ∀ n xs, n ≠ 0 → ∀ l : list α,
l ∈ @to_chunks α n xs → l.length ≤ n
| 0 _ e _ := (e rfl).elim
| (n+1) xs _ l := begin
refine (measure_wf length).induction xs _, intros xs IH h,
by_cases x0 : xs = [], {subst xs, cases h},
rw to_chunks_eq_cons' _ x0 at h, rcases h with rfl|h,
{ apply length_take_le },
{ refine IH _ _ h,
simp only [measure, inv_image, length_drop],
exact tsub_lt_self (length_pos_iff_ne_nil.2 x0) (succ_pos _) },
end
end to_chunks
/-! ### all₂ -/
section all₂
variables {p q : α → Prop} {l : list α}
@[simp] lemma all₂_cons (p : α → Prop) (x : α) : ∀ (l : list α), all₂ p (x :: l) ↔ p x ∧ all₂ p l
| [] := (and_true _).symm
| (x :: l) := iff.rfl
lemma all₂_iff_forall : ∀ {l : list α}, all₂ p l ↔ ∀ x ∈ l, p x
| [] := (iff_true_intro $ ball_nil _).symm
| (x :: l) := by rw [ball_cons, all₂_cons, all₂_iff_forall]
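-- `all₂ p l` unfolds to an iterated conjunction: e.g. `all₂ p [a, b, c]`
-- unfolds to `p a ∧ p b ∧ p c`, as the `all₂_cons` equations show.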
lemma all₂.imp (h : ∀ x, p x → q x) : ∀ {l : list α}, all₂ p l → all₂ q l
| [] := id
| (x :: l) := by simpa using and.imp (h x) all₂.imp
@[simp] lemma all₂_map_iff {p : β → Prop} (f : α → β) : all₂ p (l.map f) ↔ all₂ (p ∘ f) l :=
by induction l; simp *
instance (p : α → Prop) [decidable_pred p] : decidable_pred (all₂ p) :=
λ l, decidable_of_iff' _ all₂_iff_forall
end all₂
/-! ### Retroattributes
The list definitions happen earlier than `to_additive`, so here we tag the few multiplicative
definitions that couldn't be tagged earlier.
-/
attribute [to_additive] list.prod -- `list.sum`
attribute [to_additive] alternating_prod -- `list.alternating_sum`
/-! ### Miscellaneous lemmas -/
theorem ilast'_mem : ∀ a l, @ilast' α a l ∈ a :: l
| a [] := or.inl rfl
| a (b::l) := or.inr (ilast'_mem b l)
@[simp] lemma nth_le_attach (L : list α) (i) (H : i < L.attach.length) :
(L.attach.nth_le i H).1 = L.nth_le i (length_attach L ▸ H) :=
calc (L.attach.nth_le i H).1
= (L.attach.map subtype.val).nth_le i (by simpa using H) : by rw nth_le_map'
... = L.nth_le i _ : by congr; apply attach_map_val
@[simp]
theorem mem_map_swap (x : α) (y : β) (xs : list (α × β)) :
(y, x) ∈ map prod.swap xs ↔ (x, y) ∈ xs :=
begin
induction xs with x xs,
{ simp only [not_mem_nil, map_nil] },
{ cases x with a b,
simp only [mem_cons_iff, prod.mk.inj_iff, map, prod.swap_prod_mk,
prod.exists, xs_ih, and_comm] },
end
lemma slice_eq (xs : list α) (n m : ℕ) :
slice n m xs = xs.take n ++ xs.drop (n+m) :=
begin
induction n generalizing xs,
{ simp [slice] },
{ cases xs; simp [slice, *, nat.succ_add], }
end
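-- `slice n m xs` removes `m` elements starting at index `n`:
-- e.g. `[0, 1, 2, 3, 4].slice 1 2 = [0, 3, 4]`, as `slice_eq` shows.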
lemma sizeof_slice_lt [has_sizeof α] (i j : ℕ) (hj : 0 < j) (xs : list α) (hi : i < xs.length) :
sizeof (list.slice i j xs) < sizeof xs :=
begin
induction xs generalizing i j,
case list.nil : i j h
{ cases hi },
case list.cons : x xs xs_ih i j h
{ cases i; simp only [-slice_eq, list.slice],
{ cases j, cases h,
dsimp only [drop], unfold_wf,
apply @lt_of_le_of_lt _ _ _ xs.sizeof,
{ clear_except,
induction xs generalizing j; unfold_wf,
case list.nil : j
{ refl },
case list.cons : xs_hd xs_tl xs_ih j
{ cases j; unfold_wf, refl,
transitivity, apply xs_ih,
simp }, },
unfold_wf, apply zero_lt_one_add, },
{ unfold_wf, apply xs_ih _ _ h,
apply lt_of_succ_lt_succ hi, } },
end
end list
|
aec2507e489819cc9dba24227aab6bda941ae131
|
74addaa0e41490cbaf2abd313a764c96df57b05d
|
/Mathlib/field_theory/perfect_closure_auto.lean
|
660fcaa26b80a04ab48d01c326c5165d8dc20243
|
[] |
no_license
|
AurelienSaue/Mathlib4_auto
|
f538cfd0980f65a6361eadea39e6fc639e9dae14
|
590df64109b08190abe22358fabc3eae000943f2
|
refs/heads/master
| 1,683,906,849,776
| 1,622,564,669,000
| 1,622,564,669,000
| 371,723,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 15,232
|
lean
|
/-
Copyright (c) 2018 Kenny Lau. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kenny Lau, Yury Kudryashov
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.algebra.char_p.basic
import Mathlib.data.equiv.ring
import Mathlib.algebra.group_with_zero.power
import Mathlib.algebra.iterate_hom
import Mathlib.PostPort
universes u l v u_1
namespace Mathlib
/-!
# The perfect closure of a field
-/
/-- A perfect ring is a ring of characteristic `p` in which every element has a `p`-th root. -/
class perfect_ring (R : Type u) [comm_semiring R] (p : ℕ) [fact (nat.prime p)] [char_p R p] where
pth_root' : R → R
frobenius_pth_root' : ∀ (x : R), coe_fn (frobenius R p) (pth_root' x) = x
pth_root_frobenius' : ∀ (x : R), pth_root' (coe_fn (frobenius R p) x) = x
/-- Frobenius automorphism of a perfect ring. -/
def frobenius_equiv (R : Type u) [comm_semiring R] (p : ℕ) [fact (nat.prime p)] [char_p R p]
[perfect_ring R p] : R ≃+* R :=
ring_equiv.mk (ring_hom.to_fun (frobenius R p)) (perfect_ring.pth_root' p)
perfect_ring.pth_root_frobenius' perfect_ring.frobenius_pth_root' sorry sorry
/-- `p`-th root of an element in a `perfect_ring` as a `ring_hom`. -/
def pth_root (R : Type u) [comm_semiring R] (p : ℕ) [fact (nat.prime p)] [char_p R p]
[perfect_ring R p] : R →+* R :=
↑(ring_equiv.symm (frobenius_equiv R p))
@[simp] theorem coe_frobenius_equiv {R : Type u} [comm_semiring R] {p : ℕ} [fact (nat.prime p)]
[char_p R p] [perfect_ring R p] : ⇑(frobenius_equiv R p) = ⇑(frobenius R p) :=
rfl
@[simp] theorem coe_frobenius_equiv_symm {R : Type u} [comm_semiring R] {p : ℕ} [fact (nat.prime p)]
[char_p R p] [perfect_ring R p] : ⇑(ring_equiv.symm (frobenius_equiv R p)) = ⇑(pth_root R p) :=
rfl
@[simp] theorem frobenius_pth_root {R : Type u} [comm_semiring R] {p : ℕ} [fact (nat.prime p)]
[char_p R p] [perfect_ring R p] (x : R) :
coe_fn (frobenius R p) (coe_fn (pth_root R p) x) = x :=
ring_equiv.apply_symm_apply (frobenius_equiv R p) x
@[simp] theorem pth_root_pow_p {R : Type u} [comm_semiring R] {p : ℕ} [fact (nat.prime p)]
[char_p R p] [perfect_ring R p] (x : R) : coe_fn (pth_root R p) x ^ p = x :=
frobenius_pth_root x
@[simp] theorem pth_root_frobenius {R : Type u} [comm_semiring R] {p : ℕ} [fact (nat.prime p)]
[char_p R p] [perfect_ring R p] (x : R) :
coe_fn (pth_root R p) (coe_fn (frobenius R p) x) = x :=
ring_equiv.symm_apply_apply (frobenius_equiv R p) x
@[simp] theorem pth_root_pow_p' {R : Type u} [comm_semiring R] {p : ℕ} [fact (nat.prime p)]
[char_p R p] [perfect_ring R p] (x : R) : coe_fn (pth_root R p) (x ^ p) = x :=
pth_root_frobenius x
theorem left_inverse_pth_root_frobenius {R : Type u} [comm_semiring R] {p : ℕ} [fact (nat.prime p)]
[char_p R p] [perfect_ring R p] : function.left_inverse ⇑(pth_root R p) ⇑(frobenius R p) :=
pth_root_frobenius
theorem right_inverse_pth_root_frobenius {R : Type u} [comm_semiring R] {p : ℕ} [fact (nat.prime p)]
[char_p R p] [perfect_ring R p] : function.right_inverse ⇑(pth_root R p) ⇑(frobenius R p) :=
frobenius_pth_root
theorem commute_frobenius_pth_root {R : Type u} [comm_semiring R] {p : ℕ} [fact (nat.prime p)]
[char_p R p] [perfect_ring R p] : function.commute ⇑(frobenius R p) ⇑(pth_root R p) :=
fun (x : R) => Eq.trans (frobenius_pth_root x) (Eq.symm (pth_root_frobenius x))
theorem eq_pth_root_iff {R : Type u} [comm_semiring R] {p : ℕ} [fact (nat.prime p)] [char_p R p]
[perfect_ring R p] {x : R} {y : R} :
x = coe_fn (pth_root R p) y ↔ coe_fn (frobenius R p) x = y :=
equiv.eq_symm_apply (ring_equiv.to_equiv (frobenius_equiv R p))
theorem pth_root_eq_iff {R : Type u} [comm_semiring R] {p : ℕ} [fact (nat.prime p)] [char_p R p]
[perfect_ring R p] {x : R} {y : R} :
coe_fn (pth_root R p) x = y ↔ x = coe_fn (frobenius R p) y :=
equiv.symm_apply_eq (ring_equiv.to_equiv (frobenius_equiv R p))
theorem monoid_hom.map_pth_root {R : Type u} [comm_semiring R] {S : Type v} [comm_semiring S]
(f : R →* S) {p : ℕ} [fact (nat.prime p)] [char_p R p] [perfect_ring R p] [char_p S p]
[perfect_ring S p] (x : R) :
coe_fn f (coe_fn (pth_root R p) x) = coe_fn (pth_root S p) (coe_fn f x) :=
sorry
theorem monoid_hom.map_iterate_pth_root {R : Type u} [comm_semiring R] {S : Type v}
[comm_semiring S] (f : R →* S) {p : ℕ} [fact (nat.prime p)] [char_p R p] [perfect_ring R p]
[char_p S p] [perfect_ring S p] (x : R) (n : ℕ) :
coe_fn f (nat.iterate (⇑(pth_root R p)) n x) = nat.iterate (⇑(pth_root S p)) n (coe_fn f x) :=
function.semiconj.iterate_right (monoid_hom.map_pth_root f) n x
theorem ring_hom.map_pth_root {R : Type u} [comm_semiring R] {S : Type v} [comm_semiring S]
(g : R →+* S) {p : ℕ} [fact (nat.prime p)] [char_p R p] [perfect_ring R p] [char_p S p]
[perfect_ring S p] (x : R) :
coe_fn g (coe_fn (pth_root R p) x) = coe_fn (pth_root S p) (coe_fn g x) :=
monoid_hom.map_pth_root (ring_hom.to_monoid_hom g) x
theorem ring_hom.map_iterate_pth_root {R : Type u} [comm_semiring R] {S : Type v} [comm_semiring S]
(g : R →+* S) {p : ℕ} [fact (nat.prime p)] [char_p R p] [perfect_ring R p] [char_p S p]
[perfect_ring S p] (x : R) (n : ℕ) :
coe_fn g (nat.iterate (⇑(pth_root R p)) n x) = nat.iterate (⇑(pth_root S p)) n (coe_fn g x) :=
monoid_hom.map_iterate_pth_root (ring_hom.to_monoid_hom g) x n
theorem injective_pow_p {R : Type u} [comm_semiring R] (p : ℕ) [fact (nat.prime p)] [char_p R p]
[perfect_ring R p] {x : R} {y : R} (hxy : x ^ p = y ^ p) : x = y :=
function.left_inverse.injective left_inverse_pth_root_frobenius hxy
/-- The relation identifying `(n, x)` with `(n + 1, x ^ p)`; `perfect_closure K p` is the quotient
by this relation. -/
inductive perfect_closure.r (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p] :
ℕ × K → ℕ × K → Prop
where
| intro : ∀ (n : ℕ) (x : K), perfect_closure.r K p (n, x) (n + 1, coe_fn (frobenius K p) x)
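-- In the quotient, the class of `(n + 1, x)` is a `p`-th root of the class of `(n, x)`, so every
-- element acquires a `p`-th root and the Frobenius map becomes surjective (cf. `frobenius_mk`
-- below).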
/-- The perfect closure is the smallest extension that makes frobenius surjective. -/
def perfect_closure (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p] :=
Quot sorry
namespace perfect_closure
/-- Constructor for `perfect_closure`. -/
def mk (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p] (x : ℕ × K) :
perfect_closure K p :=
Quot.mk (r K p) x
@[simp] theorem quot_mk_eq_mk (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p]
(x : ℕ × K) : Quot.mk (r K p) x = mk K p x :=
rfl
/-- Lift a function `ℕ × K → L` to a function on `perfect_closure K p`. -/
def lift_on {K : Type u} [comm_ring K] {p : ℕ} [fact (nat.prime p)] [char_p K p] {L : Type u_1}
(x : perfect_closure K p) (f : ℕ × K → L) (hf : ∀ (x y : ℕ × K), r K p x y → f x = f y) : L :=
quot.lift_on x f hf
@[simp] theorem lift_on_mk {K : Type u} [comm_ring K] {p : ℕ} [fact (nat.prime p)] [char_p K p]
{L : Type u_1} (f : ℕ × K → L) (hf : ∀ (x y : ℕ × K), r K p x y → f x = f y) (x : ℕ × K) :
lift_on (mk K p x) f hf = f x :=
rfl
theorem induction_on {K : Type u} [comm_ring K] {p : ℕ} [fact (nat.prime p)] [char_p K p]
(x : perfect_closure K p) {q : perfect_closure K p → Prop} (h : ∀ (x : ℕ × K), q (mk K p x)) :
q x :=
quot.induction_on x h
protected instance has_mul (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p] :
Mul (perfect_closure K p) :=
{ mul :=
Quot.lift
(fun (x : ℕ × K) =>
Quot.lift
(fun (y : ℕ × K) =>
mk K p
(prod.fst x + prod.fst y,
nat.iterate (⇑(frobenius K p)) (prod.fst y) (prod.snd x) *
nat.iterate (⇑(frobenius K p)) (prod.fst x) (prod.snd y)))
(mul_aux_right K p x))
sorry }
@[simp] theorem mk_mul_mk (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p]
(x : ℕ × K) (y : ℕ × K) :
mk K p x * mk K p y =
mk K p
(prod.fst x + prod.fst y,
nat.iterate (⇑(frobenius K p)) (prod.fst y) (prod.snd x) *
nat.iterate (⇑(frobenius K p)) (prod.fst x) (prod.snd y)) :=
rfl
protected instance comm_monoid (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)]
[char_p K p] : comm_monoid (perfect_closure K p) :=
comm_monoid.mk Mul.mul sorry (mk K p (0, 1)) sorry sorry sorry
theorem one_def (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p] :
1 = mk K p (0, 1) :=
rfl
protected instance inhabited (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p] :
Inhabited (perfect_closure K p) :=
{ default := 1 }
protected instance has_add (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p] :
Add (perfect_closure K p) :=
{ add :=
Quot.lift
(fun (x : ℕ × K) =>
Quot.lift
(fun (y : ℕ × K) =>
mk K p
(prod.fst x + prod.fst y,
nat.iterate (⇑(frobenius K p)) (prod.fst y) (prod.snd x) +
nat.iterate (⇑(frobenius K p)) (prod.fst x) (prod.snd y)))
(add_aux_right K p x))
sorry }
@[simp] theorem mk_add_mk (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p]
(x : ℕ × K) (y : ℕ × K) :
mk K p x + mk K p y =
mk K p
(prod.fst x + prod.fst y,
nat.iterate (⇑(frobenius K p)) (prod.fst y) (prod.snd x) +
nat.iterate (⇑(frobenius K p)) (prod.fst x) (prod.snd y)) :=
rfl
protected instance has_neg (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p] :
Neg (perfect_closure K p) :=
{ neg := Quot.lift (fun (x : ℕ × K) => mk K p (prod.fst x, -prod.snd x)) sorry }
@[simp] theorem neg_mk (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p]
(x : ℕ × K) : -mk K p x = mk K p (prod.fst x, -prod.snd x) :=
rfl
protected instance has_zero (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p] :
HasZero (perfect_closure K p) :=
{ zero := mk K p (0, 0) }
theorem zero_def (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p] :
0 = mk K p (0, 0) :=
rfl
theorem mk_zero (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p] (n : ℕ) :
mk K p (n, 0) = 0 :=
sorry
theorem r.sound (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p] (m : ℕ) (n : ℕ)
(x : K) (y : K) (H : nat.iterate (⇑(frobenius K p)) m x = y) :
mk K p (n, x) = mk K p (m + n, y) :=
sorry
protected instance comm_ring (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p] :
comm_ring (perfect_closure K p) :=
comm_ring.mk Add.add sorry 0 sorry sorry Neg.neg
(ring.sub._default Add.add sorry 0 sorry sorry Neg.neg) sorry sorry comm_monoid.mul sorry
comm_monoid.one sorry sorry sorry sorry sorry
theorem eq_iff' (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p] (x : ℕ × K)
(y : ℕ × K) :
mk K p x = mk K p y ↔
∃ (z : ℕ),
nat.iterate (⇑(frobenius K p)) (prod.fst y + z) (prod.snd x) =
nat.iterate (⇑(frobenius K p)) (prod.fst x + z) (prod.snd y) :=
sorry
theorem nat_cast (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p] (n : ℕ)
(x : ℕ) : ↑x = mk K p (n, ↑x) :=
sorry
theorem int_cast (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p] (x : ℤ) :
↑x = mk K p (0, ↑x) :=
sorry
theorem nat_cast_eq_iff (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p] (x : ℕ)
(y : ℕ) : ↑x = ↑y ↔ ↑x = ↑y :=
sorry
protected instance char_p (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p] :
char_p (perfect_closure K p) p :=
char_p.mk
fun (x : ℕ) =>
eq.mpr
(id
(Eq._oldrec (Eq.refl (↑x = 0 ↔ p ∣ x))
(Eq.symm (propext (char_p.cast_eq_zero_iff K p x)))))
(eq.mpr (id (Eq._oldrec (Eq.refl (↑x = 0 ↔ ↑x = 0)) (Eq.symm nat.cast_zero)))
(eq.mpr (id (Eq._oldrec (Eq.refl (↑x = ↑0 ↔ ↑x = 0)) (propext (nat_cast_eq_iff K p x 0))))
(eq.mpr (id (Eq._oldrec (Eq.refl (↑x = ↑0 ↔ ↑x = 0)) nat.cast_zero))
(iff.refl (↑x = 0)))))
theorem frobenius_mk (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p]
(x : ℕ × K) :
coe_fn (frobenius (perfect_closure K p) p) (mk K p x) = mk K p (prod.fst x, prod.snd x ^ p) :=
sorry
/-- Embedding of `K` into `perfect_closure K p` -/
def of (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p] :
K →+* perfect_closure K p :=
ring_hom.mk (fun (x : K) => mk K p (0, x)) sorry sorry sorry sorry
theorem of_apply (K : Type u) [comm_ring K] (p : ℕ) [fact (nat.prime p)] [char_p K p] (x : K) :
coe_fn (of K p) x = mk K p (0, x) :=
rfl
theorem eq_iff (K : Type u) [integral_domain K] (p : ℕ) [fact (nat.prime p)] [char_p K p]
(x : ℕ × K) (y : ℕ × K) :
Quot.mk (r K p) x = Quot.mk (r K p) y ↔
nat.iterate (⇑(frobenius K p)) (prod.fst y) (prod.snd x) =
nat.iterate (⇑(frobenius K p)) (prod.fst x) (prod.snd y) :=
sorry
protected instance has_inv (K : Type u) [field K] (p : ℕ) [fact (nat.prime p)] [char_p K p] :
has_inv (perfect_closure K p) :=
has_inv.mk (Quot.lift (fun (x : ℕ × K) => Quot.mk (r K p) (prod.fst x, prod.snd x⁻¹)) sorry)
protected instance field (K : Type u) [field K] (p : ℕ) [fact (nat.prime p)] [char_p K p] :
field (perfect_closure K p) :=
field.mk comm_ring.add sorry comm_ring.zero sorry sorry comm_ring.neg comm_ring.sub sorry sorry
comm_ring.mul sorry comm_ring.one sorry sorry sorry sorry sorry has_inv.inv sorry sorry sorry
protected instance perfect_ring (K : Type u) [field K] (p : ℕ) [fact (nat.prime p)] [char_p K p] :
perfect_ring (perfect_closure K p) p :=
perfect_ring.mk
(fun (e : perfect_closure K p) =>
lift_on e (fun (x : ℕ × K) => mk K p (prod.fst x + 1, prod.snd x)) sorry)
sorry sorry
theorem eq_pth_root (K : Type u) [field K] (p : ℕ) [fact (nat.prime p)] [char_p K p] (x : ℕ × K) :
mk K p x =
nat.iterate (⇑(pth_root (perfect_closure K p) p)) (prod.fst x)
(coe_fn (of K p) (prod.snd x)) :=
sorry
/-- Given a field `K` of characteristic `p` and a perfect ring `L` of the same characteristic,
any homomorphism `K →+* L` can be lifted to `perfect_closure K p`. -/
def lift (K : Type u) [field K] (p : ℕ) [fact (nat.prime p)] [char_p K p] (L : Type v)
[comm_semiring L] [char_p L p] [perfect_ring L p] : (K →+* L) ≃ (perfect_closure K p →+* L) :=
equiv.mk
(fun (f : K →+* L) =>
ring_hom.mk
(fun (e : perfect_closure K p) =>
lift_on e
(fun (x : ℕ × K) => nat.iterate (⇑(pth_root L p)) (prod.fst x) (coe_fn f (prod.snd x)))
sorry)
sorry sorry sorry sorry)
(fun (f : perfect_closure K p →+* L) => ring_hom.comp f (of K p)) sorry sorry
end Mathlib
|
4626e3e6390628909a6fb02cab7416aa821e15ee
|
9dc8cecdf3c4634764a18254e94d43da07142918
|
/src/analysis/calculus/implicit.lean
|
80704de48ffb0e07827aef953dd55763867fcbaa
|
[
"Apache-2.0"
] |
permissive
|
jcommelin/mathlib
|
d8456447c36c176e14d96d9e76f39841f69d2d9b
|
ee8279351a2e434c2852345c51b728d22af5a156
|
refs/heads/master
| 1,664,782,136,488
| 1,663,638,983,000
| 1,663,638,983,000
| 132,563,656
| 0
| 0
|
Apache-2.0
| 1,663,599,929,000
| 1,525,760,539,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 20,250
|
lean
|
/-
Copyright (c) 2020 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import analysis.calculus.inverse
import analysis.normed_space.complemented
/-!
# Implicit function theorem
We prove three versions of the implicit function theorem. First we define a structure
`implicit_function_data` that holds arguments for the most general version of the implicit function
theorem, see `implicit_function_data.implicit_function`
and `implicit_function_data.to_implicit_function`. This version allows a user to choose
a specific implicit function but provides only a little convenience over the inverse function
theorem.
Then we define `implicit_function_of_complemented`: an implicit function defined by `f (g z y) = z`,
where `f : E → F` is a function strictly differentiable at `a` such that its derivative `f'`
is surjective and has a `complemented` kernel.
Finally, if the codomain of `f` is a finite dimensional space, then we can automatically prove
that the kernel of `f'` is complemented, hence the only assumptions are `has_strict_fderiv_at`
and `f'.range = ⊤`. This version is named `implicit_function`.
## TODO
* Add a version for a function `f : E × F → G` such that $$\frac{\partial f}{\partial y}$$ is
invertible.
* Add a version for `f : 𝕜 × 𝕜 → 𝕜` proving `has_strict_deriv_at` and `deriv φ = ...`.
* Prove that in a real vector space the implicit function has the same smoothness as the original
one.
* If the original function is differentiable in a neighborhood, then the implicit function is
differentiable in a neighborhood as well. Current setup only proves differentiability at one
point for the implicit function constructed in this file (as opposed to an unspecified implicit
function). One of the ways to overcome this difficulty is to use uniqueness of the implicit
function in the general version of the theorem. Another way is to prove that *any* implicit
function satisfying some predicate is strictly differentiable.
## Tags
implicit function, inverse function
-/
noncomputable theory
open_locale topological_space
open filter
open continuous_linear_map (fst snd smul_right ker_prod)
open continuous_linear_equiv (of_bijective)
/-!
### General version
Consider two functions `f : E → F` and `g : E → G` and a point `a` such that
* both functions are strictly differentiable at `a`;
* the derivatives are surjective;
* the kernels of the derivatives are complementary subspaces of `E`.
Note that the map `x ↦ (f x, g x)` has a bijective derivative, hence it is a local homeomorphism
between `E` and `F × G`. We use this fact to define a function `φ : F → G → E`
(see `implicit_function_data.implicit_function`) such that for `(y, z)` close enough to `(f a, g a)`
we have `f (φ y z) = y` and `g (φ y z) = z`.
We also prove a formula for $$\frac{\partial\varphi}{\partial z}.$$
Though this statement is almost symmetric with respect to `F`, `G`, we interpret it in the following
way. Consider a family of surfaces `{x | f x = y}`, `y ∈ 𝓝 (f a)`. Each of these surfaces is
parametrized by `φ y`.
There are many ways to choose a (differentiable) function `φ` such that `f (φ y z) = y` but the
extra condition `g (φ y z) = z` allows a user to select one of these functions. If we imagine
that the level surfaces `f = const` form a local horizontal foliation, then the choice of
`g` fixes a transverse foliation `g = const`, and `φ` is the inverse function of the projection
of `{x | f x = y}` along this transverse foliation.
This version of the theorem is used to prove the other versions and can be used if a user
needs complete control over the choice of the implicit function.
-/
/-- Data for the general version of the implicit function theorem. It holds two functions
`f : E → F` and `g : E → G` (named `left_fun` and `right_fun`) and a point `a` (named `pt`)
such that
* both functions are strictly differentiable at `a`;
* the derivatives are surjective;
* the kernels of the derivatives are complementary subspaces of `E`. -/
@[nolint has_nonempty_instance]
structure implicit_function_data (𝕜 : Type*) [nontrivially_normed_field 𝕜]
(E : Type*) [normed_add_comm_group E] [normed_space 𝕜 E] [complete_space E]
(F : Type*) [normed_add_comm_group F] [normed_space 𝕜 F] [complete_space F]
(G : Type*) [normed_add_comm_group G] [normed_space 𝕜 G] [complete_space G] :=
(left_fun : E → F)
(left_deriv : E →L[𝕜] F)
(right_fun : E → G)
(right_deriv : E →L[𝕜] G)
(pt : E)
(left_has_deriv : has_strict_fderiv_at left_fun left_deriv pt)
(right_has_deriv : has_strict_fderiv_at right_fun right_deriv pt)
(left_range : left_deriv.range = ⊤)
(right_range : right_deriv.range = ⊤)
(is_compl_ker : is_compl left_deriv.ker right_deriv.ker)
namespace implicit_function_data
variables {𝕜 : Type*} [nontrivially_normed_field 𝕜]
{E : Type*} [normed_add_comm_group E] [normed_space 𝕜 E] [complete_space E]
{F : Type*} [normed_add_comm_group F] [normed_space 𝕜 F] [complete_space F]
{G : Type*} [normed_add_comm_group G] [normed_space 𝕜 G] [complete_space G]
(φ : implicit_function_data 𝕜 E F G)
/-- The function given by `x ↦ (left_fun x, right_fun x)`. -/
def prod_fun (x : E) : F × G := (φ.left_fun x, φ.right_fun x)
@[simp] lemma prod_fun_apply (x : E) : φ.prod_fun x = (φ.left_fun x, φ.right_fun x) := rfl
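-- A minimal sanity check (added here, not in the original file): `prod_fun` is literally the pair
-- of the two given functions, so both components can be read off definitionally.
example (x : E) : (φ.prod_fun x).1 = φ.left_fun x ∧ (φ.prod_fun x).2 = φ.right_fun x :=
⟨rfl, rfl⟩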
protected lemma has_strict_fderiv_at :
has_strict_fderiv_at φ.prod_fun
(φ.left_deriv.equiv_prod_of_surjective_of_is_compl φ.right_deriv φ.left_range φ.right_range
φ.is_compl_ker : E →L[𝕜] F × G) φ.pt :=
φ.left_has_deriv.prod φ.right_has_deriv
/-- Implicit function theorem. If `f : E → F` and `g : E → G` are two maps strictly differentiable
at `a`, their derivatives `f'`, `g'` are surjective, and the kernels of these derivatives are
complementary subspaces of `E`, then `x ↦ (f x, g x)` defines a local homeomorphism between
`E` and `F × G`. In particular, `{x | f x = f a}` is locally homeomorphic to `G`. -/
def to_local_homeomorph : local_homeomorph E (F × G) :=
φ.has_strict_fderiv_at.to_local_homeomorph _
/-- Implicit function theorem. If `f : E → F` and `g : E → G` are two maps strictly differentiable
at `a`, their derivatives `f'`, `g'` are surjective, and the kernels of these derivatives are
complementary subspaces of `E`, then `implicit_function_of_is_compl_ker` is the unique (germ of a)
map `φ : F → G → E` such that `f (φ y z) = y` and `g (φ y z) = z`. -/
def implicit_function : F → G → E := function.curry $ φ.to_local_homeomorph.symm
@[simp] lemma to_local_homeomorph_coe : ⇑(φ.to_local_homeomorph) = φ.prod_fun := rfl
lemma to_local_homeomorph_apply (x : E) :
φ.to_local_homeomorph x = (φ.left_fun x, φ.right_fun x) :=
rfl
lemma pt_mem_to_local_homeomorph_source :
φ.pt ∈ φ.to_local_homeomorph.source :=
φ.has_strict_fderiv_at.mem_to_local_homeomorph_source
lemma map_pt_mem_to_local_homeomorph_target :
(φ.left_fun φ.pt, φ.right_fun φ.pt) ∈ φ.to_local_homeomorph.target :=
φ.to_local_homeomorph.map_source $ φ.pt_mem_to_local_homeomorph_source
lemma prod_map_implicit_function :
∀ᶠ (p : F × G) in 𝓝 (φ.prod_fun φ.pt), φ.prod_fun (φ.implicit_function p.1 p.2) = p :=
φ.has_strict_fderiv_at.eventually_right_inverse.mono $ λ ⟨z, y⟩ h, h
lemma left_map_implicit_function :
∀ᶠ (p : F × G) in 𝓝 (φ.prod_fun φ.pt), φ.left_fun (φ.implicit_function p.1 p.2) = p.1 :=
φ.prod_map_implicit_function.mono $ λ z, congr_arg prod.fst
lemma right_map_implicit_function :
∀ᶠ (p : F × G) in 𝓝 (φ.prod_fun φ.pt), φ.right_fun (φ.implicit_function p.1 p.2) = p.2 :=
φ.prod_map_implicit_function.mono $ λ z, congr_arg prod.snd
lemma implicit_function_apply_image :
∀ᶠ x in 𝓝 φ.pt, φ.implicit_function (φ.left_fun x) (φ.right_fun x) = x :=
φ.has_strict_fderiv_at.eventually_left_inverse
lemma map_nhds_eq : map φ.left_fun (𝓝 φ.pt) = 𝓝 (φ.left_fun φ.pt) :=
show map (prod.fst ∘ φ.prod_fun) (𝓝 φ.pt) = 𝓝 (φ.prod_fun φ.pt).1,
by rw [← map_map, φ.has_strict_fderiv_at.map_nhds_eq_of_equiv, map_fst_nhds]
lemma implicit_function_has_strict_fderiv_at
(g'inv : G →L[𝕜] E) (hg'inv : φ.right_deriv.comp g'inv = continuous_linear_map.id 𝕜 G)
(hg'invf : φ.left_deriv.comp g'inv = 0) :
has_strict_fderiv_at (φ.implicit_function (φ.left_fun φ.pt)) g'inv (φ.right_fun φ.pt) :=
begin
have := φ.has_strict_fderiv_at.to_local_inverse,
simp only [prod_fun] at this,
convert this.comp (φ.right_fun φ.pt)
((has_strict_fderiv_at_const _ _).prod (has_strict_fderiv_at_id _)),
simp only [continuous_linear_map.ext_iff, continuous_linear_map.coe_comp', function.comp_app]
at hg'inv hg'invf ⊢,
simp [continuous_linear_equiv.eq_symm_apply, *]
end
end implicit_function_data
namespace has_strict_fderiv_at
section complemented
/-!
### Case of a complemented kernel
In this section we prove the following version of the implicit function theorem. Consider a map
`f : E → F` and a point `a : E` such that `f` is strictly differentiable at `a`, its derivative `f'`
is surjective and the kernel of `f'` is a complemented subspace of `E` (i.e., it has a closed
complementary subspace). Then there exists a function `φ : F → ker f' → E` such that for `(y, z)`
close to `(f a, 0)` we have `f (φ y z) = y` and the derivative of `φ (f a)` at zero is the
embedding `ker f' → E`.
Note that a map with these properties is not unique. E.g., different choices of a subspace
complementary to `ker f'` lead to different maps `φ`.
-/
variables {𝕜 : Type*} [nontrivially_normed_field 𝕜]
{E : Type*} [normed_add_comm_group E] [normed_space 𝕜 E] [complete_space E]
{F : Type*} [normed_add_comm_group F] [normed_space 𝕜 F] [complete_space F]
{f : E → F} {f' : E →L[𝕜] F} {a : E}
section defs
variables (f f')
/-- Data used to apply the generic implicit function theorem to the case of a strictly
differentiable map such that its derivative is surjective and has a complemented kernel. -/
@[simp] def implicit_function_data_of_complemented (hf : has_strict_fderiv_at f f' a)
(hf' : f'.range = ⊤) (hker : f'.ker.closed_complemented) :
implicit_function_data 𝕜 E F f'.ker :=
{ left_fun := f,
left_deriv := f',
right_fun := λ x, classical.some hker (x - a),
right_deriv := classical.some hker,
pt := a,
left_has_deriv := hf,
right_has_deriv := (classical.some hker).has_strict_fderiv_at.comp a
((has_strict_fderiv_at_id a).sub_const a),
left_range := hf',
right_range := linear_map.range_eq_of_proj (classical.some_spec hker),
is_compl_ker := linear_map.is_compl_of_proj (classical.some_spec hker) }
/-- A local homeomorphism between `E` and `F × f'.ker` sending level surfaces of `f`
to vertical subspaces. -/
def implicit_to_local_homeomorph_of_complemented (hf : has_strict_fderiv_at f f' a)
(hf' : f'.range = ⊤) (hker : f'.ker.closed_complemented) :
local_homeomorph E (F × f'.ker) :=
(implicit_function_data_of_complemented f f' hf hf' hker).to_local_homeomorph
/-- Implicit function `g` defined by `f (g z y) = z`. -/
def implicit_function_of_complemented (hf : has_strict_fderiv_at f f' a)
(hf' : f'.range = ⊤) (hker : f'.ker.closed_complemented) :
F → f'.ker → E :=
(implicit_function_data_of_complemented f f' hf hf' hker).implicit_function
end defs
@[simp] lemma implicit_to_local_homeomorph_of_complemented_fst (hf : has_strict_fderiv_at f f' a)
(hf' : f'.range = ⊤) (hker : f'.ker.closed_complemented) (x : E) :
(hf.implicit_to_local_homeomorph_of_complemented f f' hf' hker x).fst = f x :=
rfl
lemma implicit_to_local_homeomorph_of_complemented_apply
(hf : has_strict_fderiv_at f f' a) (hf' : f'.range = ⊤)
(hker : f'.ker.closed_complemented) (y : E) :
hf.implicit_to_local_homeomorph_of_complemented f f' hf' hker y =
(f y, classical.some hker (y - a)) :=
rfl
@[simp] lemma implicit_to_local_homeomorph_of_complemented_apply_ker
(hf : has_strict_fderiv_at f f' a) (hf' : f'.range = ⊤)
(hker : f'.ker.closed_complemented) (y : f'.ker) :
hf.implicit_to_local_homeomorph_of_complemented f f' hf' hker (y + a) = (f (y + a), y) :=
by simp only [implicit_to_local_homeomorph_of_complemented_apply, add_sub_cancel,
classical.some_spec hker]
@[simp] lemma implicit_to_local_homeomorph_of_complemented_self
(hf : has_strict_fderiv_at f f' a) (hf' : f'.range = ⊤) (hker : f'.ker.closed_complemented) :
hf.implicit_to_local_homeomorph_of_complemented f f' hf' hker a = (f a, 0) :=
by simp [hf.implicit_to_local_homeomorph_of_complemented_apply]
lemma mem_implicit_to_local_homeomorph_of_complemented_source (hf : has_strict_fderiv_at f f' a)
(hf' : f'.range = ⊤) (hker : f'.ker.closed_complemented) :
a ∈ (hf.implicit_to_local_homeomorph_of_complemented f f' hf' hker).source :=
mem_to_local_homeomorph_source _
lemma mem_implicit_to_local_homeomorph_of_complemented_target (hf : has_strict_fderiv_at f f' a)
(hf' : f'.range = ⊤) (hker : f'.ker.closed_complemented) :
(f a, (0 : f'.ker)) ∈ (hf.implicit_to_local_homeomorph_of_complemented f f' hf' hker).target :=
by simpa only [implicit_to_local_homeomorph_of_complemented_self] using
((hf.implicit_to_local_homeomorph_of_complemented f f' hf' hker).map_source $
(hf.mem_implicit_to_local_homeomorph_of_complemented_source hf' hker))
/-- `implicit_function_of_complemented` sends `(z, y)` to a point in `f ⁻¹' z`. -/
lemma map_implicit_function_of_complemented_eq (hf : has_strict_fderiv_at f f' a)
(hf' : f'.range = ⊤) (hker : f'.ker.closed_complemented) :
∀ᶠ (p : F × f'.ker) in 𝓝 (f a, 0),
f (hf.implicit_function_of_complemented f f' hf' hker p.1 p.2) = p.1 :=
((hf.implicit_to_local_homeomorph_of_complemented f f' hf' hker).eventually_right_inverse $
hf.mem_implicit_to_local_homeomorph_of_complemented_target hf' hker).mono $ λ ⟨z, y⟩ h,
congr_arg prod.fst h
/-- Any point in some neighborhood of `a` can be represented as `implicit_function`
of some point. -/
lemma eq_implicit_function_of_complemented (hf : has_strict_fderiv_at f f' a)
(hf' : f'.range = ⊤) (hker : f'.ker.closed_complemented) :
∀ᶠ x in 𝓝 a, hf.implicit_function_of_complemented f f' hf' hker (f x)
(hf.implicit_to_local_homeomorph_of_complemented f f' hf' hker x).snd = x :=
(implicit_function_data_of_complemented f f' hf hf' hker).implicit_function_apply_image
@[simp] lemma implicit_function_of_complemented_apply_image (hf : has_strict_fderiv_at f f' a)
(hf' : f'.range = ⊤) (hker : f'.ker.closed_complemented) :
hf.implicit_function_of_complemented f f' hf' hker (f a) 0 = a :=
begin
convert (hf.implicit_to_local_homeomorph_of_complemented f f' hf' hker).left_inv
(hf.mem_implicit_to_local_homeomorph_of_complemented_source hf' hker),
exact congr_arg prod.snd (hf.implicit_to_local_homeomorph_of_complemented_self hf' hker).symm
end
lemma to_implicit_function_of_complemented (hf : has_strict_fderiv_at f f' a)
(hf' : f'.range = ⊤) (hker : f'.ker.closed_complemented) :
has_strict_fderiv_at (hf.implicit_function_of_complemented f f' hf' hker (f a))
f'.ker.subtypeL 0 :=
by convert (implicit_function_data_of_complemented f f' hf hf'
hker).implicit_function_has_strict_fderiv_at f'.ker.subtypeL _ _;
simp only [implicit_function_data_of_complemented]; ext; simp [classical.some_spec hker]
end complemented
/-!
### Finite dimensional case
In this section we prove the following version of the implicit function theorem. Consider a map
`f : E → F` from a Banach normed space to a finite dimensional space.
Take a point `a : E` such that `f` is strictly differentiable at `a` and its derivative `f'`
is surjective. Then there exists a function `φ : F → ker f' → E` such that for `(y, z)`
close to `(f a, 0)` we have `f (φ y z) = y` and the derivative of `φ (f a)` at zero is the
embedding `ker f' → E`.
This version deduces that `ker f'` is a complemented subspace from the fact that `F` is a finite
dimensional space, then applies the previous version.
Note that a map with these properties is not unique. E.g., different choices of a subspace
complementary to `ker f'` lead to different maps `φ`.
-/
section finite_dimensional
variables {𝕜 : Type*} [nontrivially_normed_field 𝕜] [complete_space 𝕜]
{E : Type*} [normed_add_comm_group E] [normed_space 𝕜 E] [complete_space E]
{F : Type*} [normed_add_comm_group F] [normed_space 𝕜 F] [finite_dimensional 𝕜 F]
(f : E → F) (f' : E →L[𝕜] F) {a : E}
/-- Given a map `f : E → F` to a finite dimensional space with a surjective derivative `f'`,
returns a local homeomorphism between `E` and `F × ker f'`. -/
def implicit_to_local_homeomorph (hf : has_strict_fderiv_at f f' a) (hf' : f'.range = ⊤) :
local_homeomorph E (F × f'.ker) :=
by haveI := finite_dimensional.complete 𝕜 F; exact
hf.implicit_to_local_homeomorph_of_complemented f f' hf'
f'.ker_closed_complemented_of_finite_dimensional_range
/-- Implicit function `g` defined by `f (g z y) = z`. -/
def implicit_function (hf : has_strict_fderiv_at f f' a) (hf' : f'.range = ⊤) :
F → f'.ker → E :=
function.curry $ (hf.implicit_to_local_homeomorph f f' hf').symm
variables {f f'}
@[simp] lemma implicit_to_local_homeomorph_fst (hf : has_strict_fderiv_at f f' a)
(hf' : f'.range = ⊤) (x : E) :
(hf.implicit_to_local_homeomorph f f' hf' x).fst = f x :=
rfl
@[simp] lemma implicit_to_local_homeomorph_apply_ker
(hf : has_strict_fderiv_at f f' a) (hf' : f'.range = ⊤) (y : f'.ker) :
hf.implicit_to_local_homeomorph f f' hf' (y + a) = (f (y + a), y) :=
by apply implicit_to_local_homeomorph_of_complemented_apply_ker
@[simp] lemma implicit_to_local_homeomorph_self
(hf : has_strict_fderiv_at f f' a) (hf' : f'.range = ⊤) :
hf.implicit_to_local_homeomorph f f' hf' a = (f a, 0) :=
by apply implicit_to_local_homeomorph_of_complemented_self
lemma mem_implicit_to_local_homeomorph_source (hf : has_strict_fderiv_at f f' a)
(hf' : f'.range = ⊤) :
a ∈ (hf.implicit_to_local_homeomorph f f' hf').source :=
mem_to_local_homeomorph_source _
lemma mem_implicit_to_local_homeomorph_target (hf : has_strict_fderiv_at f f' a)
(hf' : f'.range = ⊤) :
(f a, (0 : f'.ker)) ∈ (hf.implicit_to_local_homeomorph f f' hf').target :=
by apply mem_implicit_to_local_homeomorph_of_complemented_target
lemma tendsto_implicit_function (hf : has_strict_fderiv_at f f' a)
(hf' : f'.range = ⊤) {α : Type*} {l : filter α} {g₁ : α → F} {g₂ : α → f'.ker}
(h₁ : tendsto g₁ l (𝓝 $ f a)) (h₂ : tendsto g₂ l (𝓝 0)) :
tendsto (λ t, hf.implicit_function f f' hf' (g₁ t) (g₂ t)) l (𝓝 a) :=
begin
refine ((hf.implicit_to_local_homeomorph f f' hf').tendsto_symm
(hf.mem_implicit_to_local_homeomorph_source hf')).comp _,
rw [implicit_to_local_homeomorph_self],
exact h₁.prod_mk_nhds h₂
end
alias tendsto_implicit_function ← _root_.filter.tendsto.implicit_function
/-- `implicit_function` sends `(z, y)` to a point in `f ⁻¹' z`. -/
lemma map_implicit_function_eq (hf : has_strict_fderiv_at f f' a) (hf' : f'.range = ⊤) :
∀ᶠ (p : F × f'.ker) in 𝓝 (f a, 0), f (hf.implicit_function f f' hf' p.1 p.2) = p.1 :=
by apply map_implicit_function_of_complemented_eq
@[simp] lemma implicit_function_apply_image (hf : has_strict_fderiv_at f f' a)
(hf' : f'.range = ⊤) :
hf.implicit_function f f' hf' (f a) 0 = a :=
by apply implicit_function_of_complemented_apply_image
/-- Any point in some neighborhood of `a` can be represented as `implicit_function`
of some point. -/
lemma eq_implicit_function (hf : has_strict_fderiv_at f f' a) (hf' : f'.range = ⊤) :
∀ᶠ x in 𝓝 a, hf.implicit_function f f' hf' (f x)
(hf.implicit_to_local_homeomorph f f' hf' x).snd = x :=
by apply eq_implicit_function_of_complemented
lemma to_implicit_function (hf : has_strict_fderiv_at f f' a) (hf' : f'.range = ⊤) :
has_strict_fderiv_at (hf.implicit_function f f' hf' (f a))
f'.ker.subtypeL 0 :=
by apply to_implicit_function_of_complemented
end finite_dimensional
end has_strict_fderiv_at
| 15dfaf348ee9b71e14f9c30f193a897969ced646 | 682dc1c167e5900ba3168b89700ae1cf501cfa29 | /src/basicmodal/semantics/undefinability.lean | c74c1d309e94daa744857e2e03646e9fd13834a0 | [] | no_license | paulaneeley/modal | 834558c87f55cdd6d8a29bb46c12f4d1de3239bc | ee5d149d4ecb337005b850bddf4453e56a5daf04 | refs/heads/master | 1,675,911,819,093 | 1,609,785,144,000 | 1,609,785,144,000 | 270,388,715 | 13 | 1 | null | null | null | null | UTF-8 | Lean | false | false | 7,262 | lean |
/-
Copyright (c) 2021 Paula Neeley. All rights reserved.
Author: Paula Neeley
-/
import basicmodal.language basicmodal.syntax.syntax
import basicmodal.semantics.semantics data.set.basic basicmodal.paths
import basicmodal.semantics.definability
local attribute [instance] classical.prop_decidable
open form
---------------------- Frame Undefinability ----------------------
-- F (a class of frames) is undefinable
def undefinable (F : set (frame)) :=
∀ φ, ¬ defines φ F
-- The theory of x in the model (f, v)
def theory_at (f : frame) (v : nat → f.states → Prop) (x : f.states) :
set (form) := { φ : form | forces f v x φ}
-- Invariance under disjoint union
variables {α β : Type}
def frame.rel.dunion (r1 : α → α → Prop) (r2 : β → β → Prop) :
(sum α β) → (sum α β) → Prop
| (sum.inl a1) (sum.inl a2) := r1 a1 a2
| (sum.inr b1) (sum.inr b2) := r2 b1 b2
| _ _ := false
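-- Quick illustration (added; `r1`, `r2`, `a1`, `a2` are hypothetical): within one component the
-- disjoint-union relation is just the original relation of that component.
example (r1 : α → α → Prop) (r2 : β → β → Prop) (a1 a2 : α) :
  frame.rel.dunion r1 r2 (sum.inl a1) (sum.inl a2) = r1 a1 a2 :=
rfl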
def val_dunion {f1 f2 : frame} (v1 : nat → f1.states → Prop)
(v2 : nat → f2.states → Prop) : nat → (sum f1.states f2.states) → Prop
| n (sum.inl x1) := v1 n x1
| n (sum.inr x2) := v2 n x2
def frame.dunion (f1 f2 : frame) : frame :=
{
states := sum f1.states f2.states,
h := ⟨sum.inl f1.h.default⟩,
rel := frame.rel.dunion (f1.rel) (f2.rel)
}
theorem invariance_dunion (f1 f2 : frame) (v1 : nat → f1.states → Prop)
(v2 : nat → f2.states → Prop) (x1 : f1.states) :
theory_at f1 v1 x1 = theory_at (f1.dunion f2) (val_dunion v1 v2) (sum.inl x1) :=
begin
ext φ, revert x1, induction φ,
repeat {rename φ_ih_φ ih_φ},
repeat {rename φ_ih_ψ ih_ψ},
repeat {rename φ_φ φ},
repeat {rename φ_ψ ψ},
repeat {intro x1},
repeat {split},
repeat {intro h, exact h},
{intro h, split, exact (ih_φ x1).mp h.left, exact (ih_ψ x1).mp h.right},
{intro h, split, exact (ih_φ x1).mpr h.left, exact (ih_ψ x1).mpr h.right},
{intros h h1, exact (ih_ψ x1).mp (h ((ih_φ x1).mpr h1))},
{intros h h1, exact (ih_ψ x1).mpr (h ((ih_φ x1).mp h1))},
{intros h s h1, cases s, exact (φ_ih s).mp ((h s) h1), exact false.elim h1},
{intros h s h1, exact (φ_ih s).mpr ((h (sum.inl s)) h1)}
end
-- Invariance under generated submodels
def frame.gen_subframe (f : frame) (x : f.states) : frame :=
{
states := { y // reachable (f.rel) x y},
h := ⟨⟨x, ref_close x (f.rel)⟩⟩,
rel := λ x1 x2, (f.rel) x1.val x2.val
}
def val_gen_subframe (f : frame) (x : f.states)
(v : nat → f.states → Prop) : nat → (frame.gen_subframe f x).states → Prop :=
λ n, λ y, v n y.val
theorem invariance_gen_submodel (f : frame) (v : ℕ → f.states → Prop)
(x : f.states) (y : (frame.gen_subframe f x).states) :
theory_at f v y.val = theory_at (f.gen_subframe x) (val_gen_subframe f x v) y :=
begin
ext φ, revert y, induction φ,
repeat {rename φ_ih_φ ih_φ},
repeat {rename φ_ih_ψ ih_ψ},
repeat {rename φ_φ φ},
repeat {rename φ_ψ ψ},
repeat {intro y},
repeat {split},
repeat {intro h1, exact h1},
{intro h1, split, exact (ih_φ y).mp h1.left, exact (ih_ψ y).mp h1.right},
{intro h1, split, exact (ih_φ y).mpr h1.left, exact (ih_ψ y).mpr h1.right},
{intros h1 h2, exact (ih_ψ y).mp (h1 ((ih_φ y).mpr h2))},
{intros h1 h2, exact (ih_ψ y).mpr (h1 ((ih_φ y).mp h2))},
{intros h1 z h2, exact (φ_ih z).mp ((h1 z.val) h2)},
{intros h1 z h2,
have h3 := reach_right x y.1 z f.rel (and.intro y.2 h2),
exact (φ_ih ⟨z, h3⟩).mpr ((h1 ⟨z, h3⟩) h2)}
end
-- Invariance under bisimulation
def base {f1 f2 : frame} (v1 : nat → f1.states → Prop)
(v2 : nat → f2.states → Prop) (x1 : f1.states) (x2 : f2.states) :=
∀ n, v1 n x1 ↔ v2 n x2
def forth {f1 f2 : frame} (bisim : f1.states → f2.states → Prop)
(x1 : f1.states) (x2 : f2.states) :=
∀ y1, f1.rel x1 y1 → (∃ y2 : f2.states, f2.rel x2 y2 ∧ bisim y1 y2)
def back {f1 f2 : frame} (bisim : f1.states → f2.states → Prop)
(x1 : f1.states) (x2 : f2.states) :=
∀ y2, f2.rel x2 y2 → (∃ y1 : f1.states, f1.rel x1 y1 ∧ bisim y1 y2)
-- Bisimulation between M1 = (f1,v1) and M2 = (f2,v2)
def is_bisimulation {f1 f2 : frame} (v1 : nat → f1.states → Prop)
(v2 : nat → f2.states → Prop) (bisim : f1.states → f2.states → Prop) := ∀ x1 x2,
bisim x1 x2 → (base v1 v2 x1 x2 ∧ forth bisim x1 x2 ∧ back bisim x1 x2)
theorem invariance_bisim {f1 f2 : frame} (v1 : nat → f1.states → Prop)
(v2 : nat → f2.states → Prop) (bisim : f1.states → f2.states → Prop)
(h : is_bisimulation v1 v2 bisim) (x1 : f1.states) (x2 : f2.states) :
bisim x1 x2 → theory_at f1 v1 x1 = theory_at f2 v2 x2 :=
begin
intro h1, ext φ, revert x1 x2, induction φ,
repeat {rename φ_ih_φ ih_φ},
repeat {rename φ_ih_ψ ih_ψ},
repeat {rename φ_φ φ},
repeat {rename φ_ψ ψ},
repeat {intros x1 x2 h1},
{split, repeat {intro h2, exact h2}},
{split,
intro h2, exact ((h x1 x2 h1).left φ).mp h2,
intro h2, exact ((h x1 x2 h1).left φ).mpr h2},
{split,
intro h2, split, exact (ih_φ x1 x2 h1).mp h2.left, exact (ih_ψ x1 x2 h1).mp h2.right,
intro h2, split, exact (ih_φ x1 x2 h1).mpr h2.left, exact (ih_ψ x1 x2 h1).mpr h2.right},
{split,
intros h2 h3, exact (ih_ψ x1 x2 h1).mp (h2 ((ih_φ x1 x2 h1).mpr h3)),
intros h2 h3, exact (ih_ψ x1 x2 h1).mpr (h2 ((ih_φ x1 x2 h1).mp h3))},
{cases h x1 x2 h1 with h4 h5,
cases h5 with h5 h6, split,
{intros h2 y2 h3, cases h6 y2 h3 with y1 h6,
cases h6 with h6 h7, exact (φ_ih y1 y2 h7).mp (h2 y1 h6)},
{intros h2 y1 h3, cases h5 y1 h3 with y2 h5,
cases h5 with h5 h7, exact (φ_ih y1 y2 h7).mpr (h2 y2 h5)}}
end
-- Invariance using surjective bounded morphisms
open function
def is_bddmorphism {f1 f2 : frame} (g : f1.states → f2.states) :=
∀ x1 : f1.states, forth (λ x1 x2, (g x1) = x2) x1 (g x1) ∧ back (λ x1 x2, (g x1) = x2) x1 (g x1)
def is_surjbddmorphism {f1 f2 : frame} (g : f1.states → f2.states)
:= surjective g ∧ is_bddmorphism g
theorem pull_back {f1 f2 : frame} (g : f1.states → f2.states) (h : is_surjbddmorphism g) :
∀ φ, ¬ f_valid φ f2 → ¬ f_valid φ f1 :=
begin
intros φ h1, rw f_valid at *,
push_neg at h1, push_neg,
cases h1 with v2 h1,
cases h1 with y h1,
let v1 := (λ n x, v2 n (g x)), use v1,
cases h with hl hr, cases hl y with x hl,
existsi (x : f1.states), have h3 := invariance_bisim v1 v2 (λ x y, g x = y),
have h4 : is_bisimulation v1 v2 (λ (x : f1.states) (y : f2.states), g x = y),
{intros x1 x2 h2, split,
{intro n, split, intro h, subst h2, apply h, intro h, subst h2, apply h},
split,
have h5 : forth (λ (x1 : f1.states) (x2 : f2.states), g x1 = x2) x1 x2,
from eq.subst h2 (hr x1).left, exact h5,
have h5 : back (λ (x1 : f1.states) (x2 : f2.states), g x1 = x2) x1 x2,
from eq.subst h2 (hr x1).right, exact h5},
specialize h3 h4 x y hl, intro h2, apply h1,
rw set.subset.antisymm_iff at h3, cases h3,
rw set.subset_def at h3_left,
exact h3_left φ h2
end
lemma invariance_pull_back (F : set (frame)) {f1 f2 : frame}
(h1 : f1 ∈ F) (h2 : f2 ∉ F) :
(∃ g : f1.states → f2.states, is_surjbddmorphism g) → undefinable F :=
begin
intro h, cases h with g h,
intro φ, by_contradiction h3,
have h4 := h3 f1, specialize h3 f2,
rw ←not_iff_not at h3,
exact (pull_back g h φ (h3.mp h2)) (h4.mp h1)
end
| 9872752ba661e7d758d3f18ad75cb71a241477c5 | d406927ab5617694ec9ea7001f101b7c9e3d9702 | /src/category_theory/concrete_category/basic.lean | 4a74dcdba59c603af80ebd0c60062fddb3a2e3d3 | ["Apache-2.0"] | permissive | alreadydone/mathlib | dc0be621c6c8208c581f5170a8216c5ba6721927 | c982179ec21091d3e102d8a5d9f5fe06c8fafb73 | refs/heads/master | 1,685,523,275,196 | 1,670,184,141,000 | 1,670,184,141,000 | 287,574,545 | 0 | 0 | Apache-2.0 | 1,670,290,714,000 | 1,597,421,623,000 | Lean | UTF-8 | Lean | false | false | 10,168 | lean |
/-
Copyright (c) 2018 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Johannes Hölzl, Reid Barton, Sean Leather, Yury Kudryashov
-/
import category_theory.types
import category_theory.functor.epi_mono
import category_theory.limits.constructions.epi_mono
/-!
# Concrete categories
A concrete category is a category `C` with a fixed faithful functor
`forget : C ⥤ Type*`. We define concrete categories using `class
concrete_category`. In particular, we impose no restrictions on the
carrier type `C`, so `Type` is a concrete category with the identity
forgetful functor.
Each concrete category `C` comes with a canonical faithful functor
`forget C : C ⥤ Type*`. We say that a concrete category `C` admits a
*forgetful functor* to a concrete category `D`, if it has a functor
`forget₂ C D : C ⥤ D` such that `(forget₂ C D) ⋙ (forget D) = forget C`,
see `class has_forget₂`. Due to `faithful.div_comp`, it suffices
to verify that `forget₂.obj` and `forget₂.map` agree with the equality
above; then `forget₂` will satisfy the functor laws automatically, see
`has_forget₂.mk'`.
Two classes helping construct concrete categories in the two most
common cases are provided in the files `bundled_hom` and
`unbundled_hom`, see their documentation for details.
## References
See [Ahrens and Lumsdaine, *Displayed Categories*][ahrens2017] for
related work.
-/
universes w v v' u
namespace category_theory
open category_theory.limits
/--
A concrete category is a category `C` with a fixed faithful functor `forget : C ⥤ Type`.
Note that `concrete_category` potentially depends on three independent universe levels,
* the universe level `w` appearing in `forget : C ⥤ Type w`
* the universe level `v` of the morphisms (i.e. we have a `category.{v} C`)
* the universe level `u` of the objects (i.e `C : Type u`)
They are specified in that order, to avoid unnecessary universe annotations.
-/
class concrete_category (C : Type u) [category.{v} C] :=
(forget [] : C ⥤ Type w)
[forget_faithful : faithful forget]
attribute [instance] concrete_category.forget_faithful
/-- The forgetful functor from a concrete category to `Type u`. -/
@[reducible] def forget (C : Type v) [category C] [concrete_category.{u} C] : C ⥤ Type u :=
concrete_category.forget C
instance concrete_category.types : concrete_category (Type u) :=
{ forget := 𝟭 _ }
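-- Sanity-check sketch (added, with a hypothetical object `X`): with the instance above, the
-- forgetful functor on `Type u` is definitionally the identity functor, so it leaves objects
-- untouched.
example (X : Type u) : (forget (Type u)).obj X = X := rfl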
/--
Provide a coercion to `Type u` for a concrete category. This is not marked as an instance
as it could potentially apply to every type, and so is too expensive in typeclass search.
You can use it on particular examples as:
```
instance : has_coe_to_sort X := concrete_category.has_coe_to_sort X
```
-/
def concrete_category.has_coe_to_sort (C : Type v) [category C] [concrete_category C] :
has_coe_to_sort C (Type u) :=
⟨(concrete_category.forget C).obj⟩
section
local attribute [instance] concrete_category.has_coe_to_sort
variables {C : Type v} [category C] [concrete_category C]
@[simp] lemma forget_obj_eq_coe {X : C} : (forget C).obj X = X := rfl
/-- Usually a bundled hom structure already has a coercion to function
that works with different universes. So we don't use this as a global instance. -/
def concrete_category.has_coe_to_fun {X Y : C} : has_coe_to_fun (X ⟶ Y) (λ f, X → Y) :=
⟨λ f, (forget _).map f⟩
local attribute [instance] concrete_category.has_coe_to_fun
/-- In any concrete category, we can test equality of morphisms by pointwise evaluations.-/
lemma concrete_category.hom_ext {X Y : C} (f g : X ⟶ Y) (w : ∀ x : X, f x = g x) : f = g :=
begin
apply faithful.map_injective (forget C),
ext,
exact w x,
end
@[simp] lemma forget_map_eq_coe {X Y : C} (f : X ⟶ Y) : (forget C).map f = f := rfl
/--
Analogue of `congr_fun h x`,
when `h : f = g` is an equality between morphisms in a concrete category.
-/
lemma congr_hom {X Y : C} {f g : X ⟶ Y} (h : f = g) (x : X) : f x = g x :=
congr_fun (congr_arg (λ k : X ⟶ Y, (k : X → Y)) h) x
lemma coe_id {X : C} : ((𝟙 X) : X → X) = id :=
(forget _).map_id X
lemma coe_comp {X Y Z : C} (f : X ⟶ Y) (g : Y ⟶ Z) :
(f ≫ g : X → Z) = g ∘ f :=
(forget _).map_comp f g
@[simp] lemma id_apply {X : C} (x : X) : ((𝟙 X) : X → X) x = x :=
congr_fun ((forget _).map_id X) x
@[simp] lemma comp_apply {X Y Z : C} (f : X ⟶ Y) (g : Y ⟶ Z) (x : X) :
(f ≫ g) x = g (f x) :=
congr_fun ((forget _).map_comp _ _) x
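-- Usage sketch (added; `f`, `g`, `x` are hypothetical): inside this section the local coercion
-- instances let us evaluate morphisms pointwise and rewrite compositions with `comp_apply`.
example {X Y Z : C} (f : X ⟶ Y) (g : Y ⟶ Z) (x : X) : g (f x) = (f ≫ g) x :=
(comp_apply f g x).symm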
lemma concrete_category.congr_hom {X Y : C} {f g : X ⟶ Y} (h : f = g) (x : X) : f x = g x :=
congr_fun (congr_arg (λ f : X ⟶ Y, (f : X → Y)) h) x
lemma concrete_category.congr_arg {X Y : C} (f : X ⟶ Y) {x x' : X} (h : x = x') : f x = f x' :=
congr_arg (f : X → Y) h
/-- In any concrete category, injective morphisms are monomorphisms. -/
lemma concrete_category.mono_of_injective {X Y : C} (f : X ⟶ Y) (i : function.injective f) :
mono f :=
(forget C).mono_of_mono_map ((mono_iff_injective f).2 i)
lemma concrete_category.injective_of_mono_of_preserves_pullback {X Y : C} (f : X ⟶ Y) [mono f]
[preserves_limits_of_shape walking_cospan (forget C)] : function.injective f :=
(mono_iff_injective ((forget C).map f)).mp infer_instance
lemma concrete_category.mono_iff_injective_of_preserves_pullback {X Y : C} (f : X ⟶ Y)
[preserves_limits_of_shape walking_cospan (forget C)] : mono f ↔ function.injective f :=
((forget C).mono_map_iff_mono _).symm.trans (mono_iff_injective _)
/-- In any concrete category, surjective morphisms are epimorphisms. -/
lemma concrete_category.epi_of_surjective {X Y : C} (f : X ⟶ Y) (s : function.surjective f) :
epi f :=
(forget C).epi_of_epi_map ((epi_iff_surjective f).2 s)
lemma concrete_category.surjective_of_epi_of_preserves_pushout {X Y : C} (f : X ⟶ Y) [epi f]
[preserves_colimits_of_shape walking_span (forget C)] : function.surjective f :=
(epi_iff_surjective ((forget C).map f)).mp infer_instance
lemma concrete_category.epi_iff_surjective_of_preserves_pushout {X Y : C} (f : X ⟶ Y)
[preserves_colimits_of_shape walking_span (forget C)] : epi f ↔ function.surjective f :=
((forget C).epi_map_iff_epi _).symm.trans (epi_iff_surjective _)
lemma concrete_category.bijective_of_is_iso {X Y : C} (f : X ⟶ Y) [is_iso f] :
function.bijective ((forget C).map f) :=
by { rw ← is_iso_iff_bijective, apply_instance, }
@[simp] lemma concrete_category.has_coe_to_fun_Type {X Y : Type u} (f : X ⟶ Y) :
coe_fn f = f :=
rfl
end
/--
`has_forget₂ C D`, where `C` and `D` are both concrete categories, provides a functor
`forget₂ C D : C ⥤ D` and a proof that `forget₂ ⋙ (forget D) = forget C`.
-/
class has_forget₂ (C : Type v) (D : Type v') [category C] [concrete_category.{u} C] [category D]
[concrete_category.{u} D] :=
(forget₂ : C ⥤ D)
(forget_comp : forget₂ ⋙ (forget D) = forget C . obviously)
/-- The forgetful functor `C ⥤ D` between concrete categories for which we have an instance
`has_forget₂ C D`. -/
@[reducible] def forget₂ (C : Type v) (D : Type v') [category C] [concrete_category C] [category D]
[concrete_category D] [has_forget₂ C D] : C ⥤ D :=
has_forget₂.forget₂
instance forget₂_faithful (C : Type v) (D : Type v') [category C] [concrete_category C] [category D]
[concrete_category D] [has_forget₂ C D] : faithful (forget₂ C D) :=
has_forget₂.forget_comp.faithful_of_comp
instance forget₂_preserves_monomorphisms (C : Type v) (D : Type v') [category C]
[concrete_category C] [category D] [concrete_category D] [has_forget₂ C D]
[(forget C).preserves_monomorphisms] : (forget₂ C D).preserves_monomorphisms :=
have (forget₂ C D ⋙ forget D).preserves_monomorphisms,
by { simp only [has_forget₂.forget_comp], apply_instance },
by exactI functor.preserves_monomorphisms_of_preserves_of_reflects _ (forget D)
instance forget₂_preserves_epimorphisms (C : Type v) (D : Type v') [category C]
[concrete_category C] [category D] [concrete_category D] [has_forget₂ C D]
[(forget C).preserves_epimorphisms] : (forget₂ C D).preserves_epimorphisms :=
have (forget₂ C D ⋙ forget D).preserves_epimorphisms,
by { simp only [has_forget₂.forget_comp], apply_instance },
by exactI functor.preserves_epimorphisms_of_preserves_of_reflects _ (forget D)
instance induced_category.concrete_category {C : Type v} {D : Type v'} [category D]
[concrete_category D] (f : C → D) :
concrete_category (induced_category D f) :=
{ forget := induced_functor f ⋙ forget D }
instance induced_category.has_forget₂ {C : Type v} {D : Type v'} [category D] [concrete_category D]
(f : C → D) :
has_forget₂ (induced_category D f) D :=
{ forget₂ := induced_functor f,
forget_comp := rfl }
instance full_subcategory.concrete_category {C : Type v} [category C] [concrete_category C]
(Z : C → Prop) : concrete_category (full_subcategory Z) :=
{ forget := full_subcategory_inclusion Z ⋙ forget C }
instance full_subcategory.has_forget₂ {C : Type v} [category C] [concrete_category C]
(Z : C → Prop) : has_forget₂ (full_subcategory Z) C :=
{ forget₂ := full_subcategory_inclusion Z,
forget_comp := rfl }
/--
In order to construct a “partially forgetting” functor, we do not need to verify functor laws;
it suffices to ensure that compositions agree with `forget₂ C D ⋙ forget D = forget C`.
-/
def has_forget₂.mk' {C : Type v} {D : Type v'} [category C] [concrete_category C] [category D]
[concrete_category D] (obj : C → D) (h_obj : ∀ X, (forget D).obj (obj X) = (forget C).obj X)
(map : Π {X Y}, (X ⟶ Y) → (obj X ⟶ obj Y))
(h_map : ∀ {X Y} {f : X ⟶ Y}, (forget D).map (map f) == (forget C).map f) :
has_forget₂ C D :=
{ forget₂ := faithful.div _ _ _ @h_obj _ @h_map,
forget_comp := by apply faithful.div_comp }
/-- Every forgetful functor factors through the identity functor. This is not a global instance as
it is prone to creating type class resolution loops. -/
def has_forget_to_Type (C : Type v) [category C] [concrete_category C] :
has_forget₂ C (Type u) :=
{ forget₂ := forget C,
forget_comp := functor.comp_id _ }
end category_theory
| c300c579606c5b8b98efefe2c1d73b77434c64d9 | 63abd62053d479eae5abf4951554e1064a4c45b4 | /src/data/equiv/mul_add.lean | 5db37aea7094a4d8fe2a7a006125f3d988f13157 | ["Apache-2.0"] | permissive | Lix0120/mathlib | 0020745240315ed0e517cbf32e738d8f9811dd80 | e14c37827456fc6707f31b4d1d16f1f3a3205e91 | refs/heads/master | 1,673,102,855,024 | 1,604,151,044,000 | 1,604,151,044,000 | 308,930,245 | 0 | 0 | Apache-2.0 | 1,604,164,710,000 | 1,604,163,547,000 | null | UTF-8 | Lean | false | false | 20,050 | lean |
/-
Copyright (c) 2018 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Callum Sutton, Yury Kudryashov
-/
import data.equiv.basic
import deprecated.group
import algebra.group.hom
/-!
# Multiplicative and additive equivs
In this file we define two extensions of `equiv` called `add_equiv` and `mul_equiv`, which are
datatypes representing isomorphisms of `add_monoid`s/`add_group`s and `monoid`s/`group`s. We also
introduce the corresponding groups of automorphisms `add_aut` and `mul_aut`.
## Notations
The extended equivs all have coercions to functions, and the coercions are the canonical
notation when treating the isomorphisms as maps.
## Implementation notes
The fields for `mul_equiv`, `add_equiv` now avoid the unbundled `is_mul_hom` and `is_add_hom`, as
these are deprecated.
Definition of multiplication in the groups of automorphisms agrees with function composition,
multiplication in `equiv.perm`, and multiplication in `category_theory.End`, not with
`category_theory.comp`.
## Tags
equiv, mul_equiv, add_equiv, mul_aut, add_aut
-/
variables {A : Type*} {B : Type*} {M : Type*} {N : Type*} {P : Type*} {G : Type*} {H : Type*}
set_option old_structure_cmd true
/-- add_equiv α β is the type of an equiv α ≃ β which preserves addition. -/
structure add_equiv (A B : Type*) [has_add A] [has_add B] extends A ≃ B, add_hom A B
/-- The `equiv` underlying an `add_equiv`. -/
add_decl_doc add_equiv.to_equiv
/-- The `add_hom` underlying an `add_equiv`. -/
add_decl_doc add_equiv.to_add_hom
/-- `mul_equiv α β` is the type of an equiv `α ≃ β` which preserves multiplication. -/
@[to_additive]
structure mul_equiv (M N : Type*) [has_mul M] [has_mul N] extends M ≃ N, mul_hom M N
/-- The `equiv` underlying a `mul_equiv`. -/
add_decl_doc mul_equiv.to_equiv
/-- The `mul_hom` underlying a `mul_equiv`. -/
add_decl_doc mul_equiv.to_mul_hom
infix ` ≃* `:25 := mul_equiv
infix ` ≃+ `:25 := add_equiv
namespace mul_equiv
@[to_additive]
instance [has_mul M] [has_mul N] : has_coe_to_fun (M ≃* N) := ⟨_, mul_equiv.to_fun⟩
variables [has_mul M] [has_mul N] [has_mul P]
@[simp, to_additive]
lemma to_fun_apply {f : M ≃* N} {m : M} : f.to_fun m = f m := rfl
@[simp, to_additive]
lemma to_equiv_apply {f : M ≃* N} {m : M} : f.to_equiv m = f m := rfl
/-- A multiplicative isomorphism preserves multiplication (canonical form). -/
@[simp, to_additive]
lemma map_mul (f : M ≃* N) : ∀ x y, f (x * y) = f x * f y := f.map_mul'
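-- Usage sketch (added; `e`, `x`, `y` are hypothetical): a `mul_equiv` is applied like a function,
-- and `map_mul` rewrites it across products.
example (e : M ≃* N) (x y : M) : e (x * y) = e x * e y := e.map_mul x y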
/-- A multiplicative isomorphism preserves multiplication (deprecated). -/
@[to_additive]
instance (h : M ≃* N) : is_mul_hom h := ⟨h.map_mul⟩
/-- Makes a multiplicative isomorphism from a bijection which preserves multiplication. -/
@[to_additive "Makes an additive isomorphism from a bijection which preserves addition."]
def mk' (f : M ≃ N) (h : ∀ x y, f (x * y) = f x * f y) : M ≃* N :=
⟨f.1, f.2, f.3, f.4, h⟩
@[to_additive]
protected lemma bijective (e : M ≃* N) : function.bijective e := e.to_equiv.bijective
@[to_additive]
protected lemma injective (e : M ≃* N) : function.injective e := e.to_equiv.injective
@[to_additive]
protected lemma surjective (e : M ≃* N) : function.surjective e := e.to_equiv.surjective
/-- The identity map is a multiplicative isomorphism. -/
@[refl, to_additive "The identity map is an additive isomorphism."]
def refl (M : Type*) [has_mul M] : M ≃* M :=
{ map_mul' := λ _ _, rfl,
..equiv.refl _}
instance : inhabited (M ≃* M) := ⟨refl M⟩
/-- The inverse of an isomorphism is an isomorphism. -/
@[symm, to_additive "The inverse of an isomorphism is an isomorphism."]
def symm (h : M ≃* N) : N ≃* M :=
{ map_mul' := λ n₁ n₂, h.injective $
begin
have : ∀ x, h (h.to_equiv.symm.to_fun x) = x := h.to_equiv.apply_symm_apply,
simp only [this, h.map_mul]
end,
.. h.to_equiv.symm}
/-- See Note [custom simps projection] -/
@[to_additive add_equiv.simps.inv_fun "See Note [custom simps projection]"]
def simps.inv_fun (e : M ≃* N) : N → M := e.symm
initialize_simps_projections add_equiv (to_fun → apply, inv_fun → symm_apply)
initialize_simps_projections mul_equiv (to_fun → apply, inv_fun → symm_apply)
@[simp, to_additive]
theorem to_equiv_symm (f : M ≃* N) : f.symm.to_equiv = f.to_equiv.symm := rfl
@[simp, to_additive]
theorem coe_mk (f : M → N) (g h₁ h₂ h₃) : ⇑(mul_equiv.mk f g h₁ h₂ h₃) = f := rfl
@[simp, to_additive]
theorem coe_symm_mk (f : M → N) (g h₁ h₂ h₃) : ⇑(mul_equiv.mk f g h₁ h₂ h₃).symm = g := rfl
/-- Transitivity of multiplication-preserving isomorphisms -/
@[trans, to_additive "Transitivity of addition-preserving isomorphisms"]
def trans (h1 : M ≃* N) (h2 : N ≃* P) : (M ≃* P) :=
{ map_mul' := λ x y, show h2 (h1 (x * y)) = h2 (h1 x) * h2 (h1 y),
by rw [h1.map_mul, h2.map_mul],
..h1.to_equiv.trans h2.to_equiv }
/-- e.right_inv in canonical form -/
@[simp, to_additive]
lemma apply_symm_apply (e : M ≃* N) : ∀ y, e (e.symm y) = y :=
e.to_equiv.apply_symm_apply
/-- e.left_inv in canonical form -/
@[simp, to_additive]
lemma symm_apply_apply (e : M ≃* N) : ∀ x, e.symm (e x) = x :=
e.to_equiv.symm_apply_apply
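-- Round-trip sketch (added; `e`, `x`, `y` are hypothetical): the two simp lemmas above say that
-- an isomorphism composed with its inverse, in either order, is the identity.
example (e : M ≃* N) (x : M) (y : N) : e.symm (e x) = x ∧ e (e.symm y) = y :=
⟨e.symm_apply_apply x, e.apply_symm_apply y⟩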
@[simp, to_additive]
theorem refl_apply (m : M) : refl M m = m := rfl
@[simp, to_additive]
theorem trans_apply (e₁ : M ≃* N) (e₂ : N ≃* P) (m : M) : e₁.trans e₂ m = e₂ (e₁ m) := rfl
@[simp, to_additive] theorem apply_eq_iff_eq (e : M ≃* N) {x y : M} : e x = e y ↔ x = y :=
e.injective.eq_iff
@[to_additive]
lemma apply_eq_iff_symm_apply (e : M ≃* N) {x : M} {y : N} : e x = y ↔ x = e.symm y :=
e.to_equiv.apply_eq_iff_eq_symm_apply
@[to_additive]
lemma symm_apply_eq (e : M ≃* N) {x y} : e.symm x = y ↔ x = e y :=
e.to_equiv.symm_apply_eq
@[to_additive]
lemma eq_symm_apply (e : M ≃* N) {x y} : y = e.symm x ↔ e y = x :=
e.to_equiv.eq_symm_apply
/-- a multiplicative equiv of monoids sends 1 to 1 (and is hence a monoid isomorphism) -/
@[simp, to_additive]
lemma map_one {M N} [monoid M] [monoid N] (h : M ≃* N) : h 1 = 1 :=
by rw [←mul_one (h 1), ←h.apply_symm_apply 1, ←h.map_mul, one_mul]
@[simp, to_additive]
lemma map_eq_one_iff {M N} [monoid M] [monoid N] (h : M ≃* N) {x : M} :
h x = 1 ↔ x = 1 :=
h.map_one ▸ h.to_equiv.apply_eq_iff_eq
@[to_additive]
lemma map_ne_one_iff {M N} [monoid M] [monoid N] (h : M ≃* N) {x : M} :
h x ≠ 1 ↔ x ≠ 1 :=
⟨mt h.map_eq_one_iff.2, mt h.map_eq_one_iff.1⟩
/-- A bijective `monoid` homomorphism is an isomorphism -/
@[to_additive "A bijective `add_monoid` homomorphism is an isomorphism"]
noncomputable def of_bijective {M N} [monoid M] [monoid N] (f : M →* N)
(hf : function.bijective f) : M ≃* N :=
{ map_mul' := f.map_mul',
..equiv.of_bijective f hf }
/--
Extract the forward direction of a multiplicative equivalence
as a multiplication-preserving function.
-/
@[to_additive "Extract the forward direction of an additive equivalence
as an addition-preserving function."]
def to_monoid_hom {M N} [monoid M] [monoid N] (h : M ≃* N) : (M →* N) :=
{ map_one' := h.map_one, .. h }
@[simp, to_additive]
lemma coe_to_monoid_hom {M N} [monoid M] [monoid N] (e : M ≃* N) :
⇑e.to_monoid_hom = e :=
rfl
@[to_additive]
lemma to_monoid_hom_apply {M N} [monoid M] [monoid N] (e : M ≃* N) (x : M) :
e.to_monoid_hom x = e x :=
rfl
/-- A multiplicative equivalence of groups preserves inversion. -/
@[simp, to_additive]
lemma map_inv [group G] [group H] (h : G ≃* H) (x : G) : h x⁻¹ = (h x)⁻¹ :=
h.to_monoid_hom.map_inv x
/-- A multiplicative bijection between two monoids is a monoid hom
(deprecated -- use to_monoid_hom). -/
@[to_additive]
instance is_monoid_hom {M N} [monoid M] [monoid N] (h : M ≃* N) : is_monoid_hom h :=
⟨h.map_one⟩
/-- A multiplicative bijection between two groups is a group hom
(deprecated -- use to_monoid_hom). -/
@[to_additive]
instance is_group_hom {G H} [group G] [group H] (h : G ≃* H) :
is_group_hom h := { map_mul := h.map_mul }
/-- Two multiplicative isomorphisms agree if they are defined by the
same underlying function. -/
@[ext, to_additive
"Two additive isomorphisms agree if they are defined by the same underlying function."]
lemma ext {f g : mul_equiv M N} (h : ∀ x, f x = g x) : f = g :=
begin
have h₁ : f.to_equiv = g.to_equiv := equiv.ext h,
cases f, cases g, congr,
{ exact (funext h) },
{ exact congr_arg equiv.inv_fun h₁ }
end
@[to_additive] lemma to_monoid_hom_injective
{M N} [monoid M] [monoid N] : function.injective (to_monoid_hom : (M ≃* N) → M →* N) :=
λ f g h, mul_equiv.ext (monoid_hom.ext_iff.1 h)
attribute [ext] add_equiv.ext
end mul_equiv
-- We don't use `to_additive` to generate definition because it fails to tell Lean about
-- equational lemmas
/-- Given a pair of additive monoid homomorphisms `f`, `g` such that `g.comp f = id` and
`f.comp g = id`, returns an additive equivalence with `to_fun = f` and `inv_fun = g`. This
constructor is useful if the underlying type(s) have specialized `ext` lemmas for additive
monoid homomorphisms. -/
def add_monoid_hom.to_add_equiv [add_monoid M] [add_monoid N] (f : M →+ N) (g : N →+ M)
(h₁ : g.comp f = add_monoid_hom.id _) (h₂ : f.comp g = add_monoid_hom.id _) :
M ≃+ N :=
{ to_fun := f,
inv_fun := g,
left_inv := add_monoid_hom.congr_fun h₁,
right_inv := add_monoid_hom.congr_fun h₂,
map_add' := f.map_add }
/-- Given a pair of monoid homomorphisms `f`, `g` such that `g.comp f = id` and `f.comp g = id`,
returns a multiplicative equivalence with `to_fun = f` and `inv_fun = g`. This constructor is
useful if the underlying type(s) have specialized `ext` lemmas for monoid homomorphisms. -/
@[to_additive]
def monoid_hom.to_mul_equiv [monoid M] [monoid N] (f : M →* N) (g : N →* M)
(h₁ : g.comp f = monoid_hom.id _) (h₂ : f.comp g = monoid_hom.id _) :
M ≃* N :=
{ to_fun := f,
inv_fun := g,
left_inv := monoid_hom.congr_fun h₁,
right_inv := monoid_hom.congr_fun h₂,
map_mul' := f.map_mul }
@[simp, to_additive]
lemma monoid_hom.coe_to_mul_equiv [monoid M] [monoid N] (f : M →* N) (g : N →* M) (h₁ h₂) :
⇑(f.to_mul_equiv g h₁ h₂) = f := rfl
/-- An additive equivalence of additive groups preserves subtraction. -/
lemma add_equiv.map_sub [add_group A] [add_group B] (h : A ≃+ B) (x y : A) :
h (x - y) = h x - h y :=
h.to_add_monoid_hom.map_sub x y
instance add_equiv.inhabited {M : Type*} [has_add M] : inhabited (M ≃+ M) := ⟨add_equiv.refl M⟩
/-- The group of multiplicative automorphisms. -/
@[to_additive "The group of additive automorphisms."]
def mul_aut (M : Type*) [has_mul M] := M ≃* M
attribute [reducible] mul_aut add_aut
namespace mul_aut
variables (M) [has_mul M]
/--
The group operation on multiplicative automorphisms is defined by
`λ g h, mul_equiv.trans h g`.
This means that multiplication agrees with composition, `(g*h)(x) = g (h x)`.
-/
instance : group (mul_aut M) :=
by refine_struct
{ mul := λ g h, mul_equiv.trans h g,
one := mul_equiv.refl M,
inv := mul_equiv.symm };
intros; ext; try { refl }; apply equiv.left_inv
instance : inhabited (mul_aut M) := ⟨1⟩
@[simp] lemma coe_mul (e₁ e₂ : mul_aut M) : ⇑(e₁ * e₂) = e₁ ∘ e₂ := rfl
@[simp] lemma coe_one : ⇑(1 : mul_aut M) = id := rfl
lemma mul_def (e₁ e₂ : mul_aut M) : e₁ * e₂ = e₂.trans e₁ := rfl
lemma one_def : (1 : mul_aut M) = mul_equiv.refl _ := rfl
lemma inv_def (e₁ : mul_aut M) : e₁⁻¹ = e₁.symm := rfl
@[simp] lemma mul_apply (e₁ e₂ : mul_aut M) (m : M) : (e₁ * e₂) m = e₁ (e₂ m) := rfl
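-- Illustration (added; `g`, `h`, `x` are hypothetical): the doc comment above promises
-- `(g*h)(x) = g (h x)`, which is exactly `mul_apply`.
example (g h : mul_aut M) (x : M) : (g * h) x = g (h x) := mul_apply g h x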
@[simp] lemma one_apply (m : M) : (1 : mul_aut M) m = m := rfl
@[simp] lemma apply_inv_self (e : mul_aut M) (m : M) : e (e⁻¹ m) = m :=
mul_equiv.apply_symm_apply _ _
@[simp] lemma inv_apply_self (e : mul_aut M) (m : M) : e⁻¹ (e m) = m :=
mul_equiv.apply_symm_apply _ _
/-- Monoid hom from the group of multiplicative automorphisms to the group of permutations. -/
def to_perm : mul_aut M →* equiv.perm M :=
by refine_struct { to_fun := mul_equiv.to_equiv }; intros; refl
/-- group conjugation as a group homomorphism into the automorphism group.
`conj g h = g * h * g⁻¹` -/
def conj [group G] : G →* mul_aut G :=
{ to_fun := λ g,
{ to_fun := λ h, g * h * g⁻¹,
inv_fun := λ h, g⁻¹ * h * g,
left_inv := λ _, by simp [mul_assoc],
right_inv := λ _, by simp [mul_assoc],
map_mul' := by simp [mul_assoc] },
map_mul' := λ _ _, by ext; simp [mul_assoc],
map_one' := by ext; simp [mul_assoc] }
@[simp] lemma conj_apply [group G] (g h : G) : conj g h = g * h * g⁻¹ := rfl
@[simp] lemma conj_symm_apply [group G] (g h : G) : (conj g).symm h = g⁻¹ * h * g := rfl
@[simp] lemma conj_inv_apply {G : Type*} [group G] (g h : G) : (conj g)⁻¹ h = g⁻¹ * h * g := rfl
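-- Quick check (added; `g` is hypothetical): conjugating an element by itself does nothing;
-- `simp` closes this using `conj_apply` and the standard group simp lemmas.
example [group G] (g : G) : conj g g = g := by simp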
end mul_aut
namespace add_aut
variables (A) [has_add A]
/--
The group operation on additive automorphisms is defined by
`λ g h, mul_equiv.trans h g`.
This means that multiplication agrees with composition, `(g*h)(x) = g (h x)`.
-/
instance group : group (add_aut A) :=
by refine_struct
{ mul := λ g h, add_equiv.trans h g,
one := add_equiv.refl A,
inv := add_equiv.symm };
intros; ext; try { refl }; apply equiv.left_inv
instance : inhabited (add_aut A) := ⟨1⟩
@[simp] lemma coe_mul (e₁ e₂ : add_aut A) : ⇑(e₁ * e₂) = e₁ ∘ e₂ := rfl
@[simp] lemma coe_one : ⇑(1 : add_aut A) = id := rfl
lemma mul_def (e₁ e₂ : add_aut A) : e₁ * e₂ = e₂.trans e₁ := rfl
lemma one_def : (1 : add_aut A) = add_equiv.refl _ := rfl
lemma inv_def (e₁ : add_aut A) : e₁⁻¹ = e₁.symm := rfl
@[simp] lemma mul_apply (e₁ e₂ : add_aut A) (a : A) : (e₁ * e₂) a = e₁ (e₂ a) := rfl
@[simp] lemma one_apply (a : A) : (1 : add_aut A) a = a := rfl
@[simp] lemma apply_inv_self (e : add_aut A) (a : A) : e⁻¹ (e a) = a :=
add_equiv.apply_symm_apply _ _
@[simp] lemma inv_apply_self (e : add_aut A) (a : A) : e (e⁻¹ a) = a :=
add_equiv.apply_symm_apply _ _
/-- Monoid hom from the group of additive automorphisms to the group of permutations. -/
def to_perm : add_aut A →* equiv.perm A :=
by refine_struct { to_fun := add_equiv.to_equiv }; intros; refl
end add_aut
/-- A group is isomorphic to its group of units. -/
@[to_additive to_add_units "An additive group is isomorphic to its group of additive units"]
def to_units {G} [group G] : G ≃* units G :=
{ to_fun := λ x, ⟨x, x⁻¹, mul_inv_self _, inv_mul_self _⟩,
inv_fun := coe,
left_inv := λ x, rfl,
right_inv := λ u, units.ext rfl,
map_mul' := λ x y, units.ext rfl }
namespace units
variables [monoid M] [monoid N] [monoid P]
/-- A multiplicative equivalence of monoids defines a multiplicative equivalence
of their groups of units. -/
def map_equiv (h : M ≃* N) : units M ≃* units N :=
{ inv_fun := map h.symm.to_monoid_hom,
left_inv := λ u, ext $ h.left_inv u,
right_inv := λ u, ext $ h.right_inv u,
.. map h.to_monoid_hom }
/-- Left multiplication by a unit of a monoid is a permutation of the underlying type. -/
@[to_additive "Left addition of an additive unit is a permutation of the underlying type."]
def mul_left (u : units M) : equiv.perm M :=
{ to_fun := λx, u * x,
inv_fun := λx, ↑u⁻¹ * x,
left_inv := u.inv_mul_cancel_left,
right_inv := u.mul_inv_cancel_left }
@[simp, to_additive]
lemma coe_mul_left (u : units M) : ⇑u.mul_left = (*) u := rfl
@[simp, to_additive]
lemma mul_left_symm (u : units M) : u.mul_left.symm = u⁻¹.mul_left :=
equiv.ext $ λ x, rfl
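-- Pointwise description (added; `u`, `x` are hypothetical): the permutation `u.mul_left` really is
-- left multiplication by the underlying element of `u`.
example (u : units M) (x : M) : u.mul_left x = ↑u * x := rfl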
/-- Right multiplication by a unit of a monoid is a permutation of the underlying type. -/
@[to_additive "Right addition of an additive unit is a permutation of the underlying type."]
def mul_right (u : units M) : equiv.perm M :=
{ to_fun := λx, x * u,
inv_fun := λx, x * ↑u⁻¹,
left_inv := λ x, mul_inv_cancel_right x u,
right_inv := λ x, inv_mul_cancel_right x u }
@[simp, to_additive]
lemma coe_mul_right (u : units M) : ⇑u.mul_right = λ x : M, x * u := rfl
@[simp, to_additive]
lemma mul_right_symm (u : units M) : u.mul_right.symm = u⁻¹.mul_right :=
equiv.ext $ λ x, rfl
end units
namespace equiv
section group
variables [group G]
/-- Left multiplication in a `group` is a permutation of the underlying type. -/
@[to_additive "Left addition in an `add_group` is a permutation of the underlying type."]
protected def mul_left (a : G) : perm G := (to_units a).mul_left
@[simp, to_additive]
lemma coe_mul_left (a : G) : ⇑(equiv.mul_left a) = (*) a := rfl
@[simp, to_additive]
lemma mul_left_symm (a : G) : (equiv.mul_left a).symm = equiv.mul_left a⁻¹ :=
ext $ λ x, rfl
/-- Right multiplication in a `group` is a permutation of the underlying type. -/
@[to_additive "Right addition in an `add_group` is a permutation of the underlying type."]
protected def mul_right (a : G) : perm G := (to_units a).mul_right
@[simp, to_additive]
lemma coe_mul_right (a : G) : ⇑(equiv.mul_right a) = λ x, x * a := rfl
@[simp, to_additive]
lemma mul_right_symm (a : G) : (equiv.mul_right a).symm = equiv.mul_right a⁻¹ :=
ext $ λ x, rfl
variable (G)
/-- Inversion on a `group` is a permutation of the underlying type. -/
@[to_additive "Negation on an `add_group` is a permutation of the underlying type."]
protected def inv : perm G :=
{ to_fun := λa, a⁻¹,
inv_fun := λa, a⁻¹,
left_inv := assume a, inv_inv a,
right_inv := assume a, inv_inv a }
variable {G}
@[simp, to_additive]
lemma coe_inv : ⇑(equiv.inv G) = has_inv.inv := rfl
@[simp, to_additive]
lemma inv_symm : (equiv.inv G).symm = equiv.inv G := rfl
end group
end equiv
section type_tags
/-- Reinterpret `G ≃+ H` as `multiplicative G ≃* multiplicative H`. -/
def add_equiv.to_multiplicative [add_monoid G] [add_monoid H] :
(G ≃+ H) ≃ (multiplicative G ≃* multiplicative H) :=
{ to_fun := λ f, ⟨f.to_add_monoid_hom.to_multiplicative, f.symm.to_add_monoid_hom.to_multiplicative, f.3, f.4, f.5⟩,
inv_fun := λ f, ⟨f.to_monoid_hom, f.symm.to_monoid_hom, f.3, f.4, f.5⟩,
left_inv := λ x, by { ext, refl, },
right_inv := λ x, by { ext, refl, }, }
/-- Reinterpret `G ≃* H` as `additive G ≃+ additive H`. -/
def mul_equiv.to_additive [monoid G] [monoid H] :
(G ≃* H) ≃ (additive G ≃+ additive H) :=
{ to_fun := λ f, ⟨f.to_monoid_hom.to_additive, f.symm.to_monoid_hom.to_additive, f.3, f.4, f.5⟩,
inv_fun := λ f, ⟨f.to_add_monoid_hom, f.symm.to_add_monoid_hom, f.3, f.4, f.5⟩,
left_inv := λ x, by { ext, refl, },
right_inv := λ x, by { ext, refl, }, }
/-- Reinterpret `additive G ≃+ H` as `G ≃* multiplicative H`. -/
def add_equiv.to_multiplicative' [monoid G] [add_monoid H] :
(additive G ≃+ H) ≃ (G ≃* multiplicative H) :=
{ to_fun := λ f, ⟨f.to_add_monoid_hom.to_multiplicative', f.symm.to_add_monoid_hom.to_multiplicative'', f.3, f.4, f.5⟩,
inv_fun := λ f, ⟨f.to_monoid_hom, f.symm.to_monoid_hom, f.3, f.4, f.5⟩,
left_inv := λ x, by { ext, refl, },
right_inv := λ x, by { ext, refl, }, }
/-- Reinterpret `G ≃* multiplicative H` as `additive G ≃+ H`. -/
def mul_equiv.to_additive' [monoid G] [add_monoid H] :
(G ≃* multiplicative H) ≃ (additive G ≃+ H) :=
add_equiv.to_multiplicative'.symm
/-- Reinterpret `G ≃+ additive H` as `multiplicative G ≃* H`. -/
def add_equiv.to_multiplicative'' [add_monoid G] [monoid H] :
(G ≃+ additive H) ≃ (multiplicative G ≃* H) :=
{ to_fun := λ f, ⟨f.to_add_monoid_hom.to_multiplicative'', f.symm.to_add_monoid_hom.to_multiplicative', f.3, f.4, f.5⟩,
inv_fun := λ f, ⟨f.to_monoid_hom, f.symm.to_monoid_hom, f.3, f.4, f.5⟩,
left_inv := λ x, by { ext, refl, },
right_inv := λ x, by { ext, refl, }, }
/-- Reinterpret `multiplicative G ≃* H` as `G ≃+ additive H`. -/
def mul_equiv.to_additive'' [add_monoid G] [monoid H] :
(multiplicative G ≃* H) ≃ (G ≃+ additive H) :=
add_equiv.to_multiplicative''.symm
end type_tags
| adbdb65117fa04178796c9a8aa29444a6bd2faa3 | b7f22e51856f4989b970961f794f1c435f9b8f78 | /library/data/int/countable.lean | 58f6a6b3eb39d8c70bdc5fc9a2411ed1c6277b64 | ["Apache-2.0"] | permissive | soonhokong/lean | cb8aa01055ffe2af0fb99a16b4cda8463b882cd1 | 38607e3eb57f57f77c0ac114ad169e9e4262e24f | refs/heads/master | 1,611,187,284,081 | 1,450,766,737,000 | 1,476,122,547,000 | 11,513,992 | 2 | 0 | null | 1,401,763,102,000 | 1,374,182,235,000 | C++ | UTF-8 | Lean | false | false | 1,209 | lean |
/-
Copyright (c) 2015 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Leonardo de Moura
-/
import data.equiv data.int.basic data.encodable data.countable
open equiv bool sum
namespace int
definition int_equiv_bool_nat : int ≃ (bool × nat) :=
equiv.mk
(λ i, match i with of_nat a := (tt, a) | neg_succ_of_nat a := (ff, a) end)
(λ p, match p with (tt, a) := of_nat a | (ff, a) := neg_succ_of_nat a end)
(λ i, begin cases i, repeat reflexivity end)
(λ p, begin cases p with b a, cases b, repeat reflexivity end)
definition int_equiv_nat : int ≃ nat :=
calc int ≃ (bool × nat) : int_equiv_bool_nat
... ≃ ((unit + unit) × nat) : prod_congr bool_equiv_unit_sum_unit !_root_.equiv.refl
... ≃ (unit × nat) + (unit × nat) : sum_prod_distrib
... ≃ nat + nat : sum_congr !prod_unit_left !prod_unit_left
... ≃ nat : nat_sum_nat_equiv_nat
definition encodable_int [instance] : encodable int :=
encodable_of_equiv (_root_.equiv.symm int_equiv_nat)
lemma countable_int : countable int :=
countable_of_encodable encodable_int
end int
| 4222ac4a8c41ad9439a322e20c60bc20af4c2415 | 82e44445c70db0f03e30d7be725775f122d72f3e | /src/topology/algebra/uniform_field.lean | ba66a15ce40cc15ea77f3404d9f932adcccb0b37 | ["Apache-2.0"] | permissive | stjordanis/mathlib | 51e286d19140e3788ef2c470bc7b953e4991f0c9 | 2568d41bca08f5d6bf39d915434c8447e21f42ee | refs/heads/master | 1,631,748,053,501 | 1,627,938,886,000 | 1,627,938,886,000 | 228,728,358 | 0 | 0 | Apache-2.0 | 1,576,630,588,000 | 1,576,630,587,000 | null | UTF-8 | Lean | false | false | 6,605 | lean |
/-
Copyright (c) 2019 Patrick Massot. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Patrick Massot
-/
import topology.algebra.uniform_ring
import topology.algebra.field
/-!
# Completion of topological fields
The goal of this file is to prove the main part of Proposition 7 of Bourbaki GT III 6.8 :
The completion `hat K` of a Hausdorff topological field is a field if the image under
the mapping `x ↦ x⁻¹` of every Cauchy filter (with respect to the additive uniform structure)
which does not have a cluster point at `0` is a Cauchy filter
(with respect to the additive uniform structure).
Bourbaki does not give any detail here; he refers to the general discussion of extending
functions defined on a dense subset with values in a complete Hausdorff space. In particular
the subtlety about clustering at zero is left entirely to the reader.
Note that the separated completion of a non-separated topological field is the zero ring, hence
the separation assumption is needed. Indeed the kernel of the completion map is the closure of
zero, which is an ideal. Hence it is either zero (and the field is separated) or the full field,
which implies `1` is sent to zero and the completion ring is trivial.
The main definition is `completable_top_field` which packages the assumptions as a Prop-valued
type class and the main results are the instances `field_completion` and
`topological_division_ring_completion`.
-/
noncomputable theory
open_locale classical uniformity topological_space
open set uniform_space uniform_space.completion filter
variables (K : Type*) [field K] [uniform_space K]
local notation `hat` := completion
@[priority 100]
instance [separated_space K] : nontrivial (hat K) :=
⟨⟨0, 1, λ h, zero_ne_one $ (uniform_embedding_coe K).inj h⟩⟩
/--
A topological field is completable if it is separated and the image under
the mapping x ↦ x⁻¹ of every Cauchy filter (with respect to the additive uniform structure)
which does not have a cluster point at 0 is a Cauchy filter
(with respect to the additive uniform structure). This ensures the completion is
a field.
-/
class completable_top_field extends separated_space K : Prop :=
(nice : ∀ F : filter K, cauchy F → 𝓝 0 ⊓ F = ⊥ → cauchy (map (λ x, x⁻¹) F))
variables {K}
/-- Extension of inversion to the completion of a field. -/
def hat_inv : hat K → hat K := dense_inducing_coe.extend (λ x : K, (coe x⁻¹ : hat K))
lemma continuous_hat_inv [completable_top_field K] {x : hat K} (h : x ≠ 0) :
continuous_at hat_inv x :=
begin
haveI : regular_space (hat K) := completion.regular_space K,
refine dense_inducing_coe.continuous_at_extend _,
apply mem_sets_of_superset (compl_singleton_mem_nhds h),
intros y y_ne,
rw mem_compl_singleton_iff at y_ne,
apply complete_space.complete,
rw ← filter.map_map,
apply cauchy.map _ (completion.uniform_continuous_coe K),
apply completable_top_field.nice,
{ haveI := dense_inducing_coe.comap_nhds_ne_bot y,
apply cauchy_nhds.comap,
{ rw completion.comap_coe_eq_uniformity,
exact le_rfl } },
{ have eq_bot : 𝓝 (0 : hat K) ⊓ 𝓝 y = ⊥,
{ by_contradiction h,
exact y_ne (eq_of_nhds_ne_bot $ ne_bot_iff.mpr h).symm },
erw [dense_inducing_coe.nhds_eq_comap (0 : K), ← comap_inf, eq_bot],
exact comap_bot },
end
/-
The value of `hat_inv` at zero is not really specified, although it's probably zero.
Here we explicitly enforce the `inv_zero` axiom.
-/
instance completion.has_inv : has_inv (hat K) := ⟨λ x, if x = 0 then 0 else hat_inv x⟩
variables [topological_division_ring K]
lemma hat_inv_extends {x : K} (h : x ≠ 0) : hat_inv (x : hat K) = coe (x⁻¹ : K) :=
dense_inducing_coe.extend_eq_at _
((continuous_coe K).continuous_at.comp (topological_division_ring.continuous_inv x h))
variables [completable_top_field K]
@[norm_cast]
lemma coe_inv (x : K) : (x : hat K)⁻¹ = ((x⁻¹ : K) : hat K) :=
begin
by_cases h : x = 0,
{ rw [h, inv_zero],
dsimp [has_inv.inv],
norm_cast,
simp [if_pos] },
{ conv_lhs { dsimp [has_inv.inv] },
norm_cast,
rw if_neg,
{ exact hat_inv_extends h },
{ exact λ H, h (dense_embedding_coe.inj H) } }
end
variables [uniform_add_group K] [topological_ring K]
lemma mul_hat_inv_cancel {x : hat K} (x_ne : x ≠ 0) : x*hat_inv x = 1 :=
begin
haveI : t1_space (hat K) := t2_space.t1_space,
let f := λ x : hat K, x*hat_inv x,
let c := (coe : K → hat K),
change f x = 1,
have cont : continuous_at f x,
{ letI : topological_space (hat K × hat K) := prod.topological_space,
have : continuous_at (λ y : hat K, ((y, hat_inv y) : hat K × hat K)) x,
from continuous_id.continuous_at.prod (continuous_hat_inv x_ne),
exact (_root_.continuous_mul.continuous_at.comp this : _) },
have clo : x ∈ closure (c '' {0}ᶜ),
{ have := dense_inducing_coe.dense x,
rw [← image_univ, show (univ : set K) = {0} ∪ {0}ᶜ,
from (union_compl_self _).symm, image_union] at this,
apply mem_closure_of_mem_closure_union this,
rw image_singleton,
exact compl_singleton_mem_nhds x_ne },
have fxclo : f x ∈ closure (f '' (c '' {0}ᶜ)) := mem_closure_image cont clo,
have : f '' (c '' {0}ᶜ) ⊆ {1},
{ rw image_image,
rintros _ ⟨z, z_ne, rfl⟩,
rw mem_singleton_iff,
rw mem_compl_singleton_iff at z_ne,
dsimp [c, f],
rw hat_inv_extends z_ne,
norm_cast,
rw mul_inv_cancel z_ne,
norm_cast },
replace fxclo := closure_mono this fxclo,
rwa [closure_singleton, mem_singleton_iff] at fxclo
end
instance field_completion : field (hat K) :=
{ exists_pair_ne := ⟨0, 1, λ h, zero_ne_one ((uniform_embedding_coe K).inj h)⟩,
mul_inv_cancel := λ x x_ne, by { dsimp [has_inv.inv],
simp [if_neg x_ne, mul_hat_inv_cancel x_ne], },
inv_zero := show ((0 : K) : hat K)⁻¹ = ((0 : K) : hat K), by rw [coe_inv, inv_zero],
..completion.has_inv,
..(by apply_instance : comm_ring (hat K)) }
instance topological_division_ring_completion : topological_division_ring (hat K) :=
{ continuous_inv := begin
intros x x_ne,
have : {y | hat_inv y = y⁻¹ } ∈ 𝓝 x,
{ have : {(0 : hat K)}ᶜ ⊆ {y : hat K | hat_inv y = y⁻¹ },
{ intros y y_ne,
rw mem_compl_singleton_iff at y_ne,
dsimp [has_inv.inv],
rw if_neg y_ne },
exact mem_sets_of_superset (compl_singleton_mem_nhds x_ne) this },
exact continuous_at.congr (continuous_hat_inv x_ne) this
end,
..completion.top_ring_compl }
-- ===== /tests/lean/run/nativeReflBackdoor.lean (repo: EdAyers/lean4, license: Apache-2.0 and other third-party licenses) =====
--
/-
This example demonstrates that when we are using `native_decide`,
we are also trusting the correctness of `implementedBy` annotations,
foreign functions (i.e., `[extern]` annotations), etc.
-/
def g (b : Bool) := false
/-
The following `implementedBy` is telling the compiler
"trust me, `g` does implement `f`"
which is clearly false in this example.
-/
@[implementedBy g]
def f (b : Bool) := b
theorem fConst (b : Bool) : f b = false :=
match b with
| true =>
/- The following `native_decide` is going to use `g` to evaluate `f`
because of the `implementedBy` directive. -/
have : (f true) = false := by native_decide
this
| false => rfl
theorem trueEqFalse : true = false :=
have h₁ : f true = true := rfl;
have h₂ : f true = false := fConst true;
Eq.trans h₁.symm h₂
/-
We managed to prove `False` using the unsound annotation `implementedBy` above.
-/
theorem unsound : False :=
Bool.noConfusion trueEqFalse
#print axioms unsound -- axiom 'Lean.ofReduceBool' is listed
-- ===== /src/group_theory/specific_groups/quaternion.lean (repo: Vierkantor/mathlib, license: Apache-2.0) =====
/-
Copyright (c) 2021 Julian Kuelshammer. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Julian Kuelshammer
-/
import data.zmod.basic
import data.nat.basic
import tactic.interval_cases
import group_theory.specific_groups.dihedral
import group_theory.specific_groups.cyclic
/-!
# Quaternion Groups
We define the (generalised) quaternion groups `quaternion_group n` of order `4n`, also known as
dicyclic groups, with elements `a i` and `xa i` for `i : zmod (2 * n)`. The (generalised) quaternion
groups can be defined by the presentation
$\langle a, x | a^{2n} = 1, x^2 = a^n, x^{-1}ax=a^{-1}\rangle$. We write `a i` for
$a^i$ and `xa i` for $x * a^i$. For `n=2` the quaternion group `quaternion_group 2` is isomorphic to
the unit integral quaternions `(quaternion ℤ)ˣ`.
## Main definition
`quaternion_group n`: The (generalised) quaternion group of order `4n`.
## Implementation notes
This file is heavily based on `dihedral_group` by Shing Tak Lam.
In mathematics, the name "quaternion group" is reserved for the cases `n ≥ 2`. Since it would be
inconvenient to carry around this condition we define `quaternion_group` also for `n = 0` and
`n = 1`. `quaternion_group 0` is isomorphic to the infinite dihedral group, while
`quaternion_group 1` is isomorphic to a cyclic group of order `4`.
## References
* https://en.wikipedia.org/wiki/Dicyclic_group
* https://en.wikipedia.org/wiki/Quaternion_group
## TODO
Show that `quaternion_group 2 ≃* (quaternion ℤ)ˣ`.
-/
/--
The (generalised) quaternion group `quaternion_group n` of order `4n`. It can be defined by the
presentation $\langle a, x | a^{2n} = 1, x^2 = a^n, x^{-1}ax=a^{-1}\rangle$. We write `a i` for
$a^i$ and `xa i` for $x * a^i$.
-/
@[derive decidable_eq]
inductive quaternion_group (n : ℕ) : Type
| a : zmod (2 * n) → quaternion_group
| xa : zmod (2 * n) → quaternion_group
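/-
Informal sanity check (added illustration, not part of the original file): writing `x` for
`xa 0` and using the multiplication rules proved below, one recovers the conjugation relation
of the presentation. Indeed
  x * a i * x⁻¹ = xa i * xa n = a (n + n - i) = a (-i),
because `x⁻¹ = xa n` and `n + n = 0` in `zmod (2 * n)`.
-/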
namespace quaternion_group
variables {n : ℕ}
/--
Multiplication of the quaternion group.
-/
private def mul : quaternion_group n → quaternion_group n → quaternion_group n
| (a i) (a j) := a (i + j)
| (a i) (xa j) := xa (j - i)
| (xa i) (a j) := xa (i + j)
| (xa i) (xa j) := a (n + j - i)
/--
The identity `1` is given by `a 0`.
-/
private def one : quaternion_group n := a 0
instance : inhabited (quaternion_group n) := ⟨one⟩
/--
The inverse of an element of the quaternion group.
-/
private def inv : quaternion_group n → quaternion_group n
| (a i) := a (-i)
| (xa i) := xa (n + i)
/--
The group structure on `quaternion_group n`.
-/
instance : group (quaternion_group n) :=
{ mul := mul,
mul_assoc :=
begin
rintros (i | i) (j | j) (k | k);
simp only [mul];
abel,
simp only [neg_mul, one_mul, int.cast_one, zsmul_eq_mul, int.cast_neg,
add_right_inj],
calc -(n : zmod (2 * n)) = 0 - n : by rw zero_sub
... = 2 * n - n : by { norm_cast, simp, }
... = n : by ring
end,
one := one,
one_mul :=
begin
rintros (i | i),
{ exact congr_arg a (zero_add i) },
{ exact congr_arg xa (sub_zero i) },
end,
mul_one := begin
rintros (i | i),
{ exact congr_arg a (add_zero i) },
{ exact congr_arg xa (add_zero i) },
end,
inv := inv,
mul_left_inv := begin
rintros (i | i),
{ exact congr_arg a (neg_add_self i) },
{ exact congr_arg a (sub_self (n + i)) },
end }
variable {n}
@[simp] lemma a_mul_a (i j : zmod (2 * n)) : a i * a j = a (i + j) := rfl
@[simp] lemma a_mul_xa (i j : zmod (2 * n)) : a i * xa j = xa (j - i) := rfl
@[simp] lemma xa_mul_a (i j : zmod (2 * n)) : xa i * a j = xa (i + j) := rfl
@[simp] lemma xa_mul_xa (i j : zmod (2 * n)) : xa i * xa j = a (n + j - i) := rfl
lemma one_def : (1 : quaternion_group n) = a 0 := rfl
private def fintype_helper : (zmod (2 * n) ⊕ zmod (2 * n)) ≃ quaternion_group n :=
{ inv_fun := λ i, match i with
| (a j) := sum.inl j
| (xa j) := sum.inr j
end,
to_fun := λ i, match i with
| (sum.inl j) := a j
| (sum.inr j) := xa j
end,
left_inv := by rintro (x | x); refl,
right_inv := by rintro (x | x); refl }
/-- The special case that more or less by definition `quaternion_group 0` is isomorphic to the
infinite dihedral group. -/
def quaternion_group_zero_equiv_dihedral_group_zero : quaternion_group 0 ≃* dihedral_group 0 :=
{ to_fun := λ i, quaternion_group.rec_on i dihedral_group.r dihedral_group.sr,
inv_fun := λ i, match i with
| (dihedral_group.r j) := a j
| (dihedral_group.sr j) := xa j
end,
left_inv := by rintro (k | k); refl,
right_inv := by rintro (k | k); refl,
map_mul' := by { rintros (k | k) (l | l); { dsimp, simp, }, } }
/-- Some of the lemmas on `zmod m` require that `m` is positive. Since `m = 2 * n` is the case
relevant in this file, and we don't want to write `[fact (0 < 2 * n)]` everywhere, we make this
lemma a local instance. -/
private lemma succ_mul_pos_fact {m : ℕ} [hn : fact (0 < n)] : fact (0 < (nat.succ m) * n) :=
⟨nat.succ_mul_pos m hn.1⟩
local attribute [instance] succ_mul_pos_fact
/--
If `0 < n`, then `quaternion_group n` is a finite group.
-/
instance [fact (0 < n)] : fintype (quaternion_group n) := fintype.of_equiv _ fintype_helper
instance : nontrivial (quaternion_group n) := ⟨⟨a 0, xa 0, dec_trivial⟩⟩
/--
If `0 < n`, then `quaternion_group n` has `4n` elements.
-/
lemma card [fact (0 < n)] : fintype.card (quaternion_group n) = 4 * n :=
begin
rw [← fintype.card_eq.mpr ⟨fintype_helper⟩, fintype.card_sum, zmod.card, two_mul],
ring
end
@[simp] lemma a_one_pow (k : ℕ) : (a 1 : quaternion_group n) ^ k = a k :=
begin
induction k with k IH,
{ refl },
{ rw [pow_succ, IH, a_mul_a],
congr' 1,
norm_cast,
rw nat.one_add }
end
@[simp] lemma a_one_pow_n : (a 1 : quaternion_group n)^(2 * n) = 1 :=
begin
rw [a_one_pow, one_def],
congr' 1,
exact zmod.nat_cast_self _
end
@[simp] lemma xa_sq (i : zmod (2 * n)) : xa i ^ 2 = a n :=
begin
simp [sq]
end
@[simp] lemma xa_pow_four (i : zmod (2 * n)) : xa i ^ 4 = 1 :=
begin
simp only [pow_succ, sq, xa_mul_xa, xa_mul_a, add_sub_cancel, add_sub_assoc, add_sub_cancel',
sub_self, add_zero],
norm_cast,
rw ← two_mul,
simp [one_def],
end
/--
If `0 < n`, then `xa i` has order 4.
-/
@[simp] lemma order_of_xa [hpos : fact (0 < n)] (i : zmod (2 * n)) : order_of (xa i) = 4 :=
begin
change _ = 2^2,
haveI : fact(nat.prime 2) := fact.mk (nat.prime_two),
apply order_of_eq_prime_pow,
{ intro h,
simp only [pow_one, xa_sq] at h,
injection h with h',
apply_fun zmod.val at h',
apply_fun ( / n) at h',
simp only [zmod.val_nat_cast, zmod.val_zero, nat.zero_div, nat.mod_mul_left_div_self,
nat.div_self hpos.1] at h',
norm_num at h' },
{ norm_num }
end
/-- In the special case `n = 1`, `quaternion 1` is a cyclic group (of order `4`). -/
lemma quaternion_group_one_is_cyclic : is_cyclic (quaternion_group 1) :=
begin
apply is_cyclic_of_order_of_eq_card,
rw [card, mul_one],
exact order_of_xa 0
end
/--
If `0 < n`, then `a 1` has order `2 * n`.
-/
@[simp] lemma order_of_a_one : order_of (a 1 : quaternion_group n) = 2 * n :=
begin
rcases n.eq_zero_or_pos with rfl | hn,
{ simp_rw [mul_zero, order_of_eq_zero_iff'],
intros n hn,
rw [one_def, a_one_pow],
apply mt a.inj,
simpa using hn.ne' },
haveI := fact.mk hn,
apply (nat.le_of_dvd (nat.succ_mul_pos _ hn)
(order_of_dvd_of_pow_eq_one (@a_one_pow_n n))).lt_or_eq.resolve_left,
intro h,
have h1 : (a 1 : quaternion_group n)^(order_of (a 1)) = 1 := pow_order_of_eq_one _,
rw a_one_pow at h1,
injection h1 with h2,
rw [← zmod.val_eq_zero, zmod.val_nat_cast, nat.mod_eq_of_lt h] at h2,
exact absurd h2.symm (order_of_pos _).ne
end
/--
If `0 < n`, then `a i` has order `(2 * n) / gcd (2 * n) i`.
-/
lemma order_of_a [fact (0 < n)] (i : zmod (2 * n)) :
order_of (a i) = (2 * n) / nat.gcd (2 * n) i.val :=
begin
conv_lhs { rw ← zmod.nat_cast_zmod_val i },
rw [← a_one_pow, order_of_pow, order_of_a_one]
end
lemma exponent : monoid.exponent (quaternion_group n) = 2 * lcm n 2 :=
begin
rw [←normalize_eq 2, ←lcm_mul_left, normalize_eq],
norm_num,
rcases n.eq_zero_or_pos with rfl | hn,
{ simp only [lcm_zero_left, mul_zero],
exact monoid.exponent_eq_zero_of_order_zero order_of_a_one },
haveI := fact.mk hn,
apply nat.dvd_antisymm,
{ apply monoid.exponent_dvd_of_forall_pow_eq_one,
rintro (m | m),
{ rw [←order_of_dvd_iff_pow_eq_one, order_of_a],
refine nat.dvd_trans ⟨gcd (2 * n) m.val, _⟩ (dvd_lcm_left (2 * n) 4),
exact (nat.div_mul_cancel (nat.gcd_dvd_left (2 * n) (m.val))).symm },
{ rw [←order_of_dvd_iff_pow_eq_one, order_of_xa],
exact dvd_lcm_right (2 * n) 4 } },
{ apply lcm_dvd,
{ convert monoid.order_dvd_exponent (a 1),
exact order_of_a_one.symm },
{ convert monoid.order_dvd_exponent (xa 0),
exact (order_of_xa 0).symm } }
end
end quaternion_group
-- ===== /src/category_theory/concrete_category/bundled.lean (repo: alreadydone/mathlib, license: Apache-2.0) =====
/-
Copyright (c) 2018 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Johannes Hölzl, Reid Barton, Sean Leather
-/
import tactic.lint
/-!
# Bundled types
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> https://github.com/leanprover-community/mathlib4/pull/514
> Any changes to this file require a corresponding PR to mathlib4.
`bundled c` provides a uniform structure for bundling a type equipped with a type class.
We provide `category` instances for these in `category_theory/unbundled_hom.lean`
(for categories with unbundled homs, e.g. topological spaces)
and in `category_theory/bundled_hom.lean` (for categories with bundled homs, e.g. monoids).
-/
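/-
A hedged illustration (added here, not part of the original file): a typical use is to bundle
a class such as `monoid`, roughly along the lines of
  def Mon : Type (u+1) := bundled monoid
  def Mon.of (M : Type u) [monoid M] : Mon := bundled.of M
so that a term of `Mon` packages a carrier type together with its `monoid` instance. The names
`Mon` and `Mon.of` are used purely for illustration; the actual bundled categories are set up
in other files.
-/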
universes u v
namespace category_theory
variables {c d : Type u → Type v} {α : Type u}
/-- `bundled` is a type bundled with a type class instance for that type. Only
the type class is exposed as a parameter. -/
@[nolint has_nonempty_instance]
structure bundled (c : Type u → Type v) : Type (max (u+1) v) :=
(α : Type u)
(str : c α . tactic.apply_instance)
namespace bundled
/-- A generic function for lifting a type equipped with an instance to a bundled object. -/
-- Usually explicit instances will provide their own version of this, e.g. `Mon.of` and `Top.of`.
def of {c : Type u → Type v} (α : Type u) [str : c α] : bundled c := ⟨α, str⟩
instance : has_coe_to_sort (bundled c) (Type u) := ⟨bundled.α⟩
@[simp] lemma coe_mk (α) (str) : (@bundled.mk c α str : Type u) = α := rfl
/-
`bundled.map` is reducible so that, if we define a category
def Ring : Type (u+1) := induced_category SemiRing (bundled.map @ring.to_semiring)
instance search is able to "see" that a morphism R ⟶ S in Ring is really
a (semi)ring homomorphism from R.α to S.α, and not merely from
`(bundled.map @ring.to_semiring R).α` to `(bundled.map @ring.to_semiring S).α`.
-/
/-- Map over the bundled structure -/
@[reducible] def map (f : Π {α}, c α → d α) (b : bundled c) : bundled d :=
⟨b, f b.str⟩
end bundled
end category_theory
-- ===== /leanpkg/leanpkg/resolve.lean (repo: leanprover-community/lean, license: Apache-2.0) =====
/-
Copyright (c) 2017 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Gabriel Ebner
-/
import leanpkg.manifest system.io leanpkg.proc leanpkg.git
namespace leanpkg
def assignment := list (string × string)
namespace assignment
def empty : assignment := []
def find : assignment → string → option string
| [] s := none
| ((k, v) :: kvs) s :=
if k = s then some v else find kvs s
def contains (a : assignment) (s : string) : bool :=
(a.find s).is_some
def insert (a : assignment) (k v : string) : assignment :=
if a.contains k then a else (k, v) :: a
def fold {α} (i : α) (f : α → string → string → α) : assignment → α :=
list.foldl (λ a ⟨k, v⟩, f a k v) i
end assignment
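/-
A small usage sketch (illustrative only, not part of the original file):
  (assignment.empty.insert "mathlib" "_target/deps/mathlib").find "mathlib"
evaluates to `some "_target/deps/mathlib"`, and a second `insert` with the same key is a no-op,
because `insert` keeps the first binding for a key that is already present.
-/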
@[reducible] def solver := state_t assignment io
instance {α : Type} : has_coe (io α) (solver α) := ⟨state_t.lift⟩
def not_yet_assigned (d : string) : solver bool := do
assg ← get,
return $ ¬ assg.contains d
def resolved_path (d : string) : solver string := do
assg ← get,
some path ← return (assg.find d) | io.fail "",
return path
-- TODO(gabriel): windows?
def resolve_dir (abs_or_rel : string) (base : string) : string :=
if abs_or_rel.front = '/' then
abs_or_rel -- absolute
else
base ++ "/" ++ abs_or_rel
def materialize (relpath : string) (dep : dependency) : solver punit :=
match dep.src with
| (source.path dir) := do
let depdir := resolve_dir dir relpath,
io.put_str_ln $ dep.name ++ ": using local path " ++ depdir,
modify $ λ assg, assg.insert dep.name depdir
| (source.git url rev branch) := do
let depdir := "_target/deps/" ++ dep.name,
already_there ← io.fs.dir_exists depdir,
if already_there then do {
io.put_str $ dep.name ++ ": trying to update " ++ depdir ++ " to revision " ++ rev,
io.put_str_ln $ match branch with | none := "" | some branch := "@" ++ branch end,
hash ← git_parse_origin_revision depdir rev,
rev_ex ← git_revision_exists depdir hash,
when (¬rev_ex) $
exec_cmd {cmd := "git", args := ["fetch"], cwd := depdir}
} else do {
io.put_str_ln $ dep.name ++ ": cloning " ++ url ++ " to " ++ depdir,
io.fs.mkdir depdir tt,
exec_cmd {cmd := "git", args := ["clone", url, depdir]}
},
hash ← git_parse_origin_revision depdir rev,
exec_cmd {cmd := "git", args := ["checkout", "--detach", hash], cwd := depdir},
modify $ λ assg, assg.insert dep.name depdir
end
def solve_deps_core : ∀ (rel_path : string) (d : manifest) (max_depth : ℕ), solver unit
| _ _ 0 := io.fail "maximum dependency resolution depth reached"
| relpath d (max_depth + 1) := do
deps ← monad.filter (not_yet_assigned ∘ dependency.name) d.dependencies,
deps.mmap' (materialize relpath),
deps.mmap' $ λ dep, do
p ← resolved_path dep.name,
d' ← manifest.from_file $ p ++ "/" ++ "leanpkg.toml",
when (d'.name ≠ dep.name) $
io.fail $ d.name ++ " (in " ++ relpath ++ ") depends on " ++ d'.name ++
", but resolved dependency has name " ++ dep.name ++ " (in " ++ p ++ ")",
solve_deps_core p d' max_depth
def solve_deps (d : manifest) : io assignment := do
(_, assg) ← (solve_deps_core "." d 1024).run $ assignment.empty.insert d.name ".",
return assg
def construct_path_core (depname : string) (dirname : string) : io (list string) :=
list.map (λ relpath, dirname ++ "/" ++ relpath) <$>
manifest.effective_path <$> (manifest.from_file $ dirname ++ "/" ++ leanpkg_toml_fn)
def construct_path (assg : assignment) : io (list string) := do
let assg := assg.fold [] (λ xs depname dirname, (depname, dirname) :: xs),
list.join <$> (assg.mmap $ λ ⟨depname, dirname⟩, construct_path_core depname dirname)
end leanpkg
-- ===== /src/data/finset/lattice.lean (repo: dupuisf/mathlib, license: Apache-2.0) =====
/-
Copyright (c) 2018 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Mario Carneiro
-/
import data.finset.fold
import data.multiset.lattice
/-!
# Lattice operations on multisets
-/
variables {α β γ : Type*}
namespace finset
open multiset
/-! ### sup -/
section sup
variables [semilattice_sup_bot α]
/-- Supremum of a finite set: `sup {a, b, c} f = f a ⊔ f b ⊔ f c` -/
def sup (s : finset β) (f : β → α) : α := s.fold (⊔) ⊥ f
variables {s s₁ s₂ : finset β} {f : β → α}
lemma sup_def : s.sup f = (s.1.map f).sup := rfl
@[simp] lemma sup_empty : (∅ : finset β).sup f = ⊥ :=
fold_empty
@[simp] lemma sup_insert [decidable_eq β] {b : β} : (insert b s : finset β).sup f = f b ⊔ s.sup f :=
fold_insert_idem
@[simp] lemma sup_singleton {b : β} : ({b} : finset β).sup f = f b :=
sup_singleton
lemma sup_union [decidable_eq β] : (s₁ ∪ s₂).sup f = s₁.sup f ⊔ s₂.sup f :=
finset.induction_on s₁ (by rw [empty_union, sup_empty, bot_sup_eq]) $ λ a s has ih,
by rw [insert_union, sup_insert, sup_insert, ih, sup_assoc]
theorem sup_congr {f g : β → α} (hs : s₁ = s₂) (hfg : ∀a∈s₂, f a = g a) : s₁.sup f = s₂.sup g :=
by subst hs; exact finset.fold_congr hfg
@[simp] lemma sup_le_iff {a : α} : s.sup f ≤ a ↔ (∀b ∈ s, f b ≤ a) :=
begin
apply iff.trans multiset.sup_le,
simp only [multiset.mem_map, and_imp, exists_imp_distrib],
exact ⟨λ k b hb, k _ _ hb rfl, λ k a' b hb h, h ▸ k _ hb⟩,
end
lemma sup_le {a : α} : (∀b ∈ s, f b ≤ a) → s.sup f ≤ a :=
sup_le_iff.2
lemma le_sup {b : β} (hb : b ∈ s) : f b ≤ s.sup f :=
sup_le_iff.1 (le_refl _) _ hb
lemma sup_mono_fun {g : β → α} (h : ∀b∈s, f b ≤ g b) : s.sup f ≤ s.sup g :=
sup_le (λ b hb, le_trans (h b hb) (le_sup hb))
lemma sup_mono (h : s₁ ⊆ s₂) : s₁.sup f ≤ s₂.sup f :=
sup_le $ assume b hb, le_sup (h hb)
@[simp] lemma sup_lt_iff [is_total α (≤)] {a : α} (ha : ⊥ < a) :
s.sup f < a ↔ (∀b ∈ s, f b < a) :=
by letI := classical.dec_eq β; from
⟨ λh b hb, lt_of_le_of_lt (le_sup hb) h,
finset.induction_on s (by simp [ha]) (by simp {contextual := tt}) ⟩
lemma comp_sup_eq_sup_comp [semilattice_sup_bot γ] {s : finset β}
{f : β → α} (g : α → γ) (g_sup : ∀ x y, g (x ⊔ y) = g x ⊔ g y) (bot : g ⊥ = ⊥) :
g (s.sup f) = s.sup (g ∘ f) :=
by letI := classical.dec_eq β; from
finset.induction_on s (by simp [bot]) (by simp [g_sup] {contextual := tt})
lemma comp_sup_eq_sup_comp_of_is_total [is_total α (≤)] {γ : Type} [semilattice_sup_bot γ]
(g : α → γ) (mono_g : monotone g) (bot : g ⊥ = ⊥) : g (s.sup f) = s.sup (g ∘ f) :=
comp_sup_eq_sup_comp g mono_g.map_sup bot
/-- Computing `sup` in a subtype (closed under `sup`) is the same as computing it in `α`. -/
lemma sup_coe {P : α → Prop}
{Pbot : P ⊥} {Psup : ∀{{x y}}, P x → P y → P (x ⊔ y)}
(t : finset β) (f : β → {x : α // P x}) :
(@sup _ _ (subtype.semilattice_sup_bot Pbot Psup) t f : α) = t.sup (λ x, f x) :=
by { classical, rw [comp_sup_eq_sup_comp coe]; intros; refl }
theorem subset_range_sup_succ (s : finset ℕ) : s ⊆ range (s.sup id).succ :=
λ n hn, mem_range.2 $ nat.lt_succ_of_le $ le_sup hn
theorem exists_nat_subset_range (s : finset ℕ) : ∃n : ℕ, s ⊆ range n :=
⟨_, s.subset_range_sup_succ⟩
end sup
lemma sup_eq_supr [complete_lattice β] (s : finset α) (f : α → β) : s.sup f = (⨆a∈s, f a) :=
le_antisymm
(finset.sup_le $ assume a ha, le_supr_of_le a $ le_supr _ ha)
(supr_le $ assume a, supr_le $ assume ha, le_sup ha)
/-! ### inf -/
section inf
variables [semilattice_inf_top α]
/-- Infimum of a finite set: `inf {a, b, c} f = f a ⊓ f b ⊓ f c` -/
def inf (s : finset β) (f : β → α) : α := s.fold (⊓) ⊤ f
variables {s s₁ s₂ : finset β} {f : β → α}
lemma inf_def : s.inf f = (s.1.map f).inf := rfl
@[simp] lemma inf_empty : (∅ : finset β).inf f = ⊤ :=
fold_empty
lemma le_inf_iff {a : α} : a ≤ s.inf f ↔ ∀b ∈ s, a ≤ f b :=
@sup_le_iff (order_dual α) _ _ _ _ _
@[simp] lemma inf_insert [decidable_eq β] {b : β} : (insert b s : finset β).inf f = f b ⊓ s.inf f :=
fold_insert_idem
@[simp] lemma inf_singleton {b : β} : ({b} : finset β).inf f = f b :=
inf_singleton
lemma inf_union [decidable_eq β] : (s₁ ∪ s₂).inf f = s₁.inf f ⊓ s₂.inf f :=
@sup_union (order_dual α) _ _ _ _ _ _
theorem inf_congr {f g : β → α} (hs : s₁ = s₂) (hfg : ∀a∈s₂, f a = g a) : s₁.inf f = s₂.inf g :=
by subst hs; exact finset.fold_congr hfg
lemma inf_le {b : β} (hb : b ∈ s) : s.inf f ≤ f b :=
le_inf_iff.1 (le_refl _) _ hb
lemma le_inf {a : α} : (∀b ∈ s, a ≤ f b) → a ≤ s.inf f :=
le_inf_iff.2
lemma inf_mono_fun {g : β → α} (h : ∀b∈s, f b ≤ g b) : s.inf f ≤ s.inf g :=
le_inf (λ b hb, le_trans (inf_le hb) (h b hb))
lemma inf_mono (h : s₁ ⊆ s₂) : s₂.inf f ≤ s₁.inf f :=
le_inf $ assume b hb, inf_le (h hb)
lemma lt_inf_iff [h : is_total α (≤)] {a : α} (ha : a < ⊤) : a < s.inf f ↔ (∀b ∈ s, a < f b) :=
@sup_lt_iff (order_dual α) _ _ _ _ (@is_total.swap α _ h) _ ha
lemma comp_inf_eq_inf_comp [semilattice_inf_top γ] {s : finset β}
{f : β → α} (g : α → γ) (g_inf : ∀ x y, g (x ⊓ y) = g x ⊓ g y) (top : g ⊤ = ⊤) :
g (s.inf f) = s.inf (g ∘ f) :=
@comp_sup_eq_sup_comp (order_dual α) _ (order_dual γ) _ _ _ _ _ g_inf top
lemma comp_inf_eq_inf_comp_of_is_total [h : is_total α (≤)] {γ : Type} [semilattice_inf_top γ]
(g : α → γ) (mono_g : monotone g) (top : g ⊤ = ⊤) : g (s.inf f) = s.inf (g ∘ f) :=
comp_inf_eq_inf_comp g mono_g.map_inf top
/-- Computing `inf` in a subtype (closed under `inf`) is the same as computing it in `α`. -/
lemma inf_coe {P : α → Prop}
{Ptop : P ⊤} {Pinf : ∀{{x y}}, P x → P y → P (x ⊓ y)}
(t : finset β) (f : β → {x : α // P x}) :
(@inf _ _ (subtype.semilattice_inf_top Ptop Pinf) t f : α) = t.inf (λ x, f x) :=
by { classical, rw [comp_inf_eq_inf_comp coe]; intros; refl }
end inf
lemma inf_eq_infi [complete_lattice β] (s : finset α) (f : α → β) : s.inf f = (⨅a∈s, f a) :=
@sup_eq_supr _ (order_dual β) _ _ _
/-! ### max and min of finite sets -/
section max_min
variables [decidable_linear_order α]
/-- Let `s` be a finset in a linear order. Then `s.max` is the maximum of `s` if `s` is not empty,
and `none` otherwise. It belongs to `option α`. If you want to get an element of `α`, see
`s.max'`. -/
protected def max : finset α → option α :=
fold (option.lift_or_get max) none some
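-- A hedged example (not part of the original file): `({1, 3, 2} : finset ℕ).max = some 3`,
-- while `(∅ : finset ℕ).max = none`; this is why `finset.max'` below asks for a proof of
-- nonemptiness in exchange for returning a bare element of `α`.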
theorem max_eq_sup_with_bot (s : finset α) :
s.max = @sup (with_bot α) α _ s some := rfl
@[simp] theorem max_empty : (∅ : finset α).max = none := rfl
@[simp] theorem max_insert {a : α} {s : finset α} :
(insert a s).max = option.lift_or_get max (some a) s.max := fold_insert_idem
@[simp] theorem max_singleton {a : α} : finset.max {a} = some a :=
by { rw [← insert_emptyc_eq], exact max_insert }
theorem max_of_mem {s : finset α} {a : α} (h : a ∈ s) : ∃ b, b ∈ s.max :=
(@le_sup (with_bot α) _ _ _ _ _ h _ rfl).imp $ λ b, Exists.fst
theorem max_of_nonempty {s : finset α} (h : s.nonempty) : ∃ a, a ∈ s.max :=
let ⟨a, ha⟩ := h in max_of_mem ha
theorem max_eq_none {s : finset α} : s.max = none ↔ s = ∅ :=
⟨λ h, s.eq_empty_or_nonempty.elim id
(λ H, let ⟨a, ha⟩ := max_of_nonempty H in by rw h at ha; cases ha),
λ h, h.symm ▸ max_empty⟩
theorem mem_of_max {s : finset α} : ∀ {a : α}, a ∈ s.max → a ∈ s :=
finset.induction_on s (λ _ H, by cases H)
(λ b s _ (ih : ∀ {a}, a ∈ s.max → a ∈ s) a (h : a ∈ (insert b s).max),
begin
by_cases p : b = a,
{ induction p, exact mem_insert_self b s },
{ cases option.lift_or_get_choice max_choice (some b) s.max with q q;
rw [max_insert, q] at h,
{ cases h, cases p rfl },
{ exact mem_insert_of_mem (ih h) } }
end)
theorem le_max_of_mem {s : finset α} {a b : α} (h₁ : a ∈ s) (h₂ : b ∈ s.max) : a ≤ b :=
by rcases @le_sup (with_bot α) _ _ _ _ _ h₁ _ rfl with ⟨b', hb, ab⟩;
cases h₂.symm.trans hb; assumption
/-- Let `s` be a finset in a linear order. Then `s.min` is the minimum of `s` if `s` is not empty,
and `none` otherwise. It belongs to `option α`. If you want to get an element of `α`, see
`s.min'`. -/
protected def min : finset α → option α :=
fold (option.lift_or_get min) none some
theorem min_eq_inf_with_top (s : finset α) :
s.min = @inf (with_top α) α _ s some := rfl
@[simp] theorem min_empty : (∅ : finset α).min = none := rfl
@[simp] theorem min_insert {a : α} {s : finset α} :
(insert a s).min = option.lift_or_get min (some a) s.min :=
fold_insert_idem
@[simp] theorem min_singleton {a : α} : finset.min {a} = some a :=
by { rw ← insert_emptyc_eq, exact min_insert }
theorem min_of_mem {s : finset α} {a : α} (h : a ∈ s) : ∃ b, b ∈ s.min :=
(@inf_le (with_top α) _ _ _ _ _ h _ rfl).imp $ λ b, Exists.fst
theorem min_of_nonempty {s : finset α} (h : s.nonempty) : ∃ a, a ∈ s.min :=
let ⟨a, ha⟩ := h in min_of_mem ha
theorem min_eq_none {s : finset α} : s.min = none ↔ s = ∅ :=
⟨λ h, s.eq_empty_or_nonempty.elim id
(λ H, let ⟨a, ha⟩ := min_of_nonempty H in by rw h at ha; cases ha),
λ h, h.symm ▸ min_empty⟩
theorem mem_of_min {s : finset α} : ∀ {a : α}, a ∈ s.min → a ∈ s :=
finset.induction_on s (λ _ H, by cases H) $
λ b s _ (ih : ∀ {a}, a ∈ s.min → a ∈ s) a (h : a ∈ (insert b s).min),
begin
by_cases p : b = a,
{ induction p, exact mem_insert_self b s },
{ cases option.lift_or_get_choice min_choice (some b) s.min with q q;
rw [min_insert, q] at h,
{ cases h, cases p rfl },
{ exact mem_insert_of_mem (ih h) } }
end
theorem min_le_of_mem {s : finset α} {a b : α} (h₁ : b ∈ s) (h₂ : a ∈ s.min) : a ≤ b :=
by rcases @inf_le (with_top α) _ _ _ _ _ h₁ _ rfl with ⟨b', hb, ab⟩;
cases h₂.symm.trans hb; assumption
/-- Given a nonempty finset `s` in a linear order `α`, `s.min' h` is its minimum, as an
element of `α`, where `h` is a proof of nonemptiness. Without this assumption, use instead `s.min`,
taking values in `option α`. -/
def min' (s : finset α) (H : s.nonempty) : α :=
@option.get _ s.min $
let ⟨k, hk⟩ := H in
let ⟨b, hb⟩ := min_of_mem hk in by simp at hb; simp [hb]
/-- Given a nonempty finset `s` in a linear order `α`, `s.max' h` is its maximum, as an
element of `α`, where `h` is a proof of nonemptiness. Without this assumption, use instead `s.max`,
taking values in `option α`. -/
def max' (s : finset α) (H : s.nonempty) : α :=
@option.get _ s.max $
let ⟨k, hk⟩ := H in
let ⟨b, hb⟩ := max_of_mem hk in by simp at hb; simp [hb]
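-- A hedged example (not part of the original file): for `s = ({1, 3, 2} : finset ℕ)` with a
-- nonemptiness proof `h`, `s.min' h = 1` and `s.max' h = 3`; unlike `s.min` and `s.max`,
-- these return elements of `α` rather than of `option α`.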
variables (s : finset α) (H : s.nonempty)
theorem min'_mem : s.min' H ∈ s := mem_of_min $ by simp [min']
theorem min'_le (x) (H2 : x ∈ s) : s.min' H ≤ x := min_le_of_mem H2 $ option.get_mem _
theorem le_min' (x) (H2 : ∀ y ∈ s, x ≤ y) : x ≤ s.min' H := H2 _ $ min'_mem _ _
theorem max'_mem : s.max' H ∈ s := mem_of_max $ by simp [max']
theorem le_max' (x) (H2 : x ∈ s) : x ≤ s.max' H := le_max_of_mem H2 $ option.get_mem _
theorem max'_le (x) (H2 : ∀ y ∈ s, y ≤ x) : s.max' H ≤ x := H2 _ $ max'_mem _ _
theorem min'_lt_max' {i j} (H1 : i ∈ s) (H2 : j ∈ s) (H3 : i ≠ j) : s.min' H < s.max' H :=
begin
rcases lt_trichotomy i j with H4 | H4 | H4,
{ have H5 := min'_le s H i H1,
have H6 := le_max' s H j H2,
apply lt_of_le_of_lt H5,
apply lt_of_lt_of_le H4 H6 },
{ cc },
{ have H5 := min'_le s H j H2,
have H6 := le_max' s H i H1,
apply lt_of_le_of_lt H5,
apply lt_of_lt_of_le H4 H6 }
end
/--
If there is more than one element, then `min'` is strictly less than `max'`. This is an
alternative version of `min'_lt_max'` which is sometimes more convenient.
-/
lemma min'_lt_max'_of_card (h₂ : 1 < card s) : s.min' H < s.max' H :=
begin
apply lt_of_not_ge,
intro a,
apply not_le_of_lt h₂ (le_of_eq _),
rw card_eq_one,
use max' s H,
rw eq_singleton_iff_unique_mem,
refine ⟨max'_mem _ _, λ t Ht, le_antisymm (le_max' s H t Ht) (le_trans a (min'_le s H t Ht))⟩,
end
end max_min
section exists_max_min
variables [linear_order α]
lemma exists_max_image (s : finset β) (f : β → α) (h : s.nonempty) :
∃ x ∈ s, ∀ x' ∈ s, f x' ≤ f x :=
begin
letI := classical.DLO α,
cases max_of_nonempty (h.image f) with y hy,
rcases mem_image.mp (mem_of_max hy) with ⟨x, hx, rfl⟩,
exact ⟨x, hx, λ x' hx', le_max_of_mem (mem_image_of_mem f hx') hy⟩,
end
lemma exists_min_image (s : finset β) (f : β → α) (h : s.nonempty) :
∃ x ∈ s, ∀ x' ∈ s, f x ≤ f x' :=
@exists_max_image (order_dual α) β _ s f h
end exists_max_min
end finset
namespace multiset
lemma count_sup [decidable_eq β] (s : finset α) (f : α → multiset β) (b : β) :
count b (s.sup f) = s.sup (λa, count b (f a)) :=
begin
letI := classical.dec_eq α,
refine s.induction _ _,
{ exact count_zero _ },
{ assume i s his ih,
rw [finset.sup_insert, sup_eq_union, count_union, finset.sup_insert, ih],
refl }
end
end multiset
section lattice
variables {ι : Type*} {ι' : Sort*} [complete_lattice α]
/-- Supremum of `s i`, `i : ι`, is equal to the supremum over `t : finset ι` of suprema
`⨆ i ∈ t, s i`. This version assumes `ι` is a `Type*`. See `supr_eq_supr_finset'` for a version
that works for `ι : Sort*`. -/
lemma supr_eq_supr_finset (s : ι → α) :
(⨆i, s i) = (⨆t:finset ι, ⨆i∈t, s i) :=
begin
classical,
exact le_antisymm
(supr_le $ assume b, le_supr_of_le {b} $ le_supr_of_le b $ le_supr_of_le
(by simp) $ le_refl _)
(supr_le $ assume t, supr_le $ assume b, supr_le $ assume hb, le_supr _ _)
end
/-- Supremum of `s i`, `i : ι`, is equal to the supremum over `t : finset ι` of suprema
`⨆ i ∈ t, s i`. This version works for `ι : Sort*`. See `supr_eq_supr_finset` for a version
that assumes `ι : Type*` but has no `plift`s. -/
lemma supr_eq_supr_finset' (s : ι' → α) :
(⨆i, s i) = (⨆t:finset (plift ι'), ⨆i∈t, s (plift.down i)) :=
by rw [← supr_eq_supr_finset, ← equiv.plift.surjective.supr_comp]; refl
/-- Infimum of `s i`, `i : ι`, is equal to the infimum over `t : finset ι` of infima
`⨆ i ∈ t, s i`. This version assumes `ι` is a `Type*`. See `infi_eq_infi_finset'` for a version
that works for `ι : Sort*`. -/
lemma infi_eq_infi_finset (s : ι → α) :
(⨅i, s i) = (⨅t:finset ι, ⨅i∈t, s i) :=
@supr_eq_supr_finset (order_dual α) _ _ _
/-- Infimum of `s i`, `i : ι`, is equal to the infimum over `t : finset ι` of infima
`⨆ i ∈ t, s i`. This version works for `ι : Sort*`. See `infi_eq_infi_finset` for a version
that assumes `ι : Type*` but has no `plift`s. -/
lemma infi_eq_infi_finset' (s : ι' → α) :
(⨅i, s i) = (⨅t:finset (plift ι'), ⨅i∈t, s (plift.down i)) :=
@supr_eq_supr_finset' (order_dual α) _ _ _
end lattice
namespace set
variables {ι : Type*} {ι' : Sort*}
/-- Union of an indexed family of sets `s : ι → set α` is equal to the union of the unions
of finite subfamilies. This version assumes `ι : Type*`. See also `Union_eq_Union_finset'` for
a version that works for `ι : Sort*`. -/
lemma Union_eq_Union_finset (s : ι → set α) :
(⋃i, s i) = (⋃t:finset ι, ⋃i∈t, s i) :=
supr_eq_supr_finset s
/-- Union of an indexed family of sets `s : ι → set α` is equal to the union of the unions
of finite subfamilies. This version works for `ι : Sort*`. See also `Union_eq_Union_finset` for
a version that assumes `ι : Type*` but avoids `plift`s in the right hand side. -/
lemma Union_eq_Union_finset' (s : ι' → set α) :
(⋃i, s i) = (⋃t:finset (plift ι'), ⋃i∈t, s (plift.down i)) :=
supr_eq_supr_finset' s
/-- Intersection of an indexed family of sets `s : ι → set α` is equal to the intersection of the
intersections of finite subfamilies. This version assumes `ι : Type*`. See also
`Inter_eq_Inter_finset'` for a version that works for `ι : Sort*`. -/
lemma Inter_eq_Inter_finset (s : ι → set α) :
(⋂i, s i) = (⋂t:finset ι, ⋂i∈t, s i) :=
infi_eq_infi_finset s
/-- Intersection of an indexed family of sets `s : ι → set α` is equal to the intersection of the
intersections of finite subfamilies. This version works for `ι : Sort*`. See also
`Inter_eq_Inter_finset` for a version that assumes `ι : Type*` but avoids `plift`s in the right
hand side. -/
lemma Inter_eq_Inter_finset' (s : ι' → set α) :
(⋂i, s i) = (⋂t:finset (plift ι'), ⋂i∈t, s (plift.down i)) :=
infi_eq_infi_finset' s
end set
namespace finset
/-! ### Interaction with big lattice/set operations -/
section lattice
lemma supr_coe [has_Sup β] (f : α → β) (s : finset α) :
(⨆ x ∈ (↑s : set α), f x) = ⨆ x ∈ s, f x :=
rfl
lemma infi_coe [has_Inf β] (f : α → β) (s : finset α) :
(⨅ x ∈ (↑s : set α), f x) = ⨅ x ∈ s, f x :=
rfl
variables [complete_lattice β]
theorem supr_singleton (a : α) (s : α → β) : (⨆ x ∈ ({a} : finset α), s x) = s a :=
by simp
theorem infi_singleton (a : α) (s : α → β) : (⨅ x ∈ ({a} : finset α), s x) = s a :=
by simp
variables [decidable_eq α]
theorem supr_union {f : α → β} {s t : finset α} :
(⨆ x ∈ s ∪ t, f x) = (⨆x∈s, f x) ⊔ (⨆x∈t, f x) :=
by simp [supr_or, supr_sup_eq]
theorem infi_union {f : α → β} {s t : finset α} :
(⨅ x ∈ s ∪ t, f x) = (⨅ x ∈ s, f x) ⊓ (⨅ x ∈ t, f x) :=
by simp [infi_or, infi_inf_eq]
lemma supr_insert (a : α) (s : finset α) (t : α → β) :
(⨆ x ∈ insert a s, t x) = t a ⊔ (⨆ x ∈ s, t x) :=
by { rw insert_eq, simp only [supr_union, finset.supr_singleton] }
lemma infi_insert (a : α) (s : finset α) (t : α → β) :
(⨅ x ∈ insert a s, t x) = t a ⊓ (⨅ x ∈ s, t x) :=
by { rw insert_eq, simp only [infi_union, finset.infi_singleton] }
lemma supr_finset_image {f : γ → α} {g : α → β} {s : finset γ} :
(⨆ x ∈ s.image f, g x) = (⨆ y ∈ s, g (f y)) :=
by rw [← supr_coe, coe_image, supr_image, supr_coe]
lemma infi_finset_image {f : γ → α} {g : α → β} {s : finset γ} :
(⨅ x ∈ s.image f, g x) = (⨅ y ∈ s, g (f y)) :=
by rw [← infi_coe, coe_image, infi_image, infi_coe]
end lattice
@[simp] theorem bUnion_coe (s : finset α) (t : α → set β) :
(⋃ x ∈ (↑s : set α), t x) = ⋃ x ∈ s, t x :=
rfl
@[simp] theorem bInter_coe (s : finset α) (t : α → set β) :
(⋂ x ∈ (↑s : set α), t x) = ⋂ x ∈ s, t x :=
rfl
@[simp] theorem bUnion_singleton (a : α) (s : α → set β) : (⋃ x ∈ ({a} : finset α), s x) = s a :=
supr_singleton a s
@[simp] theorem bInter_singleton (a : α) (s : α → set β) : (⋂ x ∈ ({a} : finset α), s x) = s a :=
infi_singleton a s
@[simp] lemma bUnion_preimage_singleton (f : α → β) (s : finset β) :
(⋃ y ∈ s, f ⁻¹' {y}) = f ⁻¹' ↑s :=
set.bUnion_preimage_singleton f ↑s
variables [decidable_eq α]
lemma bUnion_union (s t : finset α) (u : α → set β) :
(⋃ x ∈ s ∪ t, u x) = (⋃ x ∈ s, u x) ∪ (⋃ x ∈ t, u x) :=
supr_union
lemma bInter_inter (s t : finset α) (u : α → set β) :
(⋂ x ∈ s ∪ t, u x) = (⋂ x ∈ s, u x) ∩ (⋂ x ∈ t, u x) :=
infi_union
@[simp] lemma bUnion_insert (a : α) (s : finset α) (t : α → set β) :
(⋃ x ∈ insert a s, t x) = t a ∪ (⋃ x ∈ s, t x) :=
supr_insert a s t
@[simp] lemma bInter_insert (a : α) (s : finset α) (t : α → set β) :
(⋂ x ∈ insert a s, t x) = t a ∩ (⋂ x ∈ s, t x) :=
infi_insert a s t
@[simp] lemma bUnion_finset_image {f : γ → α} {g : α → set β} {s : finset γ} :
(⋃x ∈ s.image f, g x) = (⋃y ∈ s, g (f y)) :=
supr_finset_image
@[simp] lemma bInter_finset_image {f : γ → α} {g : α → set β} {s : finset γ} :
(⋂ x ∈ s.image f, g x) = (⋂ y ∈ s, g (f y)) :=
infi_finset_image
end finset
-- ===== /src/geometry/manifold/times_cont_mdiff.lean (repo: Lix0120/mathlib, license: Apache-2.0) =====
/-
Copyright (c) 2020 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import geometry.manifold.mfderiv
import geometry.manifold.local_invariant_properties
/-!
# Smooth functions between smooth manifolds
We define `Cⁿ` functions between smooth manifolds, as functions which are `Cⁿ` in charts, and prove
basic properties of these notions.
## Main definitions and statements
Let `M` and `M'` be two smooth manifolds, with respect to models with corners `I` and `I'`. Let
`f : M → M'`.
* `times_cont_mdiff_within_at I I' n f s x` states that the function `f` is `Cⁿ` within the set `s`
around the point `x`.
* `times_cont_mdiff_at I I' n f x` states that the function `f` is `Cⁿ` around `x`.
* `times_cont_mdiff_on I I' n f s` states that the function `f` is `Cⁿ` on the set `s`
* `times_cont_mdiff I I' n f` states that the function `f` is `Cⁿ`.
* `times_cont_mdiff_on.comp` gives the invariance of the `Cⁿ` property under composition
* `times_cont_mdiff_on.times_cont_mdiff_on_tangent_map_within` states that the bundled derivative
of a `Cⁿ` function in a domain is `Cᵐ` when `m + 1 ≤ n`.
* `times_cont_mdiff.times_cont_mdiff_tangent_map` states that the bundled derivative
of a `Cⁿ` function is `Cᵐ` when `m + 1 ≤ n`.
* `times_cont_mdiff_iff_times_cont_diff` states that, for functions between vector spaces,
manifold-smoothness is equivalent to usual smoothness.
We also give many basic properties of smooth functions between manifolds, following the API of
smooth functions between vector spaces.
## Implementation details
Many properties follow for free from the corresponding properties of functions in vector spaces,
as being `Cⁿ` is a local property invariant under the smooth groupoid. We take advantage of the
general machinery developed in `local_invariant_properties.lean` to get these properties
automatically. For instance, the fact that being `Cⁿ` does not depend on the chart one considers
is given by `lift_prop_within_at_indep_chart`.
For this to work, the definition of `times_cont_mdiff_within_at` and friends has to
follow definitionally the setup of local invariant properties. Still, we recast the definition
in terms of extended charts in `times_cont_mdiff_on_iff` and `times_cont_mdiff_iff`.
-/
open set charted_space smooth_manifold_with_corners
open_locale topological_space manifold
/-! ### Definition of smooth functions between manifolds -/
variables {𝕜 : Type*} [nondiscrete_normed_field 𝕜]
-- declare a smooth manifold `M` over the pair `(E, H)`.
{E : Type*} [normed_group E] [normed_space 𝕜 E]
{H : Type*} [topological_space H] (I : model_with_corners 𝕜 E H)
{M : Type*} [topological_space M] [charted_space H M] [Is : smooth_manifold_with_corners I M]
-- declare a smooth manifold `M'` over the pair `(E', H')`.
{E' : Type*} [normed_group E'] [normed_space 𝕜 E']
{H' : Type*} [topological_space H'] (I' : model_with_corners 𝕜 E' H')
{M' : Type*} [topological_space M'] [charted_space H' M'] [I's : smooth_manifold_with_corners I' M']
-- declare a smooth manifold `N` over the pair `(F, G)`.
{F : Type*} [normed_group F] [normed_space 𝕜 F]
{G : Type*} [topological_space G] {J : model_with_corners 𝕜 F G}
{N : Type*} [topological_space N] [charted_space G N] [Js : smooth_manifold_with_corners J N]
-- declare a smooth manifold `N'` over the pair `(F', G')`.
{F' : Type*} [normed_group F'] [normed_space 𝕜 F']
{G' : Type*} [topological_space G'] {J' : model_with_corners 𝕜 F' G'}
{N' : Type*} [topological_space N'] [charted_space G' N'] [J's : smooth_manifold_with_corners J' N']
-- declare functions, sets, points and smoothness indices
{f f₁ : M → M'} {s s₁ t : set M} {x : M} {m n : with_top ℕ}
/-- Property in the model space of a model with corners of being `C^n` within a set at a point,
when read in the model vector space. This property will be lifted to manifolds to define smooth
functions between manifolds. -/
def times_cont_diff_within_at_prop (n : with_top ℕ) (f s x) : Prop :=
times_cont_diff_within_at 𝕜 n (I' ∘ f ∘ I.symm) (range I ∩ I.symm ⁻¹' s) (I x)
/-- Being `Cⁿ` in the model space is a local property, invariant under smooth maps. Therefore,
it will lift nicely to manifolds. -/
lemma times_cont_diff_within_at_local_invariant_prop (n : with_top ℕ) :
(times_cont_diff_groupoid ∞ I).local_invariant_prop (times_cont_diff_groupoid ∞ I')
(times_cont_diff_within_at_prop I I' n) :=
{ is_local :=
begin
assume s x u f u_open xu,
have : range I ∩ I.symm ⁻¹' (s ∩ u) = (range I ∩ I.symm ⁻¹' s) ∩ I.symm ⁻¹' u,
by simp only [inter_assoc, preimage_inter],
rw [times_cont_diff_within_at_prop, times_cont_diff_within_at_prop, this],
symmetry,
apply times_cont_diff_within_at_inter,
have : u ∈ 𝓝 (I.symm (I x)),
by { rw [model_with_corners.left_inv], exact mem_nhds_sets u_open xu },
apply continuous_at.preimage_mem_nhds I.continuous_symm.continuous_at this,
end,
right_invariance :=
begin
assume s x f e he hx h,
rw times_cont_diff_within_at_prop at h ⊢,
have : I x = (I ∘ e.symm ∘ I.symm) (I (e x)), by simp only [hx] with mfld_simps,
rw this at h,
have : I (e x) ∈ (I.symm) ⁻¹' e.target ∩ range ⇑I, by simp only [hx] with mfld_simps,
have := ((mem_groupoid_of_pregroupoid.2 he).2.times_cont_diff_within_at this).of_le le_top,
convert h.comp' _ this using 1,
{ ext y, simp only with mfld_simps },
{ mfld_set_tac }
end,
congr :=
begin
assume s x f g h hx hf,
apply hf.congr,
{ assume y hy,
simp only with mfld_simps at hy,
simp only [h, hy] with mfld_simps },
{ simp only [hx] with mfld_simps }
end,
left_invariance :=
begin
assume s x f e' he' hs hx h,
rw times_cont_diff_within_at_prop at h ⊢,
have A : (I' ∘ f ∘ I.symm) (I x) ∈ (I'.symm ⁻¹' e'.source ∩ range I'),
by simp only [hx] with mfld_simps,
have := ((mem_groupoid_of_pregroupoid.2 he').1.times_cont_diff_within_at A).of_le le_top,
convert this.comp _ h _,
{ ext y, simp only with mfld_simps },
{ assume y hy, simp only with mfld_simps at hy, simpa only [hy] with mfld_simps using hs hy.2 }
end }
lemma times_cont_diff_within_at_local_invariant_prop_mono (n : with_top ℕ)
⦃s x t⦄ ⦃f : H → H'⦄ (hts : t ⊆ s) (h : times_cont_diff_within_at_prop I I' n f s x) :
times_cont_diff_within_at_prop I I' n f t x :=
begin
apply h.mono (λ y hy, _),
simp only with mfld_simps at hy,
simp only [hy, hts _] with mfld_simps
end
lemma times_cont_diff_within_at_local_invariant_prop_id (x : H) :
times_cont_diff_within_at_prop I I ∞ id univ x :=
begin
simp [times_cont_diff_within_at_prop],
have : times_cont_diff_within_at 𝕜 ∞ id (range I) (I x) :=
times_cont_diff_id.times_cont_diff_at.times_cont_diff_within_at,
apply this.congr (λ y hy, _),
{ simp only with mfld_simps },
{ simp only [model_with_corners.right_inv I hy] with mfld_simps }
end
/-- A function is `n` times continuously differentiable within a set at a point in a manifold if
it is continuous and it is `n` times continuously differentiable in this set around this point, when
read in the preferred chart at this point. -/
def times_cont_mdiff_within_at (n : with_top ℕ) (f : M → M') (s : set M) (x : M) :=
lift_prop_within_at (times_cont_diff_within_at_prop I I' n) f s x
/-- Abbreviation for `times_cont_mdiff_within_at I I' ⊤ f s x`. See also documentation for `smooth`.
-/
@[reducible] def smooth_within_at (f : M → M') (s : set M) (x : M) :=
times_cont_mdiff_within_at I I' ⊤ f s x
/-- A function is `n` times continuously differentiable at a point in a manifold if
it is continuous and it is `n` times continuously differentiable around this point, when
read in the preferred chart at this point. -/
def times_cont_mdiff_at (n : with_top ℕ) (f : M → M') (x : M) :=
times_cont_mdiff_within_at I I' n f univ x
/-- Abbreviation for `times_cont_mdiff_at I I' ⊤ f x`. See also documentation for `smooth`. -/
@[reducible] def smooth_at (f : M → M') (x : M) := times_cont_mdiff_at I I' ⊤ f x
/-- A function is `n` times continuously differentiable in a set of a manifold if it is continuous
and, for any pair of points, it is `n` times continuously differentiable on this set in the charts
around these points. -/
def times_cont_mdiff_on (n : with_top ℕ) (f : M → M') (s : set M) :=
∀ x ∈ s, times_cont_mdiff_within_at I I' n f s x
/-- Abbreviation for `times_cont_mdiff_on I I' ⊤ f s`. See also documentation for `smooth`. -/
@[reducible] def smooth_on (f : M → M') (s : set M) := times_cont_mdiff_on I I' ⊤ f s
/-- A function is `n` times continuously differentiable in a manifold if it is continuous
and, for any pair of points, it is `n` times continuously differentiable in the charts
around these points. -/
def times_cont_mdiff (n : with_top ℕ) (f : M → M') :=
∀ x, times_cont_mdiff_at I I' n f x
/-- Abbreviation for `times_cont_mdiff I I' ⊤ f`.
Short note to work with these abbreviations: a lemma of the form `times_cont_mdiff_foo.bar` will
apply fine to an assumption `smooth_foo` using dot notation or normal notation.
If the consequence `bar` of the lemma involves `times_cont_diff`, it is still better to restate
the lemma replacing `times_cont_diff` with `smooth` both in the assumption and in the conclusion,
to make it possible to use `smooth` consistently.
This also applies to `smooth_at`, `smooth_on` and `smooth_within_at`.-/
@[reducible] def smooth (f : M → M') := times_cont_mdiff I I' ⊤ f
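-- A hedged orientation point (added here, not part of the original file): when `M` and `M'`
-- are the vector spaces `E` and `E'` equipped with their canonical models with corners, these
-- notions reduce to the usual `times_cont_diff` hierarchy; this is the content of
-- `times_cont_mdiff_iff_times_cont_diff` mentioned in the file header above.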
/-! ### Basic properties of smooth functions between manifolds -/
variables {I I'}
lemma times_cont_mdiff.smooth (h : times_cont_mdiff I I' ⊤ f) : smooth I I' f := h
lemma smooth.times_cont_mdiff (h : smooth I I' f) : times_cont_mdiff I I' ⊤ f := h
lemma times_cont_mdiff_on.smooth_on (h : times_cont_mdiff_on I I' ⊤ f s) : smooth_on I I' f s := h
lemma smooth_on.times_cont_mdiff_on (h : smooth_on I I' f s) : times_cont_mdiff_on I I' ⊤ f s := h
lemma times_cont_mdiff_at.smooth_at (h : times_cont_mdiff_at I I' ⊤ f x) : smooth_at I I' f x := h
lemma smooth_at.times_cont_mdiff_at (h : smooth_at I I' f x) : times_cont_mdiff_at I I' ⊤ f x := h
lemma times_cont_mdiff_within_at.smooth_within_at (h : times_cont_mdiff_within_at I I' ⊤ f s x) :
smooth_within_at I I' f s x := h
lemma smooth_within_at.times_cont_mdiff_within_at (h : smooth_within_at I I' f s x) :
times_cont_mdiff_within_at I I' ⊤ f s x := h
lemma times_cont_mdiff.times_cont_mdiff_at (h : times_cont_mdiff I I' n f) :
times_cont_mdiff_at I I' n f x :=
h x
lemma smooth.smooth_at (h : smooth I I' f) :
smooth_at I I' f x := times_cont_mdiff.times_cont_mdiff_at h
lemma times_cont_mdiff_within_at_univ :
times_cont_mdiff_within_at I I' n f univ x ↔ times_cont_mdiff_at I I' n f x :=
iff.rfl
lemma smooth_at_univ :
smooth_within_at I I' f univ x ↔ smooth_at I I' f x := times_cont_mdiff_within_at_univ
lemma times_cont_mdiff_on_univ :
times_cont_mdiff_on I I' n f univ ↔ times_cont_mdiff I I' n f :=
by simp only [times_cont_mdiff_on, times_cont_mdiff, times_cont_mdiff_within_at_univ,
forall_prop_of_true, mem_univ]
lemma smooth_on_univ :
smooth_on I I' f univ ↔ smooth I I' f := times_cont_mdiff_on_univ
/-- One can reformulate smoothness within a set at a point as continuity within this set at this
point, and smoothness in the corresponding extended chart. -/
lemma times_cont_mdiff_within_at_iff :
times_cont_mdiff_within_at I I' n f s x ↔ continuous_within_at f s x ∧
times_cont_diff_within_at 𝕜 n ((ext_chart_at I' (f x)) ∘ f ∘ (ext_chart_at I x).symm)
((ext_chart_at I x).target ∩ (ext_chart_at I x).symm ⁻¹' (s ∩ f ⁻¹' (ext_chart_at I' (f x)).source))
(ext_chart_at I x x) :=
begin
rw [times_cont_mdiff_within_at, lift_prop_within_at, times_cont_diff_within_at_prop],
congr' 3,
mfld_set_tac
end
lemma smooth_within_at_iff :
smooth_within_at I I' f s x ↔ continuous_within_at f s x ∧
times_cont_diff_within_at 𝕜 ∞ ((ext_chart_at I' (f x)) ∘ f ∘ (ext_chart_at I x).symm)
((ext_chart_at I x).target ∩ (ext_chart_at I x).symm ⁻¹' (s ∩ f ⁻¹' (ext_chart_at I' (f x)).source))
(ext_chart_at I x x) :=
times_cont_mdiff_within_at_iff
include Is I's
/-- One can reformulate smoothness on a set as continuity on this set, and smoothness in any
extended chart. -/
lemma times_cont_mdiff_on_iff :
times_cont_mdiff_on I I' n f s ↔ continuous_on f s ∧
∀ (x : M) (y : M'), times_cont_diff_on 𝕜 n ((ext_chart_at I' y) ∘ f ∘ (ext_chart_at I x).symm)
((ext_chart_at I x).target ∩ (ext_chart_at I x).symm ⁻¹' (s ∩ f ⁻¹' (ext_chart_at I' y).source)) :=
begin
split,
{ assume h,
refine ⟨λ x hx, (h x hx).1, λ x y z hz, _⟩,
simp only with mfld_simps at hz,
let w := (ext_chart_at I x).symm z,
have : w ∈ s, by simp only [w, hz] with mfld_simps,
specialize h w this,
have w1 : w ∈ (chart_at H x).source, by simp only [w, hz] with mfld_simps,
have w2 : f w ∈ (chart_at H' y).source, by simp only [w, hz] with mfld_simps,
convert (((times_cont_diff_within_at_local_invariant_prop I I' n).lift_prop_within_at_indep_chart
(structure_groupoid.chart_mem_maximal_atlas _ x) w1
(structure_groupoid.chart_mem_maximal_atlas _ y) w2).1 h).2 using 1,
{ mfld_set_tac },
{ simp only [w, hz] with mfld_simps } },
{ rintros ⟨hcont, hdiff⟩ x hx,
refine ⟨hcont x hx, _⟩,
have Z := hdiff x (f x) (ext_chart_at I x x) (by simp only [hx] with mfld_simps),
dsimp [times_cont_diff_within_at_prop],
convert Z using 1,
mfld_set_tac }
end
lemma smooth_on_iff :
smooth_on I I' f s ↔ continuous_on f s ∧
∀ (x : M) (y : M'), times_cont_diff_on 𝕜 ⊤ ((ext_chart_at I' y) ∘ f ∘ (ext_chart_at I x).symm)
((ext_chart_at I x).target ∩ (ext_chart_at I x).symm ⁻¹' (s ∩ f ⁻¹' (ext_chart_at I' y).source)) :=
times_cont_mdiff_on_iff
/-- One can reformulate smoothness as continuity and smoothness in any extended chart. -/
lemma times_cont_mdiff_iff :
times_cont_mdiff I I' n f ↔ continuous f ∧
∀ (x : M) (y : M'), times_cont_diff_on 𝕜 n ((ext_chart_at I' y) ∘ f ∘ (ext_chart_at I x).symm)
((ext_chart_at I x).target ∩ (ext_chart_at I x).symm ⁻¹' (f ⁻¹' (ext_chart_at I' y).source)) :=
by simp [← times_cont_mdiff_on_univ, times_cont_mdiff_on_iff, continuous_iff_continuous_on_univ]
lemma smooth_iff :
smooth I I' f ↔ continuous f ∧
∀ (x : M) (y : M'), times_cont_diff_on 𝕜 ⊤ ((ext_chart_at I' y) ∘ f ∘ (ext_chart_at I x).symm)
((ext_chart_at I x).target ∩ (ext_chart_at I x).symm ⁻¹' (f ⁻¹' (ext_chart_at I' y).source)) :=
times_cont_mdiff_iff
omit Is I's
/-! ### Deducing smoothness from higher smoothness -/
lemma times_cont_mdiff_within_at.of_le (hf : times_cont_mdiff_within_at I I' n f s x) (le : m ≤ n) :
times_cont_mdiff_within_at I I' m f s x :=
⟨hf.1, hf.2.of_le le⟩
lemma times_cont_mdiff_at.of_le (hf : times_cont_mdiff_at I I' n f x) (le : m ≤ n) :
times_cont_mdiff_at I I' m f x :=
times_cont_mdiff_within_at.of_le hf le
lemma times_cont_mdiff_on.of_le (hf : times_cont_mdiff_on I I' n f s) (le : m ≤ n) :
times_cont_mdiff_on I I' m f s :=
λ x hx, (hf x hx).of_le le
lemma times_cont_mdiff.of_le (hf : times_cont_mdiff I I' n f) (le : m ≤ n) :
times_cont_mdiff I I' m f :=
λ x, (hf x).of_le le
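/- Illustrative example (not in the original file): a smooth (`C^∞`) map is `C^n` for every `n`,
since `⊤` is the top element of `with_top ℕ`. -/
example (h : smooth I I' f) : times_cont_mdiff I I' n f := times_cont_mdiff.of_le h le_top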
/-! ### Deducing smoothness from smoothness one step beyond -/
lemma times_cont_mdiff_within_at.of_succ {n : ℕ} (h : times_cont_mdiff_within_at I I' n.succ f s x) :
times_cont_mdiff_within_at I I' n f s x :=
h.of_le (with_top.coe_le_coe.2 (nat.le_succ n))
lemma times_cont_mdiff_at.of_succ {n : ℕ} (h : times_cont_mdiff_at I I' n.succ f x) :
times_cont_mdiff_at I I' n f x :=
times_cont_mdiff_within_at.of_succ h
lemma times_cont_mdiff_on.of_succ {n : ℕ} (h : times_cont_mdiff_on I I' n.succ f s) :
times_cont_mdiff_on I I' n f s :=
λ x hx, (h x hx).of_succ
lemma times_cont_mdiff.of_succ {n : ℕ} (h : times_cont_mdiff I I' n.succ f) :
times_cont_mdiff I I' n f :=
λ x, (h x).of_succ
/-! ### Deducing continuity from smoothness -/
lemma times_cont_mdiff_within_at.continuous_within_at
(hf : times_cont_mdiff_within_at I I' n f s x) : continuous_within_at f s x :=
hf.1
lemma times_cont_mdiff_at.continuous_at
(hf : times_cont_mdiff_at I I' n f x) : continuous_at f x :=
(continuous_within_at_univ _ _ ).1 $ times_cont_mdiff_within_at.continuous_within_at hf
lemma times_cont_mdiff_on.continuous_on
(hf : times_cont_mdiff_on I I' n f s) : continuous_on f s :=
λ x hx, (hf x hx).continuous_within_at
lemma times_cont_mdiff.continuous (hf : times_cont_mdiff I I' n f) :
continuous f :=
continuous_iff_continuous_at.2 $ λ x, (hf x).continuous_at
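/- Illustrative example (not in the original file): in particular, smooth maps are continuous. -/
example (hf : smooth I I' f) : continuous f := times_cont_mdiff.continuous hf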
/-! ### Deducing differentiability from smoothness -/
lemma times_cont_mdiff_within_at.mdifferentiable_within_at
(hf : times_cont_mdiff_within_at I I' n f s x) (hn : 1 ≤ n) :
mdifferentiable_within_at I I' f s x :=
begin
suffices h : mdifferentiable_within_at I I' f (s ∩ (f ⁻¹' (ext_chart_at I' (f x)).source)) x,
{ rwa mdifferentiable_within_at_inter' at h,
apply (hf.1).preimage_mem_nhds_within,
exact mem_nhds_sets (ext_chart_at_open_source I' (f x)) (mem_ext_chart_source I' (f x)) },
rw mdifferentiable_within_at_iff,
exact ⟨hf.1.mono (inter_subset_left _ _),
(hf.2.differentiable_within_at hn).mono (by mfld_set_tac)⟩,
end
lemma times_cont_mdiff_at.mdifferentiable_at (hf : times_cont_mdiff_at I I' n f x) (hn : 1 ≤ n) :
mdifferentiable_at I I' f x :=
mdifferentiable_within_at_univ.1 $ times_cont_mdiff_within_at.mdifferentiable_within_at hf hn
lemma times_cont_mdiff_on.mdifferentiable_on (hf : times_cont_mdiff_on I I' n f s) (hn : 1 ≤ n) :
mdifferentiable_on I I' f s :=
λ x hx, (hf x hx).mdifferentiable_within_at hn
lemma times_cont_mdiff.mdifferentiable (hf : times_cont_mdiff I I' n f) (hn : 1 ≤ n) :
mdifferentiable I I' f :=
λ x, (hf x).mdifferentiable_at hn
lemma smooth.mdifferentiable (hf : smooth I I' f) : mdifferentiable I I' f :=
times_cont_mdiff.mdifferentiable hf le_top
lemma smooth.mdifferentiable_at (hf : smooth I I' f) : mdifferentiable_at I I' f x :=
hf.mdifferentiable x
lemma smooth.mdifferentiable_within_at (hf : smooth I I' f) : mdifferentiable_within_at I I' f s x :=
hf.mdifferentiable_at.mdifferentiable_within_at
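/- Illustrative example (not in the original file): `C^1` regularity already suffices for
differentiability. -/
example (hf : times_cont_mdiff I I' 1 f) : mdifferentiable I I' f :=
hf.mdifferentiable (le_refl _)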
/-! ### `C^∞` smoothness -/
lemma times_cont_mdiff_within_at_top :
smooth_within_at I I' f s x ↔ (∀n:ℕ, times_cont_mdiff_within_at I I' n f s x) :=
⟨λ h n, ⟨h.1, times_cont_diff_within_at_top.1 h.2 n⟩,
λ H, ⟨(H 0).1, times_cont_diff_within_at_top.2 (λ n, (H n).2)⟩⟩
lemma times_cont_mdiff_at_top :
smooth_at I I' f x ↔ (∀n:ℕ, times_cont_mdiff_at I I' n f x) :=
times_cont_mdiff_within_at_top
lemma times_cont_mdiff_on_top :
smooth_on I I' f s ↔ (∀n:ℕ, times_cont_mdiff_on I I' n f s) :=
⟨λ h n, h.of_le le_top, λ h x hx, times_cont_mdiff_within_at_top.2 (λ n, h n x hx)⟩
lemma times_cont_mdiff_top :
smooth I I' f ↔ (∀n:ℕ, times_cont_mdiff I I' n f) :=
⟨λ h n, h.of_le le_top, λ h x, times_cont_mdiff_within_at_top.2 (λ n, h n x)⟩
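/- Illustrative example (not in the original file): to prove smoothness it suffices to prove
`C^n` regularity for every finite `n`. -/
example (h : ∀ n : ℕ, times_cont_mdiff I I' n f) : smooth I I' f := times_cont_mdiff_top.2 h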
lemma times_cont_mdiff_within_at_iff_nat :
times_cont_mdiff_within_at I I' n f s x ↔
(∀m:ℕ, (m : with_top ℕ) ≤ n → times_cont_mdiff_within_at I I' m f s x) :=
begin
refine ⟨λ h m hm, h.of_le hm, λ h, _⟩,
cases n,
{ exact times_cont_mdiff_within_at_top.2 (λ n, h n le_top) },
{ exact h n (le_refl _) }
end
/-! ### Restriction to a smaller set -/
lemma times_cont_mdiff_within_at.mono (hf : times_cont_mdiff_within_at I I' n f s x) (hts : t ⊆ s) :
times_cont_mdiff_within_at I I' n f t x :=
structure_groupoid.local_invariant_prop.lift_prop_within_at_mono
(times_cont_diff_within_at_local_invariant_prop_mono I I' n) hf hts
lemma times_cont_mdiff_at.times_cont_mdiff_within_at (hf : times_cont_mdiff_at I I' n f x) :
times_cont_mdiff_within_at I I' n f s x :=
times_cont_mdiff_within_at.mono hf (subset_univ _)
lemma smooth_at.smooth_within_at (hf : smooth_at I I' f x) :
smooth_within_at I I' f s x :=
times_cont_mdiff_at.times_cont_mdiff_within_at hf
lemma times_cont_mdiff_on.mono (hf : times_cont_mdiff_on I I' n f s) (hts : t ⊆ s) :
times_cont_mdiff_on I I' n f t :=
λ x hx, (hf x (hts hx)).mono hts
lemma times_cont_mdiff.times_cont_mdiff_on (hf : times_cont_mdiff I I' n f) :
times_cont_mdiff_on I I' n f s :=
λ x hx, (hf x).times_cont_mdiff_within_at
lemma smooth.smooth_on (hf : smooth I I' f) :
smooth_on I I' f s :=
times_cont_mdiff.times_cont_mdiff_on hf
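/- Illustrative example (not in the original file): smoothness on a set is preserved by
restricting to a smaller set. -/
example (hf : smooth_on I I' f s) (hts : t ⊆ s) : smooth_on I I' f t :=
times_cont_mdiff_on.mono hf hts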
lemma times_cont_mdiff_within_at_inter' (ht : t ∈ 𝓝[s] x) :
times_cont_mdiff_within_at I I' n f (s ∩ t) x ↔ times_cont_mdiff_within_at I I' n f s x :=
(times_cont_diff_within_at_local_invariant_prop I I' n).lift_prop_within_at_inter' ht
lemma times_cont_mdiff_within_at_inter (ht : t ∈ 𝓝 x) :
times_cont_mdiff_within_at I I' n f (s ∩ t) x ↔ times_cont_mdiff_within_at I I' n f s x :=
(times_cont_diff_within_at_local_invariant_prop I I' n).lift_prop_within_at_inter ht
lemma times_cont_mdiff_within_at.times_cont_mdiff_at
(h : times_cont_mdiff_within_at I I' n f s x) (ht : s ∈ 𝓝 x) :
times_cont_mdiff_at I I' n f x :=
(times_cont_diff_within_at_local_invariant_prop I I' n).lift_prop_at_of_lift_prop_within_at h ht
lemma smooth_within_at.smooth_at
(h : smooth_within_at I I' f s x) (ht : s ∈ 𝓝 x) :
smooth_at I I' f x :=
times_cont_mdiff_within_at.times_cont_mdiff_at h ht
include Is I's
/-- A function is `C^n` within a set at a point, for `n : ℕ`, if and only if it is `C^n` on
a neighborhood of this point. -/
lemma times_cont_mdiff_within_at_iff_times_cont_mdiff_on_nhds {n : ℕ} :
times_cont_mdiff_within_at I I' n f s x ↔
∃ u ∈ 𝓝[insert x s] x, times_cont_mdiff_on I I' n f u :=
begin
split,
{ assume h,
-- the property is true in charts. We will pull such a good neighborhood in the chart to the
-- manifold. For this, we need to restrict to a small enough set where everything makes sense
obtain ⟨o, o_open, xo, ho, h'o⟩ : ∃ (o : set M),
is_open o ∧ x ∈ o ∧ o ⊆ (chart_at H x).source ∧ o ∩ s ⊆ f ⁻¹' (chart_at H' (f x)).source,
{ have : (chart_at H' (f x)).source ∈ 𝓝 (f x) :=
mem_nhds_sets (local_homeomorph.open_source _) (mem_chart_source H' (f x)),
rcases mem_nhds_within.1 (h.1.preimage_mem_nhds_within this) with ⟨u, u_open, xu, hu⟩,
refine ⟨u ∩ (chart_at H x).source, _, ⟨xu, mem_chart_source _ _⟩, _, _⟩,
{ exact is_open_inter u_open (local_homeomorph.open_source _) },
{ assume y hy, exact hy.2 },
{ assume y hy, exact hu ⟨hy.1.1, hy.2⟩ } },
have h' : times_cont_mdiff_within_at I I' n f (s ∩ o) x := h.mono (inter_subset_left _ _),
simp only [times_cont_mdiff_within_at, lift_prop_within_at, times_cont_diff_within_at_prop] at h',
-- let `u` be a good neighborhood in the chart where the function is smooth
rcases h.2.times_cont_diff_on (le_refl _) with ⟨u, u_nhds, u_subset, hu⟩,
-- pull it back to the manifold, and intersect with a suitable neighborhood of `x`, to get the
-- desired good neighborhood `v`.
let v := ((insert x s) ∩ o) ∩ (ext_chart_at I x) ⁻¹' u,
have v_incl : v ⊆ (chart_at H x).source := λ y hy, ho hy.1.2,
have v_incl' : ∀ y ∈ v, f y ∈ (chart_at H' (f x)).source,
{ assume y hy,
rcases hy.1.1 with rfl|h',
{ simp only with mfld_simps },
{ apply h'o ⟨hy.1.2, h'⟩ } },
refine ⟨v, _, _⟩,
show v ∈ 𝓝[insert x s] x,
{ rw nhds_within_restrict _ xo o_open,
refine filter.inter_mem_sets self_mem_nhds_within _,
suffices : u ∈ 𝓝[(ext_chart_at I x) '' (insert x s ∩ o)] (ext_chart_at I x x),
from (ext_chart_at_continuous_at I x).continuous_within_at.preimage_mem_nhds_within' this,
apply nhds_within_mono _ _ u_nhds,
rw image_subset_iff,
assume y hy,
rcases hy.1 with rfl|h',
{ simp only [mem_insert_iff] with mfld_simps },
{ simp only [mem_insert_iff, ho hy.2, h', h'o ⟨hy.2, h'⟩] with mfld_simps } },
show times_cont_mdiff_on I I' n f v,
{ assume y hy,
apply (((times_cont_diff_within_at_local_invariant_prop I I' n).lift_prop_within_at_indep_chart
(structure_groupoid.chart_mem_maximal_atlas _ x) (v_incl hy)
(structure_groupoid.chart_mem_maximal_atlas _ (f x)) (v_incl' y hy))).2,
split,
{ apply (((ext_chart_at_continuous_on_symm I' (f x) _ _).comp'
(hu _ hy.2).continuous_within_at).comp' (ext_chart_at_continuous_on I x _ _)).congr_mono,
{ assume z hz,
simp only [v_incl hz, v_incl' z hz] with mfld_simps },
{ assume z hz,
simp only [v_incl hz, v_incl' z hz] with mfld_simps,
exact hz.2 },
{ simp only [v_incl hy, v_incl' y hy] with mfld_simps },
{ simp only [v_incl hy, v_incl' y hy] with mfld_simps },
{ simp only [v_incl hy] with mfld_simps } },
{ apply hu.mono,
{ assume z hz,
simp only [v] with mfld_simps at hz,
have : I ((chart_at H x) (((chart_at H x).symm) (I.symm z))) ∈ u, by simp only [hz],
simpa only [hz] with mfld_simps using this },
{ have exty : I (chart_at H x y) ∈ u := hy.2,
simp only [v_incl hy, v_incl' y hy, exty, hy.1.1, hy.1.2] with mfld_simps } } } },
{ rintros ⟨u, u_nhds, hu⟩,
have : times_cont_mdiff_within_at I I' ↑n f (insert x s ∩ u) x,
{ have : x ∈ insert x s := mem_insert x s,
exact hu.mono (inter_subset_right _ _) _ ⟨this, mem_of_mem_nhds_within this u_nhds⟩ },
rw times_cont_mdiff_within_at_inter' u_nhds at this,
exact this.mono (subset_insert x s) }
end
/-- A function is `C^n` at a point, for `n : ℕ`, if and only if it is `C^n` on
a neighborhood of this point. -/
lemma times_cont_mdiff_at_iff_times_cont_mdiff_on_nhds {n : ℕ} :
times_cont_mdiff_at I I' n f x ↔ ∃ u ∈ 𝓝 x, times_cont_mdiff_on I I' n f u :=
by simp [← times_cont_mdiff_within_at_univ, times_cont_mdiff_within_at_iff_times_cont_mdiff_on_nhds,
nhds_within_univ]
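/- Illustrative example (not in the original file): extracting a neighborhood on which `f` is
`C^n` from `C^n` regularity at a point. -/
example {n : ℕ} (h : times_cont_mdiff_at I I' n f x) :
  ∃ u ∈ 𝓝 x, times_cont_mdiff_on I I' n f u :=
times_cont_mdiff_at_iff_times_cont_mdiff_on_nhds.1 h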
omit Is I's
/-! ### Congruence lemmas -/
lemma times_cont_mdiff_within_at.congr
(h : times_cont_mdiff_within_at I I' n f s x) (h₁ : ∀ y ∈ s, f₁ y = f y)
(hx : f₁ x = f x) : times_cont_mdiff_within_at I I' n f₁ s x :=
(times_cont_diff_within_at_local_invariant_prop I I' n).lift_prop_within_at_congr h h₁ hx
lemma times_cont_mdiff_within_at_congr (h₁ : ∀ y ∈ s, f₁ y = f y) (hx : f₁ x = f x) :
times_cont_mdiff_within_at I I' n f₁ s x ↔ times_cont_mdiff_within_at I I' n f s x :=
(times_cont_diff_within_at_local_invariant_prop I I' n).lift_prop_within_at_congr_iff h₁ hx
lemma times_cont_mdiff_within_at.congr_of_eventually_eq
(h : times_cont_mdiff_within_at I I' n f s x) (h₁ : f₁ =ᶠ[𝓝[s] x] f)
(hx : f₁ x = f x) : times_cont_mdiff_within_at I I' n f₁ s x :=
(times_cont_diff_within_at_local_invariant_prop I I' n).lift_prop_within_at_congr_of_eventually_eq
h h₁ hx
lemma filter.eventually_eq.times_cont_mdiff_within_at_iff
(h₁ : f₁ =ᶠ[𝓝[s] x] f) (hx : f₁ x = f x) :
times_cont_mdiff_within_at I I' n f₁ s x ↔ times_cont_mdiff_within_at I I' n f s x :=
(times_cont_diff_within_at_local_invariant_prop I I' n)
.lift_prop_within_at_congr_iff_of_eventually_eq h₁ hx
lemma times_cont_mdiff_at.congr_of_eventually_eq
(h : times_cont_mdiff_at I I' n f x) (h₁ : f₁ =ᶠ[𝓝 x] f) :
times_cont_mdiff_at I I' n f₁ x :=
(times_cont_diff_within_at_local_invariant_prop I I' n).lift_prop_at_congr_of_eventually_eq h h₁
lemma filter.eventually_eq.times_cont_mdiff_at_iff (h₁ : f₁ =ᶠ[𝓝 x] f) :
times_cont_mdiff_at I I' n f₁ x ↔ times_cont_mdiff_at I I' n f x :=
(times_cont_diff_within_at_local_invariant_prop I I' n).lift_prop_at_congr_iff_of_eventually_eq h₁
lemma times_cont_mdiff_on.congr (h : times_cont_mdiff_on I I' n f s) (h₁ : ∀ y ∈ s, f₁ y = f y) :
times_cont_mdiff_on I I' n f₁ s :=
(times_cont_diff_within_at_local_invariant_prop I I' n).lift_prop_on_congr h h₁
lemma times_cont_mdiff_on_congr (h₁ : ∀ y ∈ s, f₁ y = f y) :
times_cont_mdiff_on I I' n f₁ s ↔ times_cont_mdiff_on I I' n f s :=
(times_cont_diff_within_at_local_invariant_prop I I' n).lift_prop_on_congr_iff h₁
/-! ### Locality -/
/-- Being `C^n` is a local property. -/
lemma times_cont_mdiff_on_of_locally_times_cont_mdiff_on
(h : ∀x∈s, ∃u, is_open u ∧ x ∈ u ∧ times_cont_mdiff_on I I' n f (s ∩ u)) :
times_cont_mdiff_on I I' n f s :=
(times_cont_diff_within_at_local_invariant_prop I I' n).lift_prop_on_of_locally_lift_prop_on h
lemma times_cont_mdiff_of_locally_times_cont_mdiff_on
(h : ∀x, ∃u, is_open u ∧ x ∈ u ∧ times_cont_mdiff_on I I' n f u) :
times_cont_mdiff I I' n f :=
(times_cont_diff_within_at_local_invariant_prop I I' n).lift_prop_of_locally_lift_prop_on h
/-! ### Smoothness of the composition of smooth functions between manifolds -/
section composition
variables {E'' : Type*} [normed_group E''] [normed_space 𝕜 E'']
{H'' : Type*} [topological_space H''] {I'' : model_with_corners 𝕜 E'' H''}
{M'' : Type*} [topological_space M''] [charted_space H'' M'']
[smooth_manifold_with_corners I'' M'']
include Is I's
/-- The composition of `C^n` functions on domains is `C^n`. -/
lemma times_cont_mdiff_on.comp {t : set M'} {g : M' → M''}
(hg : times_cont_mdiff_on I' I'' n g t) (hf : times_cont_mdiff_on I I' n f s)
(st : s ⊆ f ⁻¹' t) : times_cont_mdiff_on I I'' n (g ∘ f) s :=
begin
rw times_cont_mdiff_on_iff at hf hg ⊢,
have cont_gf : continuous_on (g ∘ f) s := continuous_on.comp hg.1 hf.1 st,
refine ⟨cont_gf, λx y, _⟩,
apply times_cont_diff_on_of_locally_times_cont_diff_on,
assume z hz,
let x' := (ext_chart_at I x).symm z,
have x'_source : x' ∈ (ext_chart_at I x).source := (ext_chart_at I x).map_target hz.1,
obtain ⟨o, o_open, zo, o_subset⟩ : ∃ o, is_open o ∧ z ∈ o ∧
o ∩ (((ext_chart_at I x).symm ⁻¹' s ∩ range I)) ⊆
(ext_chart_at I x).symm ⁻¹' (f ⁻¹' (ext_chart_at I' (f x')).source),
{ have x'z : (ext_chart_at I x) x' = z, by simp only [x', hz.1, -ext_chart_at] with mfld_simps,
have : continuous_within_at f s x' := hf.1 _ hz.2.1,
have : f ⁻¹' (ext_chart_at I' (f x')).source ∈ 𝓝[s] x' :=
this.preimage_mem_nhds_within
(mem_nhds_sets (ext_chart_at_open_source I' (f x')) (mem_ext_chart_source I' (f x'))),
have : (ext_chart_at I x).symm ⁻¹' (f ⁻¹' (ext_chart_at I' (f x')).source) ∈
𝓝[(ext_chart_at I x).symm ⁻¹' s ∩ range I] ((ext_chart_at I x) x') :=
ext_chart_preimage_mem_nhds_within' _ _ x'_source this,
rw x'z at this,
exact mem_nhds_within.1 this },
refine ⟨o, o_open, zo, _⟩,
let u := ((ext_chart_at I x).target ∩
(ext_chart_at I x).symm ⁻¹' (s ∩ g ∘ f ⁻¹' (ext_chart_at I'' y).source) ∩ o),
-- it remains to show that `g ∘ f` read in the charts is `C^n` on `u`
have u_subset : u ⊆ (ext_chart_at I x).target ∩
(ext_chart_at I x).symm ⁻¹' (s ∩ f ⁻¹' (ext_chart_at I' (f x')).source),
{ rintros p ⟨⟨hp₁, ⟨hp₂, hp₃⟩⟩, hp₄⟩,
refine ⟨hp₁, ⟨hp₂, o_subset ⟨hp₄, ⟨hp₂, _⟩⟩⟩⟩,
have := hp₁.1,
rwa model_with_corners.target at this },
have : times_cont_diff_on 𝕜 n (((ext_chart_at I'' y) ∘ g ∘ (ext_chart_at I' (f x')).symm) ∘
((ext_chart_at I' (f x')) ∘ f ∘ (ext_chart_at I x).symm)) u,
{ refine times_cont_diff_on.comp (hg.2 (f x') y) ((hf.2 x (f x')).mono u_subset) (λp hp, _),
simp only [local_equiv.map_source _ (u_subset hp).2.2, local_equiv.left_inv _ (u_subset hp).2.2,
-ext_chart_at] with mfld_simps,
exact ⟨st (u_subset hp).2.1, hp.1.2.2⟩ },
refine this.congr (λp hp, _),
simp only [local_equiv.left_inv _ (u_subset hp).2.2, -ext_chart_at] with mfld_simps
end
/-- The composition of `C^n` functions on domains is `C^n`. -/
lemma times_cont_mdiff_on.comp' {t : set M'} {g : M' → M''}
(hg : times_cont_mdiff_on I' I'' n g t) (hf : times_cont_mdiff_on I I' n f s) :
times_cont_mdiff_on I I'' n (g ∘ f) (s ∩ f ⁻¹' t) :=
hg.comp (hf.mono (inter_subset_left _ _)) (inter_subset_right _ _)
/-- The composition of `C^n` functions is `C^n`. -/
lemma times_cont_mdiff.comp {g : M' → M''}
(hg : times_cont_mdiff I' I'' n g) (hf : times_cont_mdiff I I' n f) :
times_cont_mdiff I I'' n (g ∘ f) :=
begin
rw ← times_cont_mdiff_on_univ at hf hg ⊢,
exact hg.comp hf subset_preimage_univ,
end
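/- Illustrative example (not in the original file): since `smooth` is `times_cont_mdiff` with
`n = ⊤`, the composition of two smooth maps between manifolds is smooth. -/
example {g : M' → M''} (hg : smooth I' I'' g) (hf : smooth I I' f) :
  smooth I I'' (g ∘ f) :=
times_cont_mdiff.comp hg hf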
/-- The composition of `C^n` functions within domains at points is `C^n`. -/
lemma times_cont_mdiff_within_at.comp {t : set M'} {g : M' → M''} (x : M)
(hg : times_cont_mdiff_within_at I' I'' n g t (f x))
(hf : times_cont_mdiff_within_at I I' n f s x)
(st : s ⊆ f ⁻¹' t) : times_cont_mdiff_within_at I I'' n (g ∘ f) s x :=
begin
apply times_cont_mdiff_within_at_iff_nat.2 (λ m hm, _),
rcases times_cont_mdiff_within_at_iff_times_cont_mdiff_on_nhds.1 (hg.of_le hm)
with ⟨v, v_nhds, hv⟩,
rcases times_cont_mdiff_within_at_iff_times_cont_mdiff_on_nhds.1 (hf.of_le hm)
with ⟨u, u_nhds, hu⟩,
apply times_cont_mdiff_within_at_iff_times_cont_mdiff_on_nhds.2 ⟨_, _, hv.comp' hu⟩,
apply filter.inter_mem_sets u_nhds,
suffices h : v ∈ 𝓝[f '' s] (f x),
{ refine mem_nhds_within_insert.2 ⟨_, hf.continuous_within_at.preimage_mem_nhds_within' h⟩,
apply mem_of_mem_nhds_within (mem_insert (f x) t) v_nhds },
apply nhds_within_mono _ _ v_nhds,
rw image_subset_iff,
exact subset.trans st (preimage_mono (subset_insert _ _))
end
/-- The composition of `C^n` functions within domains at points is `C^n`. -/
lemma times_cont_mdiff_within_at.comp' {t : set M'} {g : M' → M''} (x : M)
(hg : times_cont_mdiff_within_at I' I'' n g t (f x))
(hf : times_cont_mdiff_within_at I I' n f s x) :
times_cont_mdiff_within_at I I'' n (g ∘ f) (s ∩ f⁻¹' t) x :=
hg.comp x (hf.mono (inter_subset_left _ _)) (inter_subset_right _ _)
/-- The composition of `C^n` functions at points is `C^n`. -/
lemma times_cont_mdiff_at.comp {g : M' → M''} (x : M)
(hg : times_cont_mdiff_at I' I'' n g (f x)) (hf : times_cont_mdiff_at I I' n f x) :
times_cont_mdiff_at I I'' n (g ∘ f) x :=
hg.comp x hf subset_preimage_univ
lemma times_cont_mdiff.comp_times_cont_mdiff_on {f : M → M'} {g : M' → M''} {s : set M}
(hg : times_cont_mdiff I' I'' n g) (hf : times_cont_mdiff_on I I' n f s) :
times_cont_mdiff_on I I'' n (g ∘ f) s :=
hg.times_cont_mdiff_on.comp hf set.subset_preimage_univ
lemma smooth.comp_smooth_on {f : M → M'} {g : M' → M''} {s : set M}
(hg : smooth I' I'' g) (hf : smooth_on I I' f s) :
smooth_on I I'' (g ∘ f) s :=
hg.smooth_on.comp hf set.subset_preimage_univ
end composition
/-! ### Atlas members are smooth -/
section atlas
variables {e : local_homeomorph M H}
include Is
/-- An atlas member is `C^n` for any `n`. -/
lemma times_cont_mdiff_on_of_mem_maximal_atlas
(h : e ∈ maximal_atlas I M) : times_cont_mdiff_on I I n e e.source :=
times_cont_mdiff_on.of_le
((times_cont_diff_within_at_local_invariant_prop I I ∞).lift_prop_on_of_mem_maximal_atlas
(times_cont_diff_within_at_local_invariant_prop_id I) h) le_top
/-- The inverse of an atlas member is `C^n` for any `n`. -/
lemma times_cont_mdiff_on_symm_of_mem_maximal_atlas
(h : e ∈ maximal_atlas I M) : times_cont_mdiff_on I I n e.symm e.target :=
times_cont_mdiff_on.of_le
((times_cont_diff_within_at_local_invariant_prop I I ∞).lift_prop_on_symm_of_mem_maximal_atlas
(times_cont_diff_within_at_local_invariant_prop_id I) h) le_top
lemma times_cont_mdiff_on_chart :
times_cont_mdiff_on I I n (chart_at H x) (chart_at H x).source :=
times_cont_mdiff_on_of_mem_maximal_atlas
((times_cont_diff_groupoid ⊤ I).chart_mem_maximal_atlas x)
lemma times_cont_mdiff_on_chart_symm :
times_cont_mdiff_on I I n (chart_at H x).symm (chart_at H x).target :=
times_cont_mdiff_on_symm_of_mem_maximal_atlas
((times_cont_diff_groupoid ⊤ I).chart_mem_maximal_atlas x)
end atlas
/-! ### The identity is smooth -/
section id
lemma times_cont_mdiff_id : times_cont_mdiff I I n (id : M → M) :=
times_cont_mdiff.of_le ((times_cont_diff_within_at_local_invariant_prop I I ∞).lift_prop_id
(times_cont_diff_within_at_local_invariant_prop_id I)) le_top
lemma smooth_id : smooth I I (id : M → M) := times_cont_mdiff_id
lemma times_cont_mdiff_on_id : times_cont_mdiff_on I I n (id : M → M) s :=
times_cont_mdiff_id.times_cont_mdiff_on
lemma smooth_on_id : smooth_on I I (id : M → M) s := times_cont_mdiff_on_id
lemma times_cont_mdiff_at_id : times_cont_mdiff_at I I n (id : M → M) x :=
times_cont_mdiff_id.times_cont_mdiff_at
lemma smooth_at_id : smooth_at I I (id : M → M) x := times_cont_mdiff_at_id
lemma times_cont_mdiff_within_at_id : times_cont_mdiff_within_at I I n (id : M → M) s x :=
times_cont_mdiff_at_id.times_cont_mdiff_within_at
lemma smooth_within_at_id : smooth_within_at I I (id : M → M) s x := times_cont_mdiff_within_at_id
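/- Illustrative example (not in the original file): the identity written as a lambda is
definitionally `id`, so `smooth_id` applies directly. -/
example : smooth I I (λ x : M, x) := smooth_id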
end id
/-! ### Constants are smooth -/
section id
variable {c : M'}
lemma times_cont_mdiff_const : times_cont_mdiff I I' n (λ (x : M), c) :=
begin
assume x,
refine ⟨continuous_within_at_const, _⟩,
simp only [times_cont_diff_within_at_prop, (∘)],
exact times_cont_diff_within_at_const,
end
lemma smooth_const : smooth I I' (λ (x : M), c) := times_cont_mdiff_const
lemma times_cont_mdiff_on_const : times_cont_mdiff_on I I' n (λ (x : M), c) s :=
times_cont_mdiff_const.times_cont_mdiff_on
lemma smooth_on_const : smooth_on I I' (λ (x : M), c) s :=
times_cont_mdiff_on_const
lemma times_cont_mdiff_at_const : times_cont_mdiff_at I I' n (λ (x : M), c) x :=
times_cont_mdiff_const.times_cont_mdiff_at
lemma smooth_at_const : smooth_at I I' (λ (x : M), c) x :=
times_cont_mdiff_at_const
lemma times_cont_mdiff_within_at_const : times_cont_mdiff_within_at I I' n (λ (x : M), c) s x :=
times_cont_mdiff_at_const.times_cont_mdiff_within_at
lemma smooth_within_at_const : smooth_within_at I I' (λ (x : M), c) s x :=
times_cont_mdiff_within_at_const
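/- Illustrative example (not in the original file): any constant map is smooth at every point,
whatever its value. -/
example (y : M') : smooth_at I I' (λ (x : M), y) x := smooth_at_const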
end id
/-! ### Equivalence with the basic definition for functions between vector spaces -/
section vector_space
lemma times_cont_mdiff_within_at_iff_times_cont_diff_within_at {f : E → E'} {s : set E} {x : E} :
times_cont_mdiff_within_at (model_with_corners_self 𝕜 E) (model_with_corners_self 𝕜 E') n f s x
↔ times_cont_diff_within_at 𝕜 n f s x :=
begin
simp only [times_cont_mdiff_within_at, lift_prop_within_at, times_cont_diff_within_at_prop,
iff_def] with mfld_simps {contextual := tt},
exact times_cont_diff_within_at.continuous_within_at
end
lemma times_cont_mdiff_at_iff_times_cont_diff_at {f : E → E'} {x : E} :
times_cont_mdiff_at (model_with_corners_self 𝕜 E) (model_with_corners_self 𝕜 E') n f x
↔ times_cont_diff_at 𝕜 n f x :=
by rw [← times_cont_mdiff_within_at_univ,
times_cont_mdiff_within_at_iff_times_cont_diff_within_at, times_cont_diff_within_at_univ]
lemma times_cont_mdiff_on_iff_times_cont_diff_on {f : E → E'} {s : set E} :
times_cont_mdiff_on (model_with_corners_self 𝕜 E) (model_with_corners_self 𝕜 E') n f s
↔ times_cont_diff_on 𝕜 n f s :=
forall_congr $ by simp [times_cont_mdiff_within_at_iff_times_cont_diff_within_at]
lemma times_cont_mdiff_iff_times_cont_diff {f : E → E'} :
times_cont_mdiff (model_with_corners_self 𝕜 E) (model_with_corners_self 𝕜 E') n f
↔ times_cont_diff 𝕜 n f :=
by rw [← times_cont_diff_on_univ, ← times_cont_mdiff_on_univ,
times_cont_mdiff_on_iff_times_cont_diff_on]
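/- Illustrative example (not in the original file): transferring ordinary `C^n` regularity on a
vector space to the manifold notion, using the equivalence above. -/
example {f : E → E'} (hf : times_cont_diff 𝕜 n f) :
  times_cont_mdiff (model_with_corners_self 𝕜 E) (model_with_corners_self 𝕜 E') n f :=
times_cont_mdiff_iff_times_cont_diff.2 hf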
end vector_space
/-! ### The tangent map of a smooth function is smooth -/
section tangent_map
/-- If a function is `C^n` with `1 ≤ n` on a domain with unique derivatives, then its bundled
derivative is continuous. In this auxiliary lemma, we prove this fact when the source and target
space are model spaces in models with corners. The general fact is proved in
`times_cont_mdiff_on.continuous_on_tangent_map_within` -/
lemma times_cont_mdiff_on.continuous_on_tangent_map_within_aux
{f : H → H'} {s : set H}
(hf : times_cont_mdiff_on I I' n f s) (hn : 1 ≤ n) (hs : unique_mdiff_on I s) :
continuous_on (tangent_map_within I I' f s) ((tangent_bundle.proj I H) ⁻¹' s) :=
begin
suffices h : continuous_on (λ (p : H × E), (f p.fst,
(fderiv_within 𝕜 (written_in_ext_chart_at I I' p.fst f) (I.symm ⁻¹' s ∩ range I)
((ext_chart_at I p.fst) p.fst) : E →L[𝕜] E') p.snd)) (prod.fst ⁻¹' s),
{ have A := (tangent_bundle_model_space_homeomorph H I).continuous,
rw continuous_iff_continuous_on_univ at A,
have B := ((tangent_bundle_model_space_homeomorph H' I').symm.continuous.comp_continuous_on h)
.comp' A,
have : (univ ∩ ⇑(tangent_bundle_model_space_homeomorph H I) ⁻¹' (prod.fst ⁻¹' s)) =
tangent_bundle.proj I H ⁻¹' s,
by { ext ⟨x, v⟩, simp only with mfld_simps },
rw this at B,
apply B.congr,
rintros ⟨x, v⟩ hx,
dsimp [tangent_map_within],
ext, { refl },
simp only with mfld_simps,
apply congr_fun,
apply congr_arg,
rw mdifferentiable_within_at.mfderiv_within (hf.mdifferentiable_on hn x hx),
refl },
suffices h : continuous_on (λ (p : H × E), (fderiv_within 𝕜 (I' ∘ f ∘ I.symm)
(I.symm ⁻¹' s ∩ range I) (I p.fst) : E →L[𝕜] E') p.snd) (prod.fst ⁻¹' s),
{ dsimp [written_in_ext_chart_at, ext_chart_at],
apply continuous_on.prod
(continuous_on.comp hf.continuous_on continuous_fst.continuous_on (subset.refl _)),
apply h.congr,
assume p hp,
refl },
suffices h : continuous_on (fderiv_within 𝕜 (I' ∘ f ∘ I.symm)
(I.symm ⁻¹' s ∩ range I)) (I '' s),
{ have C := continuous_on.comp h I.continuous_to_fun.continuous_on (subset.refl _),
have A : continuous (λq : (E →L[𝕜] E') × E, q.1 q.2) := is_bounded_bilinear_map_apply.continuous,
have B : continuous_on (λp : H × E,
(fderiv_within 𝕜 (I' ∘ f ∘ I.symm) (I.symm ⁻¹' s ∩ range I)
(I p.1), p.2)) (prod.fst ⁻¹' s),
{ apply continuous_on.prod _ continuous_snd.continuous_on,
refine (continuous_on.comp C continuous_fst.continuous_on _ : _),
exact preimage_mono (subset_preimage_image _ _) },
exact A.comp_continuous_on B },
rw times_cont_mdiff_on_iff at hf,
let x : H := I.symm (0 : E),
let y : H' := I'.symm (0 : E'),
have A := hf.2 x y,
simp only [I.image, inter_comm] with mfld_simps at A ⊢,
apply A.continuous_on_fderiv_within _ hn,
convert hs.unique_diff_on x using 1,
simp only [inter_comm] with mfld_simps
end
/-- If a function is `C^n` on a domain with unique derivatives, then its bundled derivative is
`C^m` when `m+1 ≤ n`. In this auxiliary lemma, we prove this fact when the source and target space
are model spaces in models with corners. The general fact is proved in
`times_cont_mdiff_on.times_cont_mdiff_on_tangent_map_within` -/
lemma times_cont_mdiff_on.times_cont_mdiff_on_tangent_map_within_aux
{f : H → H'} {s : set H}
(hf : times_cont_mdiff_on I I' n f s) (hmn : m + 1 ≤ n) (hs : unique_mdiff_on I s) :
times_cont_mdiff_on I.tangent I'.tangent m (tangent_map_within I I' f s)
((tangent_bundle.proj I H) ⁻¹' s) :=
begin
have m_le_n : m ≤ n,
{ apply le_trans _ hmn,
have : m + 0 ≤ m + 1 := add_le_add_left (zero_le _) _,
simpa only [add_zero] using this },
have one_le_n : 1 ≤ n,
{ apply le_trans _ hmn,
change 0 + 1 ≤ m + 1,
exact add_le_add_right (zero_le _) _ },
have U': unique_diff_on 𝕜 (range I ∩ I.symm ⁻¹' s),
{ assume y hy,
simpa only [unique_mdiff_on, unique_mdiff_within_at, hy.1, inter_comm] with mfld_simps
using hs (I.symm y) hy.2 },
have U : unique_diff_on 𝕜 (set.prod (range I ∩ I.symm ⁻¹' s) (univ : set E)) :=
U'.prod unique_diff_on_univ,
rw times_cont_mdiff_on_iff,
refine ⟨hf.continuous_on_tangent_map_within_aux one_le_n hs, λp q, _⟩,
have A : (range I).prod univ ∩
((equiv.sigma_equiv_prod H E).symm ∘ λ (p : E × E), ((I.symm) p.fst, p.snd)) ⁻¹'
(tangent_bundle.proj I H ⁻¹' s)
= set.prod (range I ∩ I.symm ⁻¹' s) univ,
by { ext ⟨x, v⟩, simp only with mfld_simps },
suffices h : times_cont_diff_on 𝕜 m (((λ (p : H' × E'), (I' p.fst, p.snd)) ∘
(equiv.sigma_equiv_prod H' E')) ∘ tangent_map_within I I' f s ∘
((equiv.sigma_equiv_prod H E).symm) ∘ λ (p : E × E), (I.symm p.fst, p.snd))
((range ⇑I ∩ ⇑(I.symm) ⁻¹' s).prod univ),
by simpa [A] using h,
change times_cont_diff_on 𝕜 m (λ (p : E × E),
((I' (f (I.symm p.fst)), ((mfderiv_within I I' f s (I.symm p.fst)) : E → E') p.snd) : E' × E'))
(set.prod (range I ∩ I.symm ⁻¹' s) univ),
-- check that all bits in this formula are `C^n`
have hf' := times_cont_mdiff_on_iff.1 hf,
have A : times_cont_diff_on 𝕜 m (I' ∘ f ∘ I.symm) (range I ∩ I.symm ⁻¹' s) :=
by simpa only with mfld_simps using (hf'.2 (I.symm 0) (I'.symm 0)).of_le m_le_n,
have B : times_cont_diff_on 𝕜 m ((I' ∘ f ∘ I.symm) ∘ prod.fst)
(set.prod (range I ∩ I.symm ⁻¹' s) (univ : set E)) :=
A.comp (times_cont_diff_fst.times_cont_diff_on) (prod_subset_preimage_fst _ _),
suffices C : times_cont_diff_on 𝕜 m (λ (p : E × E),
((fderiv_within 𝕜 (I' ∘ f ∘ I.symm) (I.symm ⁻¹' s ∩ range I) p.1 : _) p.2))
(set.prod (range I ∩ I.symm ⁻¹' s) univ),
{ apply times_cont_diff_on.prod B _,
apply C.congr (λp hp, _),
simp only with mfld_simps at hp,
simp only [mfderiv_within, hf.mdifferentiable_on one_le_n _ hp.2, hp.1, dif_pos]
with mfld_simps },
have D : times_cont_diff_on 𝕜 m (λ x,
(fderiv_within 𝕜 (I' ∘ f ∘ I.symm) (I.symm ⁻¹' s ∩ range I) x))
(range I ∩ I.symm ⁻¹' s),
{ have : times_cont_diff_on 𝕜 n (I' ∘ f ∘ I.symm) (range I ∩ I.symm ⁻¹' s) :=
by simpa only with mfld_simps using (hf'.2 (I.symm 0) (I'.symm 0)),
simpa only [inter_comm] using this.fderiv_within U' hmn },
have := D.comp (times_cont_diff_fst.times_cont_diff_on) (prod_subset_preimage_fst _ _),
have := times_cont_diff_on.prod this (times_cont_diff_snd.times_cont_diff_on),
exact is_bounded_bilinear_map_apply.times_cont_diff.comp_times_cont_diff_on this,
end
include Is I's
/-- If a function is `C^n` on a domain with unique derivatives, then its bundled derivative
is `C^m` when `m+1 ≤ n`. -/
theorem times_cont_mdiff_on.times_cont_mdiff_on_tangent_map_within
(hf : times_cont_mdiff_on I I' n f s) (hmn : m + 1 ≤ n) (hs : unique_mdiff_on I s) :
times_cont_mdiff_on I.tangent I'.tangent m (tangent_map_within I I' f s)
((tangent_bundle.proj I M) ⁻¹' s) :=
begin
/- The strategy of the proof is to avoid unfolding the definitions, and reduce by functoriality
to the case of functions on the model spaces, where we have already proved the result.
Let `l` and `r` be the charts to the left and to the right, so that we have
```
     l^{-1}     f        r
H --------> M ---> M' ---> H'
```
Then the tangent map `T(r ∘ f ∘ l)` is smooth by a previous result. Consider the composition
```
    Tl        T(r ∘ f ∘ l^{-1})        Tr^{-1}
TM -----> TH -------------------> TH' ---------> TM'
```
where `Tr^{-1}` and `Tl` are the tangent maps of `r^{-1}` and `l`. Writing `Tl` and `Tr^{-1}` as
composition of charts (called `Dl` and `il` for `l` and `Dr` and `ir` in the proof below), it
follows that they are smooth. The composition of all these maps is `Tf`, and is therefore smooth
as a composition of smooth maps.
-/
have m_le_n : m ≤ n,
{ apply le_trans _ hmn,
have : m + 0 ≤ m + 1 := add_le_add_left (zero_le _) _,
simpa only [add_zero] },
have one_le_n : 1 ≤ n,
{ apply le_trans _ hmn,
change 0 + 1 ≤ m + 1,
exact add_le_add_right (zero_le _) _ },
/- First step: local reduction on the space, to a set `s'` which is contained in chart domains. -/
refine times_cont_mdiff_on_of_locally_times_cont_mdiff_on (λp hp, _),
have hf' := times_cont_mdiff_on_iff.1 hf,
simp [tangent_bundle.proj] at hp,
let l := chart_at H p.1,
set Dl := chart_at (model_prod H E) p with hDl,
let r := chart_at H' (f p.1),
let Dr := chart_at (model_prod H' E') (tangent_map_within I I' f s p),
let il := chart_at (model_prod H E) (tangent_map I I l p),
let ir := chart_at (model_prod H' E') (tangent_map I I' (r ∘ f) p),
let s' := f ⁻¹' r.source ∩ s ∩ l.source,
let s'_lift := (tangent_bundle.proj I M)⁻¹' s',
let s'l := l.target ∩ l.symm ⁻¹' s',
let s'l_lift := (tangent_bundle.proj I H) ⁻¹' s'l,
rcases continuous_on_iff'.1 hf'.1 r.source r.open_source with ⟨o, o_open, ho⟩,
suffices h : times_cont_mdiff_on I.tangent I'.tangent m (tangent_map_within I I' f s) s'_lift,
{ refine ⟨(tangent_bundle.proj I M)⁻¹' (o ∩ l.source), _, _, _⟩,
show is_open ((tangent_bundle.proj I M)⁻¹' (o ∩ l.source)), from
tangent_bundle_proj_continuous _ _ _ (is_open_inter o_open l.open_source),
show p ∈ tangent_bundle.proj I M ⁻¹' (o ∩ l.source),
{ simp [tangent_bundle.proj] at ⊢,
have : p.1 ∈ f ⁻¹' r.source ∩ s, by simp [hp],
rw ho at this,
exact this.1 },
{ have : tangent_bundle.proj I M ⁻¹' s ∩ tangent_bundle.proj I M ⁻¹' (o ∩ l.source) = s'_lift,
{ dsimp only [s'_lift, s'], rw [ho], mfld_set_tac },
rw this,
exact h } },
/- Second step: check that all functions are smooth, and use the chain rule to write the bundled
derivative as a composition of a function between model spaces and of charts.
Convention: statements about the differentiability of `a ∘ b ∘ c` are named `diff_abc`. Statements
about differentiability in the bundle have a `_lift` suffix. -/
have U' : unique_mdiff_on I s',
{ apply unique_mdiff_on.inter _ l.open_source,
rw [ho, inter_comm],
exact hs.inter o_open },
have U'l : unique_mdiff_on I s'l :=
U'.unique_mdiff_on_preimage (mdifferentiable_chart _ _),
have diff_f : times_cont_mdiff_on I I' n f s' :=
hf.mono (by mfld_set_tac),
have diff_r : times_cont_mdiff_on I' I' n r r.source :=
times_cont_mdiff_on_chart,
have diff_rf : times_cont_mdiff_on I I' n (r ∘ f) s',
{ apply times_cont_mdiff_on.comp diff_r diff_f (λx hx, _),
simp only [s'] with mfld_simps at hx, simp only [hx] with mfld_simps },
have diff_l : times_cont_mdiff_on I I n l.symm s'l,
{ have A : times_cont_mdiff_on I I n l.symm l.target :=
times_cont_mdiff_on_chart_symm,
exact A.mono (by mfld_set_tac) },
have diff_rfl : times_cont_mdiff_on I I' n (r ∘ f ∘ l.symm) s'l,
{ apply times_cont_mdiff_on.comp diff_rf diff_l,
mfld_set_tac },
have diff_rfl_lift : times_cont_mdiff_on I.tangent I'.tangent m
(tangent_map_within I I' (r ∘ f ∘ l.symm) s'l) s'l_lift :=
diff_rfl.times_cont_mdiff_on_tangent_map_within_aux hmn U'l,
have diff_irrfl_lift : times_cont_mdiff_on I.tangent I'.tangent m
(ir ∘ (tangent_map_within I I' (r ∘ f ∘ l.symm) s'l)) s'l_lift,
{ have A : times_cont_mdiff_on I'.tangent I'.tangent m ir ir.source := times_cont_mdiff_on_chart,
exact times_cont_mdiff_on.comp A diff_rfl_lift (λp hp, by simp only [ir] with mfld_simps) },
have diff_Drirrfl_lift : times_cont_mdiff_on I.tangent I'.tangent m
(Dr.symm ∘ (ir ∘ (tangent_map_within I I' (r ∘ f ∘ l.symm) s'l))) s'l_lift,
{ have A : times_cont_mdiff_on I'.tangent I'.tangent m Dr.symm Dr.target :=
times_cont_mdiff_on_chart_symm,
apply times_cont_mdiff_on.comp A diff_irrfl_lift (λp hp, _),
simp only [s'l_lift, tangent_bundle.proj] with mfld_simps at hp,
simp only [ir, @local_equiv.refl_coe (model_prod H' E'), hp] with mfld_simps },
-- conclusion of this step: the composition of all the maps above is smooth
have diff_DrirrflilDl : times_cont_mdiff_on I.tangent I'.tangent m
(Dr.symm ∘ (ir ∘ (tangent_map_within I I' (r ∘ f ∘ l.symm) s'l)) ∘
(il.symm ∘ Dl)) s'_lift,
{ have A : times_cont_mdiff_on I.tangent I.tangent m Dl Dl.source := times_cont_mdiff_on_chart,
have A' : times_cont_mdiff_on I.tangent I.tangent m Dl s'_lift,
{ apply A.mono (λp hp, _),
simp only [s'_lift, tangent_bundle.proj] with mfld_simps at hp,
simp only [Dl, hp] with mfld_simps },
have B : times_cont_mdiff_on I.tangent I.tangent m il.symm il.target :=
times_cont_mdiff_on_chart_symm,
have C : times_cont_mdiff_on I.tangent I.tangent m (il.symm ∘ Dl) s'_lift :=
times_cont_mdiff_on.comp B A' (λp hp, by simp only [il] with mfld_simps),
apply times_cont_mdiff_on.comp diff_Drirrfl_lift C (λp hp, _),
simp only [s'_lift, tangent_bundle.proj] with mfld_simps at hp,
simp only [il, s'l_lift, hp, tangent_bundle.proj] with mfld_simps },
/- Third step: check that the composition of all the maps indeed coincides with the derivative we
are looking for -/
have eq_comp : ∀q ∈ s'_lift, tangent_map_within I I' f s q =
(Dr.symm ∘ ir ∘ (tangent_map_within I I' (r ∘ f ∘ l.symm) s'l) ∘
(il.symm ∘ Dl)) q,
{ assume q hq,
simp only [s'_lift, tangent_bundle.proj] with mfld_simps at hq,
have U'q : unique_mdiff_within_at I s' q.1,
by { apply U', simp only [hq, s'] with mfld_simps },
have U'lq : unique_mdiff_within_at I s'l (Dl q).1,
by { apply U'l, simp only [hq, s'l] with mfld_simps },
have A : tangent_map_within I I' ((r ∘ f) ∘ l.symm) s'l (il.symm (Dl q)) =
tangent_map_within I I' (r ∘ f) s' (tangent_map_within I I l.symm s'l (il.symm (Dl q))),
{ refine tangent_map_within_comp_at (il.symm (Dl q)) _ _ (λp hp, _) U'lq,
{ apply diff_rf.mdifferentiable_on one_le_n,
simp only [hq] with mfld_simps },
{ apply diff_l.mdifferentiable_on one_le_n,
simp only [s'l, hq] with mfld_simps },
{ simp only with mfld_simps at hp, simp only [hp] with mfld_simps } },
have B : tangent_map_within I I l.symm s'l (il.symm (Dl q)) = q,
{ have : tangent_map_within I I l.symm s'l (il.symm (Dl q))
= tangent_map I I l.symm (il.symm (Dl q)),
{ refine tangent_map_within_eq_tangent_map U'lq _,
refine mdifferentiable_at_atlas_symm _ (chart_mem_atlas _ _) _,
simp only [hq] with mfld_simps },
rw [this, tangent_map_chart_symm, hDl],
{ simp only [hq] with mfld_simps,
have : q ∈ (chart_at (model_prod H E) p).source, by simp only [hq] with mfld_simps,
exact (chart_at (model_prod H E) p).left_inv this },
{ simp only [hq] with mfld_simps } },
have C : tangent_map_within I I' (r ∘ f) s' q
= tangent_map_within I' I' r r.source (tangent_map_within I I' f s' q),
{ refine tangent_map_within_comp_at q _ _ (λr hr, _) U'q,
{ apply diff_r.mdifferentiable_on one_le_n,
simp only [hq] with mfld_simps },
{ apply diff_f.mdifferentiable_on one_le_n,
simp only [hq] with mfld_simps },
{ simp only [s'] with mfld_simps at hr,
simp only [hr] with mfld_simps } },
have D : Dr.symm (ir (tangent_map_within I' I' r r.source (tangent_map_within I I' f s' q)))
= tangent_map_within I I' f s' q,
{ have A : tangent_map_within I' I' r r.source (tangent_map_within I I' f s' q) =
tangent_map I' I' r (tangent_map_within I I' f s' q),
{ apply tangent_map_within_eq_tangent_map,
{ apply is_open.unique_mdiff_within_at _ r.open_source, simp [hq] },
{ refine mdifferentiable_at_atlas _ (chart_mem_atlas _ _) _,
simp only [hq] with mfld_simps } },
have : f p.1 = (tangent_map_within I I' f s p).1 := rfl,
rw [A],
dsimp [r, Dr],
rw [this, tangent_map_chart],
{ simp only [hq] with mfld_simps,
have : tangent_map_within I I' f s' q ∈
(chart_at (model_prod H' E') (tangent_map_within I I' f s p)).source,
by simp only [hq] with mfld_simps,
exact (chart_at (model_prod H' E') (tangent_map_within I I' f s p)).left_inv this },
{ simp only [hq] with mfld_simps } },
have E : tangent_map_within I I' f s' q = tangent_map_within I I' f s q,
{ refine tangent_map_within_subset (by mfld_set_tac) U'q _,
apply hf.mdifferentiable_on one_le_n,
simp only [hq] with mfld_simps },
simp only [(∘), A, B, C, D, E.symm] },
exact diff_DrirrflilDl.congr eq_comp,
end
/-- If a function is `C^n` on a domain with unique derivatives, with `1 ≤ n`, then its bundled
derivative is continuous there. -/
theorem times_cont_mdiff_on.continuous_on_tangent_map_within
(hf : times_cont_mdiff_on I I' n f s) (hmn : 1 ≤ n) (hs : unique_mdiff_on I s) :
continuous_on (tangent_map_within I I' f s) ((tangent_bundle.proj I M) ⁻¹' s) :=
begin
have : times_cont_mdiff_on I.tangent I'.tangent 0 (tangent_map_within I I' f s)
((tangent_bundle.proj I M) ⁻¹' s) :=
hf.times_cont_mdiff_on_tangent_map_within hmn hs,
exact this.continuous_on
end
/-- If a function is `C^n`, then its bundled derivative is `C^m` when `m+1 ≤ n`. -/
theorem times_cont_mdiff.times_cont_mdiff_tangent_map
(hf : times_cont_mdiff I I' n f) (hmn : m + 1 ≤ n) :
times_cont_mdiff I.tangent I'.tangent m (tangent_map I I' f) :=
begin
rw ← times_cont_mdiff_on_univ at hf ⊢,
convert hf.times_cont_mdiff_on_tangent_map_within hmn unique_mdiff_on_univ,
rw tangent_map_within_univ
end
/-- If a function is `C^n`, with `1 ≤ n`, then its bundled derivative is continuous. -/
theorem times_cont_mdiff.continuous_tangent_map
(hf : times_cont_mdiff I I' n f) (hmn : 1 ≤ n) :
continuous (tangent_map I I' f) :=
begin
rw ← times_cont_mdiff_on_univ at hf,
rw continuous_iff_continuous_on_univ,
convert hf.continuous_on_tangent_map_within hmn unique_mdiff_on_univ,
rw tangent_map_within_univ
end
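/- Illustrative example (not in the original file): for a smooth map the bundled derivative is
again smooth, taking `m = n = ⊤` (so that `⊤ + 1 ≤ ⊤` is just `le_top`). -/
example (hf : smooth I I' f) : smooth I.tangent I'.tangent (tangent_map I I' f) :=
times_cont_mdiff.times_cont_mdiff_tangent_map hf le_top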
end tangent_map
/-! ### Smoothness of the projection in a basic smooth bundle -/
namespace basic_smooth_bundle_core
variables (Z : basic_smooth_bundle_core I M E')
lemma times_cont_mdiff_proj :
times_cont_mdiff ((I.prod (model_with_corners_self 𝕜 E'))) I n
Z.to_topological_fiber_bundle_core.proj :=
begin
assume x,
rw [times_cont_mdiff_at, times_cont_mdiff_within_at_iff],
refine ⟨Z.to_topological_fiber_bundle_core.continuous_proj.continuous_at.continuous_within_at, _⟩,
simp only [(∘), chart_at, chart] with mfld_simps,
apply times_cont_diff_within_at_fst.congr,
{ rintros ⟨a, b⟩ hab,
simp only with mfld_simps at hab,
simp only [hab] with mfld_simps },
{ simp only with mfld_simps }
end
lemma smooth_proj :
smooth ((I.prod (model_with_corners_self 𝕜 E'))) I Z.to_topological_fiber_bundle_core.proj :=
times_cont_mdiff_proj Z
lemma times_cont_mdiff_on_proj {s : set (Z.to_topological_fiber_bundle_core.total_space)} :
times_cont_mdiff_on ((I.prod (model_with_corners_self 𝕜 E'))) I n
Z.to_topological_fiber_bundle_core.proj s :=
Z.times_cont_mdiff_proj.times_cont_mdiff_on
lemma smooth_on_proj {s : set (Z.to_topological_fiber_bundle_core.total_space)} :
smooth_on ((I.prod (model_with_corners_self 𝕜 E'))) I Z.to_topological_fiber_bundle_core.proj s :=
times_cont_mdiff_on_proj Z
lemma times_cont_mdiff_at_proj {p : Z.to_topological_fiber_bundle_core.total_space} :
times_cont_mdiff_at ((I.prod (model_with_corners_self 𝕜 E'))) I n
Z.to_topological_fiber_bundle_core.proj p :=
Z.times_cont_mdiff_proj.times_cont_mdiff_at
lemma smooth_at_proj {p : Z.to_topological_fiber_bundle_core.total_space} :
smooth_at ((I.prod (model_with_corners_self 𝕜 E'))) I Z.to_topological_fiber_bundle_core.proj p :=
Z.times_cont_mdiff_at_proj
lemma times_cont_mdiff_within_at_proj
{s : set (Z.to_topological_fiber_bundle_core.total_space)}
{p : Z.to_topological_fiber_bundle_core.total_space} :
times_cont_mdiff_within_at ((I.prod (model_with_corners_self 𝕜 E'))) I n
Z.to_topological_fiber_bundle_core.proj s p :=
Z.times_cont_mdiff_at_proj.times_cont_mdiff_within_at
lemma smooth_within_at_proj
{s : set (Z.to_topological_fiber_bundle_core.total_space)}
{p : Z.to_topological_fiber_bundle_core.total_space} :
smooth_within_at ((I.prod (model_with_corners_self 𝕜 E'))) I
Z.to_topological_fiber_bundle_core.proj s p :=
Z.times_cont_mdiff_within_at_proj
/-- If an element of `E'` is invariant under all coordinate changes, then one can define a
corresponding section of the fiber bundle, which is smooth. This applies in particular to the
zero section of a vector bundle. Another example (not yet defined) would be the identity
section of the endomorphism bundle of a vector bundle. -/
lemma smooth_const_section (v : E')
(h : ∀ (i j : atlas H M), ∀ x ∈ i.1.source ∩ j.1.source, Z.coord_change i j (i.1 x) v = v) :
smooth I ((I.prod (model_with_corners_self 𝕜 E')))
(show M → Z.to_topological_fiber_bundle_core.total_space, from λ x, ⟨x, v⟩) :=
begin
assume x,
rw [times_cont_mdiff_at, times_cont_mdiff_within_at_iff],
split,
{ apply continuous.continuous_within_at,
apply topological_fiber_bundle_core.continuous_const_section,
assume i j y hy,
exact h _ _ _ hy },
{ have : times_cont_diff 𝕜 ⊤ (λ (y : E), (y, v)) := times_cont_diff_id.prod times_cont_diff_const,
apply this.times_cont_diff_within_at.congr,
{ assume y hy,
simp only with mfld_simps at hy,
simp only [chart, hy, chart_at, prod.mk.inj_iff, to_topological_fiber_bundle_core]
with mfld_simps,
apply h,
simp only [hy] with mfld_simps },
{ simp only [chart, chart_at, prod.mk.inj_iff, to_topological_fiber_bundle_core] with mfld_simps,
apply h,
simp only with mfld_simps } }
end
end basic_smooth_bundle_core
/-! ### Smoothness of the tangent bundle projection -/
namespace tangent_bundle
include Is
lemma times_cont_mdiff_proj :
times_cont_mdiff I.tangent I n (proj I M) :=
basic_smooth_bundle_core.times_cont_mdiff_proj _
lemma smooth_proj : smooth I.tangent I (proj I M) :=
basic_smooth_bundle_core.smooth_proj _
lemma times_cont_mdiff_on_proj {s : set (tangent_bundle I M)} :
times_cont_mdiff_on I.tangent I n (proj I M) s :=
basic_smooth_bundle_core.times_cont_mdiff_on_proj _
lemma smooth_on_proj {s : set (tangent_bundle I M)} :
smooth_on I.tangent I (proj I M) s :=
basic_smooth_bundle_core.smooth_on_proj _
lemma times_cont_mdiff_at_proj {p : tangent_bundle I M} :
times_cont_mdiff_at I.tangent I n
(proj I M) p :=
basic_smooth_bundle_core.times_cont_mdiff_at_proj _
lemma smooth_at_proj {p : tangent_bundle I M} :
smooth_at I.tangent I (proj I M) p :=
basic_smooth_bundle_core.smooth_at_proj _
lemma times_cont_mdiff_within_at_proj
{s : set (tangent_bundle I M)} {p : tangent_bundle I M} :
times_cont_mdiff_within_at I.tangent I n
(proj I M) s p :=
basic_smooth_bundle_core.times_cont_mdiff_within_at_proj _
lemma smooth_within_at_proj
{s : set (tangent_bundle I M)} {p : tangent_bundle I M} :
smooth_within_at I.tangent I
(proj I M) s p :=
basic_smooth_bundle_core.smooth_within_at_proj _
variables (I M)
/-- The zero section of the tangent bundle -/
def zero_section : M → tangent_bundle I M := λ x, ⟨x, 0⟩
variables {I M}
lemma smooth_zero_section : smooth I I.tangent (zero_section I M) :=
begin
apply basic_smooth_bundle_core.smooth_const_section (tangent_bundle_core I M) 0,
assume i j x hx,
simp only [tangent_bundle_core, continuous_linear_map.map_zero] with mfld_simps
end
/-- The derivative of the zero section of the tangent bundle maps `⟨x, v⟩` to `⟨⟨x, 0⟩, ⟨v, 0⟩⟩`.
Note that, as currently framed, this is a statement in coordinates, thus reliant on the choice
of the coordinate system we use on the tangent bundle.
However, the result itself is coordinate-dependent only to the extent that the coordinates
determine a splitting of the tangent bundle. Moreover, there is a canonical splitting at each
point of the zero section (since there is a canonical horizontal space there, the tangent space
to the zero section, in addition to the canonical vertical space which is the kernel of the
derivative of the projection), and this canonical splitting is also the one that comes from the
coordinates on the tangent bundle in our definitions. So this statement is not as crazy as it
may seem.
TODO define splittings of vector bundles; state this result invariantly. -/
lemma tangent_map_tangent_bundle_pure (p : tangent_bundle I M) :
tangent_map I I.tangent (tangent_bundle.zero_section I M) p = ⟨⟨p.1, 0⟩, ⟨p.2, 0⟩⟩ :=
begin
rcases p with ⟨x, v⟩,
have N : I.symm ⁻¹' (chart_at H x).target ∈ 𝓝 (I ((chart_at H x) x)),
{ apply mem_nhds_sets,
apply I.continuous_inv_fun _ (local_homeomorph.open_target _),
simp only with mfld_simps },
have A : mdifferentiable_at I I.tangent (λ (x : M), (⟨x, 0⟩ : tangent_bundle I M)) x :=
tangent_bundle.smooth_zero_section.mdifferentiable_at,
have B : fderiv_within 𝕜 (λ (x_1 : E), (x_1, (0 : E))) (set.range ⇑I) (I ((chart_at H x) x)) v
= (v, 0),
{ rw [fderiv_within_eq_fderiv, differentiable_at.fderiv_prod],
{ simp },
{ exact differentiable_at_id' },
{ exact differentiable_at_const _ },
{ exact model_with_corners.unique_diff_at_image I },
{ exact differentiable_at_id'.prod (differentiable_at_const _) } },
simp only [tangent_bundle.zero_section, tangent_map, mfderiv,
A, dif_pos, chart_at, basic_smooth_bundle_core.chart,
basic_smooth_bundle_core.to_topological_fiber_bundle_core, tangent_bundle_core,
function.comp, continuous_linear_map.map_zero] with mfld_simps,
rw ← fderiv_within_inter N (I.unique_diff (I ((chart_at H x) x)) (set.mem_range_self _)) at B,
rw [← fderiv_within_inter N (I.unique_diff (I ((chart_at H x) x)) (set.mem_range_self _)), ← B],
congr' 1,
apply fderiv_within_congr _ (λ y hy, _),
{ simp only with mfld_simps, },
{ apply unique_diff_within_at.inter (I.unique_diff _ _) N,
simp only with mfld_simps },
{ simp only with mfld_simps at hy,
simp only [hy] with mfld_simps },
end
end tangent_bundle
/-! ### Smoothness of standard maps associated to the product of manifolds -/
section prod_mk
lemma times_cont_mdiff_within_at.prod_mk {f : M → M'} {g : M → N'}
(hf : times_cont_mdiff_within_at I I' n f s x) (hg : times_cont_mdiff_within_at I J' n g s x) :
times_cont_mdiff_within_at I (I'.prod J') n (λ x, (f x, g x)) s x :=
begin
rw times_cont_mdiff_within_at_iff at *,
refine ⟨hf.1.prod hg.1, (hf.2.mono _).prod (hg.2.mono _)⟩;
mfld_set_tac,
end
lemma times_cont_mdiff_at.prod_mk {f : M → M'} {g : M → N'}
(hf : times_cont_mdiff_at I I' n f x) (hg : times_cont_mdiff_at I J' n g x) :
times_cont_mdiff_at I (I'.prod J') n (λ x, (f x, g x)) x :=
hf.prod_mk hg
lemma times_cont_mdiff_on.prod_mk {f : M → M'} {g : M → N'}
(hf : times_cont_mdiff_on I I' n f s) (hg : times_cont_mdiff_on I J' n g s) :
times_cont_mdiff_on I (I'.prod J') n (λ x, (f x, g x)) s :=
λ x hx, (hf x hx).prod_mk (hg x hx)
lemma times_cont_mdiff.prod_mk {f : M → M'} {g : M → N'}
(hf : times_cont_mdiff I I' n f) (hg : times_cont_mdiff I J' n g) :
times_cont_mdiff I (I'.prod J') n (λ x, (f x, g x)) :=
λ x, (hf x).prod_mk (hg x)
lemma smooth_within_at.prod_mk {f : M → M'} {g : M → N'}
(hf : smooth_within_at I I' f s x) (hg : smooth_within_at I J' g s x) :
smooth_within_at I (I'.prod J') (λ x, (f x, g x)) s x :=
hf.prod_mk hg
lemma smooth_at.prod_mk {f : M → M'} {g : M → N'}
(hf : smooth_at I I' f x) (hg : smooth_at I J' g x) :
smooth_at I (I'.prod J') (λ x, (f x, g x)) x :=
hf.prod_mk hg
lemma smooth_on.prod_mk {f : M → M'} {g : M → N'}
(hf : smooth_on I I' f s) (hg : smooth_on I J' g s) :
smooth_on I (I'.prod J') (λ x, (f x, g x)) s :=
hf.prod_mk hg
lemma smooth.prod_mk {f : M → M'} {g : M → N'}
(hf : smooth I I' f) (hg : smooth I J' g) :
smooth I (I'.prod J') (λ x, (f x, g x)) :=
hf.prod_mk hg
end prod_mk
section projections
lemma times_cont_mdiff_within_at_fst {s : set (M × N)} {p : M × N} :
times_cont_mdiff_within_at (I.prod J) I n prod.fst s p :=
begin
rw times_cont_mdiff_within_at_iff,
refine ⟨continuous_within_at_fst, _⟩,
refine times_cont_diff_within_at_fst.congr (λ y hy, _) _,
{ simp only with mfld_simps at hy,
simp only [hy] with mfld_simps },
{ simp only with mfld_simps }
end
lemma times_cont_mdiff_at_fst {p : M × N} :
times_cont_mdiff_at (I.prod J) I n prod.fst p :=
times_cont_mdiff_within_at_fst
lemma times_cont_mdiff_on_fst {s : set (M × N)} :
times_cont_mdiff_on (I.prod J) I n prod.fst s :=
λ x hx, times_cont_mdiff_within_at_fst
lemma times_cont_mdiff_fst :
times_cont_mdiff (I.prod J) I n (@prod.fst M N) :=
λ x, times_cont_mdiff_at_fst
lemma smooth_within_at_fst {s : set (M × N)} {p : M × N} :
smooth_within_at (I.prod J) I prod.fst s p :=
times_cont_mdiff_within_at_fst
lemma smooth_at_fst {p : M × N} :
smooth_at (I.prod J) I prod.fst p :=
times_cont_mdiff_at_fst
lemma smooth_on_fst {s : set (M × N)} :
smooth_on (I.prod J) I prod.fst s :=
times_cont_mdiff_on_fst
lemma smooth_fst :
smooth (I.prod J) I (@prod.fst M N) :=
times_cont_mdiff_fst
lemma times_cont_mdiff_within_at_snd {s : set (M × N)} {p : M × N} :
times_cont_mdiff_within_at (I.prod J) J n prod.snd s p :=
begin
rw times_cont_mdiff_within_at_iff,
refine ⟨continuous_within_at_snd, _⟩,
refine times_cont_diff_within_at_snd.congr (λ y hy, _) _,
{ simp only with mfld_simps at hy,
simp only [hy] with mfld_simps },
{ simp only with mfld_simps }
end
lemma times_cont_mdiff_at_snd {p : M × N} :
times_cont_mdiff_at (I.prod J) J n prod.snd p :=
times_cont_mdiff_within_at_snd
lemma times_cont_mdiff_on_snd {s : set (M × N)} :
times_cont_mdiff_on (I.prod J) J n prod.snd s :=
λ x hx, times_cont_mdiff_within_at_snd
lemma times_cont_mdiff_snd :
times_cont_mdiff (I.prod J) J n (@prod.snd M N) :=
λ x, times_cont_mdiff_at_snd
lemma smooth_within_at_snd {s : set (M × N)} {p : M × N} :
smooth_within_at (I.prod J) J prod.snd s p :=
times_cont_mdiff_within_at_snd
lemma smooth_at_snd {p : M × N} :
smooth_at (I.prod J) J prod.snd p :=
times_cont_mdiff_at_snd
lemma smooth_on_snd {s : set (M × N)} :
smooth_on (I.prod J) J prod.snd s :=
times_cont_mdiff_on_snd
lemma smooth_snd :
smooth (I.prod J) J (@prod.snd M N) :=
times_cont_mdiff_snd
include Is I's J's
lemma smooth_iff_proj_smooth {f : M → M' × N'} :
(smooth I (I'.prod J') f) ↔ (smooth I I' (prod.fst ∘ f)) ∧ (smooth I J' (prod.snd ∘ f)) :=
begin
split,
{ intro h, exact ⟨smooth_fst.comp h, smooth_snd.comp h⟩ },
{ rintro ⟨h_fst, h_snd⟩, simpa only [prod.mk.eta] using h_fst.prod_mk h_snd, }
end
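/- Illustrative example (not in the original file): one direction of `smooth_iff_proj_smooth`,
extracting smoothness of the first component of a map into a product. -/
example {f : M → M' × N'} (h : smooth I (I'.prod J') f) : smooth I I' (prod.fst ∘ f) :=
(smooth_iff_proj_smooth.1 h).1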
end projections
section prod_map
variables {g : N → N'} {r : set N} {y : N}
include Is I's Js J's
/-- The product map of two `C^n` functions within a set at a point is `C^n`
within the product set at the product point. -/
lemma times_cont_mdiff_within_at.prod_map' {p : M × N}
(hf : times_cont_mdiff_within_at I I' n f s p.1) (hg : times_cont_mdiff_within_at J J' n g r p.2) :
times_cont_mdiff_within_at (I.prod J) (I'.prod J') n (prod.map f g) (s.prod r) p :=
(hf.comp p times_cont_mdiff_within_at_fst (prod_subset_preimage_fst _ _)).prod_mk $
hg.comp p times_cont_mdiff_within_at_snd (prod_subset_preimage_snd _ _)
lemma times_cont_mdiff_within_at.prod_map
(hf : times_cont_mdiff_within_at I I' n f s x) (hg : times_cont_mdiff_within_at J J' n g r y) :
times_cont_mdiff_within_at (I.prod J) (I'.prod J') n (prod.map f g) (s.prod r) (x, y) :=
times_cont_mdiff_within_at.prod_map' hf hg
lemma times_cont_mdiff_at.prod_map
(hf : times_cont_mdiff_at I I' n f x) (hg : times_cont_mdiff_at J J' n g y) :
times_cont_mdiff_at (I.prod J) (I'.prod J') n (prod.map f g) (x, y) :=
begin
rw ← times_cont_mdiff_within_at_univ at *,
convert hf.prod_map hg,
exact univ_prod_univ.symm
end
lemma times_cont_mdiff_at.prod_map' {p : M × N}
(hf : times_cont_mdiff_at I I' n f p.1) (hg : times_cont_mdiff_at J J' n g p.2) :
times_cont_mdiff_at (I.prod J) (I'.prod J') n (prod.map f g) p :=
begin
rcases p,
exact hf.prod_map hg
end
lemma times_cont_mdiff_on.prod_map
(hf : times_cont_mdiff_on I I' n f s) (hg : times_cont_mdiff_on J J' n g r) :
times_cont_mdiff_on (I.prod J) (I'.prod J') n (prod.map f g) (s.prod r) :=
(hf.comp times_cont_mdiff_on_fst (prod_subset_preimage_fst _ _)).prod_mk $
hg.comp (times_cont_mdiff_on_snd) (prod_subset_preimage_snd _ _)
lemma times_cont_mdiff.prod_map
(hf : times_cont_mdiff I I' n f) (hg : times_cont_mdiff J J' n g) :
times_cont_mdiff (I.prod J) (I'.prod J') n (prod.map f g) :=
begin
assume p,
exact (hf p.1).prod_map' (hg p.2)
end
lemma smooth_within_at.prod_map
(hf : smooth_within_at I I' f s x) (hg : smooth_within_at J J' g r y) :
smooth_within_at (I.prod J) (I'.prod J') (prod.map f g) (s.prod r) (x, y) :=
hf.prod_map hg
lemma smooth_at.prod_map
(hf : smooth_at I I' f x) (hg : smooth_at J J' g y) :
smooth_at (I.prod J) (I'.prod J') (prod.map f g) (x, y) :=
hf.prod_map hg
lemma smooth_on.prod_map
(hf : smooth_on I I' f s) (hg : smooth_on J J' g r) :
smooth_on (I.prod J) (I'.prod J') (prod.map f g) (s.prod r) :=
hf.prod_map hg
lemma smooth.prod_map
(hf : smooth I I' f) (hg : smooth J J' g) :
smooth (I.prod J) (I'.prod J') (prod.map f g) :=
hf.prod_map hg
end prod_map
/-! ### Linear maps between normed spaces are smooth -/
lemma continuous_linear_map.times_cont_mdiff (L : E →L[𝕜] F) :
times_cont_mdiff 𝓘(𝕜, E) 𝓘(𝕜, F) n L :=
begin
rw times_cont_mdiff_iff,
refine ⟨L.cont, λ x y, _⟩,
simp only with mfld_simps,
rw times_cont_diff_on_univ,
exact continuous_linear_map.times_cont_diff L,
end
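/- Illustrative example (not in the original file, and `φ` is just a local name): composing a
smooth map with values in `E` with a continuous linear map yields a smooth map. -/
include Is
example (L : E →L[𝕜] F) {φ : M → E} (hφ : smooth I 𝓘(𝕜, E) φ) :
  smooth I 𝓘(𝕜, F) (λ x : M, L (φ x)) :=
times_cont_mdiff.comp L.times_cont_mdiff hφ
omit Is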
/-! ### Smoothness of standard operations -/
variables {V : Type*} [normed_group V] [normed_space 𝕜 V]
/-- On any vector space, multiplication by a scalar is a smooth operation. -/
lemma smooth_smul : smooth (𝓘(𝕜).prod 𝓘(𝕜, V)) 𝓘(𝕜, V) (λp : 𝕜 × V, p.1 • p.2) :=
begin
rw smooth_iff,
refine ⟨continuous_smul, λ x y, _⟩,
simp only [prod.mk.eta] with mfld_simps,
rw times_cont_diff_on_univ,
exact times_cont_diff_smul,
end
lemma smooth.smul {N : Type*} [topological_space N] [charted_space H N]
[smooth_manifold_with_corners I N] {f : N → 𝕜} {g : N → V}
(hf : smooth I 𝓘(𝕜) f) (hg : smooth I 𝓘(𝕜, V) g) :
smooth I 𝓘(𝕜, V) (λ p, f p • g p) :=
smooth_smul.comp (hf.prod_mk hg)
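/- Illustrative example (not in the original file): multiplying a fixed vector by a smooth
scalar-valued function gives a smooth vector-valued function, combining `smooth.smul` with
`smooth_const`. -/
example {N : Type*} [topological_space N] [charted_space H N]
  [smooth_manifold_with_corners I N] {f : N → 𝕜} (hf : smooth I 𝓘(𝕜) f) (v : V) :
  smooth I 𝓘(𝕜, V) (λ p : N, f p • v) :=
hf.smul smooth_const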
| 78cb1faf0082da17a22fd1eac6ad5753b5d0b51a | 74addaa0e41490cbaf2abd313a764c96df57b05d | /Mathlib/data/real/basic_auto.lean | 6b4d224b708079d9cdd50f78002d888e080855ba | [] | no_license | AurelienSaue/Mathlib4_auto | f538cfd0980f65a6361eadea39e6fc639e9dae14 | 590df64109b08190abe22358fabc3eae000943f2 | refs/heads/master | 1,683,906,849,776 | 1,622,564,669,000 | 1,622,564,669,000 | 371,723,747 | 0 | 0 | null | null | null | null | UTF-8 | Lean | false | false | 15,419 | lean |
/-
Copyright (c) 2018 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro, Floris van Doorn
The (classical) real numbers ℝ. This is a direct construction
from Cauchy sequences.
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.order.conditionally_complete_lattice
import Mathlib.data.real.cau_seq_completion
import Mathlib.algebra.archimedean
import Mathlib.algebra.star.basic
import Mathlib.PostPort
namespace Mathlib
/-- The type `ℝ` of real numbers constructed as equivalence classes of Cauchy sequences of rational
numbers. -/
def real := cau_seq.completion.Cauchy
notation:1024 "ℝ" => Mathlib.real
namespace real
def comm_ring_aux : comm_ring ℝ := cau_seq.completion.Cauchy.comm_ring
protected instance comm_ring : comm_ring ℝ :=
comm_ring.mk comm_ring.add comm_ring.add_assoc comm_ring.zero comm_ring.zero_add
comm_ring.add_zero comm_ring.neg comm_ring.sub comm_ring.add_left_neg comm_ring.add_comm
comm_ring.mul comm_ring.mul_assoc comm_ring.one comm_ring.one_mul comm_ring.mul_one
comm_ring.left_distrib comm_ring.right_distrib comm_ring.mul_comm
/- Extra instances to short-circuit type class resolution -/
protected instance ring : ring ℝ := comm_ring.to_ring ℝ
protected instance comm_semiring : comm_semiring ℝ := comm_ring.to_comm_semiring
protected instance semiring : semiring ℝ := ring.to_semiring
protected instance add_comm_group : add_comm_group ℝ := ring.to_add_comm_group ℝ
protected instance add_group : add_group ℝ := add_comm_group.to_add_group ℝ
protected instance add_comm_monoid : add_comm_monoid ℝ := add_comm_group.to_add_comm_monoid ℝ
protected instance add_monoid : add_monoid ℝ := sub_neg_monoid.to_add_monoid ℝ
protected instance add_left_cancel_semigroup : add_left_cancel_semigroup ℝ :=
add_left_cancel_monoid.to_add_left_cancel_semigroup ℝ
protected instance add_right_cancel_semigroup : add_right_cancel_semigroup ℝ :=
add_right_cancel_monoid.to_add_right_cancel_semigroup ℝ
protected instance add_comm_semigroup : add_comm_semigroup ℝ :=
add_comm_monoid.to_add_comm_semigroup ℝ
protected instance add_semigroup : add_semigroup ℝ := add_monoid.to_add_semigroup ℝ
protected instance comm_monoid : comm_monoid ℝ := comm_semiring.to_comm_monoid ℝ
protected instance monoid : monoid ℝ := ring.to_monoid ℝ
protected instance comm_semigroup : comm_semigroup ℝ := comm_ring.to_comm_semigroup ℝ
protected instance semigroup : semigroup ℝ := monoid.to_semigroup ℝ
protected instance inhabited : Inhabited ℝ := { default := 0 }
/-- The real numbers are a *-ring, with the trivial *-structure. -/
protected instance star_ring : star_ring ℝ := star_ring_of_comm
/-- Coercion `ℚ` → `ℝ` as a `ring_hom`. Note that this
is `cau_seq.completion.of_rat`, not `rat.cast`. -/
def of_rat : ℚ →+* ℝ := ring_hom.mk cau_seq.completion.of_rat sorry sorry sorry sorry
/-- Make a real number from a Cauchy sequence of rationals (by taking the equivalence class). -/
def mk (x : cau_seq ℚ abs) : ℝ := cau_seq.completion.mk x
theorem of_rat_sub (x : ℚ) (y : ℚ) : coe_fn of_rat (x - y) = coe_fn of_rat x - coe_fn of_rat y :=
congr_arg mk (cau_seq.const_sub x y)
protected instance has_lt : HasLess ℝ :=
{ Less := fun (x y : ℝ) => quotient.lift_on₂ x y Less sorry }
@[simp] theorem mk_lt {f : cau_seq ℚ abs} {g : cau_seq ℚ abs} : mk f < mk g ↔ f < g := iff.rfl
theorem mk_eq {f : cau_seq ℚ abs} {g : cau_seq ℚ abs} : mk f = mk g ↔ f ≈ g :=
cau_seq.completion.mk_eq
theorem quotient_mk_eq_mk (f : cau_seq ℚ abs) : quotient.mk f = mk f := rfl
theorem mk_eq_mk {f : cau_seq ℚ abs} : cau_seq.completion.mk f = mk f := rfl
@[simp] theorem mk_pos {f : cau_seq ℚ abs} : 0 < mk f ↔ cau_seq.pos f :=
iff_of_eq (congr_arg cau_seq.pos (sub_zero f))
protected def le (x : ℝ) (y : ℝ) := x < y ∨ x = y
protected instance has_le : HasLessEq ℝ := { LessEq := real.le }
@[simp] theorem mk_le {f : cau_seq ℚ abs} {g : cau_seq ℚ abs} : mk f ≤ mk g ↔ f ≤ g :=
or_congr iff.rfl quotient.eq
theorem add_lt_add_iff_left {a : ℝ} {b : ℝ} (c : ℝ) : c + a < c + b ↔ a < b := sorry
protected instance partial_order : partial_order ℝ := partial_order.mk LessEq Less sorry sorry sorry
protected instance preorder : preorder ℝ := partial_order.to_preorder ℝ
theorem of_rat_lt {x : ℚ} {y : ℚ} : coe_fn of_rat x < coe_fn of_rat y ↔ x < y := cau_seq.const_lt
protected theorem zero_lt_one : 0 < 1 := iff.mpr of_rat_lt zero_lt_one
protected theorem mul_pos {a : ℝ} {b : ℝ} : 0 < a → 0 < b → 0 < a * b := sorry
protected instance ordered_ring : ordered_ring ℝ :=
ordered_ring.mk comm_ring.add comm_ring.add_assoc comm_ring.zero comm_ring.zero_add
comm_ring.add_zero comm_ring.neg comm_ring.sub comm_ring.add_left_neg comm_ring.add_comm
comm_ring.mul comm_ring.mul_assoc comm_ring.one comm_ring.one_mul comm_ring.mul_one
comm_ring.left_distrib comm_ring.right_distrib partial_order.le partial_order.lt
partial_order.le_refl partial_order.le_trans partial_order.le_antisymm sorry sorry real.mul_pos
protected instance ordered_semiring : ordered_semiring ℝ := ordered_ring.to_ordered_semiring
protected instance ordered_add_comm_group : ordered_add_comm_group ℝ :=
ordered_ring.to_ordered_add_comm_group ℝ
protected instance ordered_cancel_add_comm_monoid : ordered_cancel_add_comm_monoid ℝ :=
ordered_semiring.to_ordered_cancel_add_comm_monoid ℝ
protected instance ordered_add_comm_monoid : ordered_add_comm_monoid ℝ :=
ordered_cancel_add_comm_monoid.to_ordered_add_comm_monoid
protected instance has_one : HasOne ℝ := monoid.to_has_one ℝ
protected instance has_zero : HasZero ℝ := mul_zero_class.to_has_zero ℝ
protected instance has_mul : Mul ℝ := distrib.to_has_mul ℝ
protected instance has_add : Add ℝ := distrib.to_has_add ℝ
protected instance has_sub : Sub ℝ := sub_neg_monoid.to_has_sub ℝ
protected instance nontrivial : nontrivial ℝ :=
nontrivial.mk (Exists.intro 0 (Exists.intro 1 (ne_of_lt real.zero_lt_one)))
protected instance linear_order : linear_order ℝ :=
linear_order.mk partial_order.le partial_order.lt partial_order.le_refl partial_order.le_trans
partial_order.le_antisymm sorry (fun (a b : ℝ) => classical.prop_decidable (a ≤ b))
Mathlib.decidable_eq_of_decidable_le Mathlib.decidable_lt_of_decidable_le
protected instance linear_ordered_comm_ring : linear_ordered_comm_ring ℝ :=
linear_ordered_comm_ring.mk ordered_ring.add ordered_ring.add_assoc ordered_ring.zero
ordered_ring.zero_add ordered_ring.add_zero ordered_ring.neg ordered_ring.sub
ordered_ring.add_left_neg ordered_ring.add_comm ordered_ring.mul ordered_ring.mul_assoc
ordered_ring.one ordered_ring.one_mul ordered_ring.mul_one ordered_ring.left_distrib
ordered_ring.right_distrib ordered_ring.le ordered_ring.lt ordered_ring.le_refl
ordered_ring.le_trans ordered_ring.le_antisymm ordered_ring.add_le_add_left
ordered_ring.zero_le_one ordered_ring.mul_pos linear_order.le_total linear_order.decidable_le
linear_order.decidable_eq linear_order.decidable_lt nontrivial.exists_pair_ne comm_ring.mul_comm
/- Extra instances to short-circuit type class resolution -/
protected instance linear_ordered_ring : linear_ordered_ring ℝ :=
linear_ordered_comm_ring.to_linear_ordered_ring ℝ
protected instance linear_ordered_semiring : linear_ordered_semiring ℝ :=
linear_ordered_comm_ring.to_linear_ordered_semiring
protected instance domain : domain ℝ :=
domain.mk comm_ring.add comm_ring.add_assoc comm_ring.zero comm_ring.zero_add comm_ring.add_zero
comm_ring.neg comm_ring.sub comm_ring.add_left_neg comm_ring.add_comm comm_ring.mul
comm_ring.mul_assoc comm_ring.one comm_ring.one_mul comm_ring.mul_one comm_ring.left_distrib
comm_ring.right_distrib nontrivial.exists_pair_ne sorry
protected instance linear_ordered_field : linear_ordered_field ℝ := sorry
/- Extra instances to short-circuit type class resolution -/
protected instance linear_ordered_add_comm_group : linear_ordered_add_comm_group ℝ :=
linear_ordered_ring.to_linear_ordered_add_comm_group
protected instance field : field ℝ := linear_ordered_field.to_field ℝ
protected instance division_ring : division_ring ℝ := field.to_division_ring
protected instance integral_domain : integral_domain ℝ := field.to_integral_domain
protected instance distrib_lattice : distrib_lattice ℝ := Mathlib.distrib_lattice_of_linear_order
protected instance lattice : lattice ℝ := Mathlib.lattice_of_linear_order
protected instance semilattice_inf : semilattice_inf ℝ := lattice.to_semilattice_inf ℝ
protected instance semilattice_sup : semilattice_sup ℝ := lattice.to_semilattice_sup ℝ
protected instance has_inf : has_inf ℝ := semilattice_inf.to_has_inf ℝ
protected instance has_sup : has_sup ℝ := semilattice_sup.to_has_sup ℝ
protected instance decidable_lt (a : ℝ) (b : ℝ) : Decidable (a < b) := has_lt.lt.decidable a b
protected instance decidable_le (a : ℝ) (b : ℝ) : Decidable (a ≤ b) := has_le.le.decidable a b
protected instance decidable_eq (a : ℝ) (b : ℝ) : Decidable (a = b) := quotient.decidable_eq a b
@[simp] theorem of_rat_eq_cast (x : ℚ) : coe_fn of_rat x = ↑x := ring_hom.eq_rat_cast of_rat
theorem le_mk_of_forall_le {x : ℝ} {f : cau_seq ℚ abs} :
(∃ (i : ℕ), ∀ (j : ℕ), j ≥ i → x ≤ ↑(coe_fn f j)) → x ≤ mk f :=
sorry
theorem mk_le_of_forall_le {f : cau_seq ℚ abs} {x : ℝ} :
(∃ (i : ℕ), ∀ (j : ℕ), j ≥ i → ↑(coe_fn f j) ≤ x) → mk f ≤ x :=
sorry
theorem mk_near_of_forall_near {f : cau_seq ℚ abs} {x : ℝ} {ε : ℝ}
(H : ∃ (i : ℕ), ∀ (j : ℕ), j ≥ i → abs (↑(coe_fn f j) - x) ≤ ε) : abs (mk f - x) ≤ ε :=
sorry
protected instance archimedean : archimedean ℝ :=
iff.mpr archimedean_iff_rat_le
fun (x : ℝ) => quotient.induction_on x fun (f : cau_seq ℚ abs) => sorry
/- mark `real` irreducible in order to prevent `auto_cases` unfolding reals,
since users rarely want to consider real numbers as Cauchy sequences.
Marking `comm_ring_aux` `irreducible` is done to ensure that there are no problems
with non-definitionally-equal instances caused by making `real` irreducible. -/
protected instance floor_ring : floor_ring ℝ := archimedean.floor_ring ℝ
theorem is_cau_seq_iff_lift {f : ℕ → ℚ} : is_cau_seq abs f ↔ is_cau_seq abs fun (i : ℕ) => ↑(f i) :=
sorry
theorem of_near (f : ℕ → ℚ) (x : ℝ)
(h : ∀ (ε : ℝ), ε > 0 → ∃ (i : ℕ), ∀ (j : ℕ), j ≥ i → abs (↑(f j) - x) < ε) :
∃ (h' : is_cau_seq abs f), mk { val := f, property := h' } = x :=
sorry
theorem exists_floor (x : ℝ) : ∃ (ub : ℤ), ↑ub ≤ x ∧ ∀ (z : ℤ), ↑z ≤ x → z ≤ ub := sorry
theorem exists_sup (S : set ℝ) :
(∃ (x : ℝ), x ∈ S) →
(∃ (x : ℝ), ∀ (y : ℝ), y ∈ S → y ≤ x) →
∃ (x : ℝ), ∀ (y : ℝ), x ≤ y ↔ ∀ (z : ℝ), z ∈ S → z ≤ y :=
sorry
protected instance has_Sup : has_Sup ℝ :=
has_Sup.mk
fun (S : set ℝ) =>
dite ((∃ (x : ℝ), x ∈ S) ∧ ∃ (x : ℝ), ∀ (y : ℝ), y ∈ S → y ≤ x)
(fun (h : (∃ (x : ℝ), x ∈ S) ∧ ∃ (x : ℝ), ∀ (y : ℝ), y ∈ S → y ≤ x) => classical.some sorry)
fun (h : ¬((∃ (x : ℝ), x ∈ S) ∧ ∃ (x : ℝ), ∀ (y : ℝ), y ∈ S → y ≤ x)) => 0
theorem Sup_def (S : set ℝ) :
Sup S =
dite ((∃ (x : ℝ), x ∈ S) ∧ ∃ (x : ℝ), ∀ (y : ℝ), y ∈ S → y ≤ x)
(fun (h : (∃ (x : ℝ), x ∈ S) ∧ ∃ (x : ℝ), ∀ (y : ℝ), y ∈ S → y ≤ x) =>
classical.some (exists_sup S (and.left h) (and.right h)))
fun (h : ¬((∃ (x : ℝ), x ∈ S) ∧ ∃ (x : ℝ), ∀ (y : ℝ), y ∈ S → y ≤ x)) => 0 :=
rfl
theorem Sup_le (S : set ℝ) (h₁ : ∃ (x : ℝ), x ∈ S) (h₂ : ∃ (x : ℝ), ∀ (y : ℝ), y ∈ S → y ≤ x)
{y : ℝ} : Sup S ≤ y ↔ ∀ (z : ℝ), z ∈ S → z ≤ y :=
sorry
-- this proof times out without this
theorem lt_Sup (S : set ℝ) (h₁ : ∃ (x : ℝ), x ∈ S) (h₂ : ∃ (x : ℝ), ∀ (y : ℝ), y ∈ S → y ≤ x)
{y : ℝ} : y < Sup S ↔ ∃ (z : ℝ), ∃ (H : z ∈ S), y < z :=
sorry
theorem le_Sup (S : set ℝ) (h₂ : ∃ (x : ℝ), ∀ (y : ℝ), y ∈ S → y ≤ x) {x : ℝ} (xS : x ∈ S) :
x ≤ Sup S :=
iff.mp (Sup_le S (Exists.intro x xS) h₂) (le_refl (Sup S)) x xS
theorem Sup_le_ub (S : set ℝ) (h₁ : ∃ (x : ℝ), x ∈ S) {ub : ℝ} (h₂ : ∀ (y : ℝ), y ∈ S → y ≤ ub) :
Sup S ≤ ub :=
iff.mpr (Sup_le S h₁ (Exists.intro ub h₂)) h₂
protected theorem is_lub_Sup {s : set ℝ} {a : ℝ} {b : ℝ} (ha : a ∈ s) (hb : b ∈ upper_bounds s) :
is_lub s (Sup s) :=
{ left := fun (x : ℝ) (xs : x ∈ s) => le_Sup s (Exists.intro b hb) xs,
right := fun (u : ℝ) (h : u ∈ upper_bounds s) => Sup_le_ub s (Exists.intro a ha) h }
protected instance has_Inf : has_Inf ℝ :=
has_Inf.mk fun (S : set ℝ) => -Sup (set_of fun (x : ℝ) => -x ∈ S)
theorem Inf_def (S : set ℝ) : Inf S = -Sup (set_of fun (x : ℝ) => -x ∈ S) := rfl
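/- A worked example of the reduction of `Inf` to `Sup`: for `S = set_of (fun x : ℝ => 1 ≤ x)`,
the reflected set `set_of (fun x => -x ∈ S)` is `set_of (fun x => x ≤ -1)`, whose `Sup` is `-1`,
so `Inf S = -(-1) = 1`, the expected infimum of `[1, ∞)`. -/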
theorem le_Inf (S : set ℝ) (h₁ : ∃ (x : ℝ), x ∈ S) (h₂ : ∃ (x : ℝ), ∀ (y : ℝ), y ∈ S → x ≤ y)
{y : ℝ} : y ≤ Inf S ↔ ∀ (z : ℝ), z ∈ S → y ≤ z :=
sorry
-- this proof times out without this
theorem Inf_lt (S : set ℝ) (h₁ : ∃ (x : ℝ), x ∈ S) (h₂ : ∃ (x : ℝ), ∀ (y : ℝ), y ∈ S → x ≤ y)
{y : ℝ} : Inf S < y ↔ ∃ (z : ℝ), ∃ (H : z ∈ S), z < y :=
sorry
theorem Inf_le (S : set ℝ) (h₂ : ∃ (x : ℝ), ∀ (y : ℝ), y ∈ S → x ≤ y) {x : ℝ} (xS : x ∈ S) :
Inf S ≤ x :=
iff.mp (le_Inf S (Exists.intro x xS) h₂) (le_refl (Inf S)) x xS
theorem lb_le_Inf (S : set ℝ) (h₁ : ∃ (x : ℝ), x ∈ S) {lb : ℝ} (h₂ : ∀ (y : ℝ), y ∈ S → lb ≤ y) :
lb ≤ Inf S :=
iff.mpr (le_Inf S h₁ (Exists.intro lb h₂)) h₂
protected instance conditionally_complete_linear_order : conditionally_complete_linear_order ℝ :=
conditionally_complete_linear_order.mk lattice.sup linear_order.le linear_order.lt
linear_order.le_refl linear_order.le_trans linear_order.le_antisymm lattice.le_sup_left
lattice.le_sup_right lattice.sup_le lattice.inf lattice.inf_le_left lattice.inf_le_right
lattice.le_inf Sup Inf sorry sorry sorry sorry linear_order.le_total (classical.dec_rel LessEq)
linear_order.decidable_eq linear_order.decidable_lt
theorem Sup_empty : Sup ∅ = 0 := sorry
theorem Sup_of_not_bdd_above {s : set ℝ} (hs : ¬bdd_above s) : Sup s = 0 :=
dif_neg fun (h : (∃ (x : ℝ), x ∈ s) ∧ ∃ (x : ℝ), ∀ (y : ℝ), y ∈ s → y ≤ x) => hs (and.right h)
theorem Sup_univ : Sup set.univ = 0 := sorry
theorem Inf_empty : Inf ∅ = 0 := sorry
theorem Inf_of_not_bdd_below {s : set ℝ} (hs : ¬bdd_below s) : Inf s = 0 := sorry
theorem cau_seq_converges (f : cau_seq ℝ abs) : ∃ (x : ℝ), f ≈ cau_seq.const abs x := sorry
protected instance abs.cau_seq.is_complete : cau_seq.is_complete ℝ abs :=
cau_seq.is_complete.mk cau_seq_converges
end Mathlib
| 9396fcd518560053fedf7c3cef88be919443d81e | 8cae430f0a71442d02dbb1cbb14073b31048e4b0 | /src/data/multiset/pi.lean | 916174981c99f4274e5f7dab6ff428aca6c83790 | ["Apache-2.0"] | permissive | leanprover-community/mathlib | 56a2cadd17ac88caf4ece0a775932fa26327ba0e | 442a83d738cb208d3600056c489be16900ba701d | refs/heads/master | 1,693,584,102,358 | 1,693,471,902,000 | 1,693,471,902,000 | 97,922,418 | 1,595 | 352 | Apache-2.0 | 1,694,693,445,000 | 1,500,624,130,000 | Lean | UTF-8 | Lean | false | false | 5,046 | lean |
/-
Copyright (c) 2018 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl
-/
import data.multiset.nodup
/-!
# The cartesian product of multisets
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
-/
namespace multiset
section pi
variables {α : Type*}
open function
/-- Given `δ : α → Type*`, `pi.empty δ` is the trivial dependent function out of the empty
multiset. -/
def pi.empty (δ : α → Sort*) : (Πa∈(0:multiset α), δ a) .
variables [decidable_eq α] {β : α → Type*} {δ : α → Sort*}
/-- Given `δ : α → Type*`, a multiset `m` and a term `a`, as well as a term `b : δ a` and a
function `f` such that `f a' : δ a'` for all `a'` in `m`, `pi.cons m a b f` is a function `g` such
that `g a'' : δ a''` for all `a''` in `a ::ₘ m`. -/
def pi.cons (m : multiset α) (a : α) (b : δ a) (f : Πa∈m, δ a) : Πa'∈a ::ₘ m, δ a' :=
λa' ha', if h : a' = a then eq.rec b h.symm else f a' $ (mem_cons.1 ha').resolve_left h
lemma pi.cons_same {m : multiset α} {a : α} {b : δ a} {f : Πa∈m, δ a} (h : a ∈ a ::ₘ m) :
pi.cons m a b f a h = b :=
dif_pos rfl
lemma pi.cons_ne {m : multiset α} {a a' : α} {b : δ a} {f : Πa∈m, δ a}
(h' : a' ∈ a ::ₘ m) (h : a' ≠ a) :
pi.cons m a b f a' h' = f a' ((mem_cons.1 h').resolve_left h) :=
dif_neg h
lemma pi.cons_swap {a a' : α} {b : δ a} {b' : δ a'} {m : multiset α} {f : Πa∈m, δ a} (h : a ≠ a') :
pi.cons (a' ::ₘ m) a b (pi.cons m a' b' f) == pi.cons (a ::ₘ m) a' b' (pi.cons m a b f) :=
begin
apply hfunext rfl,
rintro a'' _ rfl,
refine hfunext (by rw [cons_swap]) (λ ha₁ ha₂ _, _),
rcases ne_or_eq a'' a with h₁ | rfl,
rcases eq_or_ne a'' a' with rfl | h₂,
all_goals { simp [*, pi.cons_same, pi.cons_ne] },
end
@[simp]
lemma pi.cons_eta {m : multiset α} {a : α} (f : Π a' ∈ a ::ₘ m, δ a') :
pi.cons m a (f _ (mem_cons_self _ _)) (λ a' ha', f a' (mem_cons_of_mem ha')) = f :=
begin
ext a' h',
by_cases a' = a,
{ subst h, rw [pi.cons_same] },
{ rw [pi.cons_ne _ h] }
end
lemma pi.cons_injective {a : α} {b : δ a} {s : multiset α} (hs : a ∉ s) :
function.injective (pi.cons s a b) :=
assume f₁ f₂ eq, funext $ assume a', funext $ assume h',
have ne : a ≠ a', from assume h, hs $ h.symm ▸ h',
have a' ∈ a ::ₘ s, from mem_cons_of_mem h',
calc f₁ a' h' = pi.cons s a b f₁ a' this : by rw [pi.cons_ne this ne.symm]
... = pi.cons s a b f₂ a' this : by rw [eq]
... = f₂ a' h' : by rw [pi.cons_ne this ne.symm]
/-- `pi m t` constructs the Cartesian product over `t` indexed by `m`. -/
def pi (m : multiset α) (t : Πa, multiset (β a)) : multiset (Πa∈m, β a) :=
m.rec_on {pi.empty β} (λa m (p : multiset (Πa∈m, β a)), (t a).bind $ λb, p.map $ pi.cons m a b)
begin
intros a a' m n,
by_cases eq : a = a',
{ subst eq },
{ simp [map_bind, bind_bind (t a') (t a)],
apply bind_hcongr, { rw [cons_swap a a'] },
intros b hb,
apply bind_hcongr, { rw [cons_swap a a'] },
intros b' hb',
apply map_hcongr, { rw [cons_swap a a'] },
intros f hf,
exact pi.cons_swap eq }
end
@[simp] lemma pi_zero (t : Πa, multiset (β a)) : pi 0 t = {pi.empty β} := rfl
@[simp] lemma pi_cons (m : multiset α) (t : Πa, multiset (β a)) (a : α) :
pi (a ::ₘ m) t = ((t a).bind $ λb, (pi m t).map $ pi.cons m a b) :=
rec_on_cons a m
lemma card_pi (m : multiset α) (t : Πa, multiset (β a)) :
card (pi m t) = prod (m.map $ λa, card (t a)) :=
multiset.induction_on m (by simp) (by simp [mul_comm] {contextual := tt})
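/- For instance, on a two-element multiset the lemma above gives
`card (pi (a ::ₘ b ::ₘ 0) t) = card (t a) * card (t b)`:
`(a ::ₘ b ::ₘ 0).map (λ a, card (t a))` is `card (t a) ::ₘ card (t b) ::ₘ 0`, whose product is
`card (t a) * card (t b)`, matching the usual count for a binary Cartesian product. -/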
protected lemma nodup.pi {s : multiset α} {t : Π a, multiset (β a)} :
nodup s → (∀a∈s, nodup (t a)) → nodup (pi s t) :=
multiset.induction_on s (assume _ _, nodup_singleton _)
begin
assume a s ih hs ht,
have has : a ∉ s, by simp at hs; exact hs.1,
have hs : nodup s, by simp at hs; exact hs.2,
simp,
refine ⟨λ b hb, (ih hs $ λ a' h', ht a' $ mem_cons_of_mem h').map (pi.cons_injective has), _⟩,
refine (ht a $ mem_cons_self _ _).pairwise _,
from assume b₁ hb₁ b₂ hb₂ neb, disjoint_map_map.2 (assume f hf g hg eq,
have pi.cons s a b₁ f a (mem_cons_self _ _) = pi.cons s a b₂ g a (mem_cons_self _ _),
by rw [eq],
neb $ show b₁ = b₂, by rwa [pi.cons_same, pi.cons_same] at this)
end
lemma mem_pi (m : multiset α) (t : Πa, multiset (β a)) :
∀f:Πa∈m, β a, (f ∈ pi m t) ↔ (∀a (h : a ∈ m), f a h ∈ t a) :=
begin
intro f,
induction m using multiset.induction_on with a m ih,
{ simpa using show f = pi.empty β, by funext a ha; exact ha.elim },
simp_rw [pi_cons, mem_bind, mem_map, ih],
split,
{ rintro ⟨b, hb, f', hf', rfl⟩ a' ha',
by_cases a' = a,
{ subst h, rwa [pi.cons_same] },
{ rw [pi.cons_ne _ h], apply hf' } },
{ intro hf,
refine ⟨_, hf a (mem_cons_self _ _), _, λ a ha, hf a (mem_cons_of_mem ha), _⟩,
rw pi.cons_eta }
end
end pi
end multiset
| c91635e8cd691467e7704acc1e10a47758bf1d0a | 8cae430f0a71442d02dbb1cbb14073b31048e4b0 | /src/analysis/convex/measure.lean | e80f6a9481d9c774bb4917bc7ff45c914e740d61 | ["Apache-2.0"] | permissive | leanprover-community/mathlib | 56a2cadd17ac88caf4ece0a775932fa26327ba0e | 442a83d738cb208d3600056c489be16900ba701d | refs/heads/master | 1,693,584,102,358 | 1,693,471,902,000 | 1,693,471,902,000 | 97,922,418 | 1,595 | 352 | Apache-2.0 | 1,694,693,445,000 | 1,500,624,130,000 | Lean | UTF-8 | Lean | false | false | 4,468 | lean |
/-
Copyright (c) 2022 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import analysis.convex.topology
import analysis.normed_space.add_torsor_bases
import measure_theory.measure.lebesgue.eq_haar
/-!
# Convex sets are null-measurable
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
Let `E` be a finite dimensional real vector space, let `μ` be a Haar measure on `E`, let `s` be a
convex set in `E`. Then the frontier of `s` has measure zero (see `convex.add_haar_frontier`), hence
`s` is a `measure_theory.null_measurable_set` (see `convex.null_measurable_set`).
-/
open measure_theory measure_theory.measure set metric filter finite_dimensional (finrank)
open_locale topology nnreal ennreal
variables {E : Type*} [normed_add_comm_group E] [normed_space ℝ E] [measurable_space E]
[borel_space E] [finite_dimensional ℝ E] (μ : measure E) [is_add_haar_measure μ] {s : set E}
namespace convex
/-- Haar measure of the frontier of a convex set is zero. -/
lemma add_haar_frontier (hs : convex ℝ s) : μ (frontier s) = 0 :=
begin
/- If `s` is included in a hyperplane, then `frontier s ⊆ closure s` is included in the same
hyperplane, hence it has measure zero. -/
cases ne_or_eq (affine_span ℝ s) ⊤ with hspan hspan,
{ refine measure_mono_null _ (add_haar_affine_subspace _ _ hspan),
exact frontier_subset_closure.trans (closure_minimal (subset_affine_span _ _)
(affine_span ℝ s).closed_of_finite_dimensional) },
rw ← hs.interior_nonempty_iff_affine_span_eq_top at hspan,
rcases hspan with ⟨x, hx⟩,
/- Without loss of generality, `s` is bounded. Indeed, `∂s ⊆ ⋃ n, ∂(s ∩ ball x (n + 1))`, hence it
suffices to prove that `∀ n, μ (∂(s ∩ ball x (n + 1))) = 0`; the latter set is bounded.
-/
suffices H : ∀ t : set E, convex ℝ t → x ∈ interior t → bounded t → μ (frontier t) = 0,
{ set B : ℕ → set E := λ n, ball x (n + 1),
have : μ (⋃ n : ℕ, frontier (s ∩ B n)) = 0,
{ refine measure_Union_null (λ n, H _ (hs.inter (convex_ball _ _)) _
(bounded_ball.mono (inter_subset_right _ _))),
rw [interior_inter, is_open_ball.interior_eq],
exact ⟨hx, mem_ball_self (add_pos_of_nonneg_of_pos n.cast_nonneg zero_lt_one)⟩ },
refine measure_mono_null (λ y hy, _) this, clear this,
set N : ℕ := ⌊dist y x⌋₊,
refine mem_Union.2 ⟨N, _⟩,
have hN : y ∈ B N, by { simp only [B, N], simp [nat.lt_floor_add_one] },
suffices : y ∈ frontier (s ∩ B N) ∩ B N, from this.1,
rw [frontier_inter_open_inter is_open_ball],
exact ⟨hy, hN⟩ },
clear hx hs s, intros s hs hx hb,
/- Since `s` is bounded, we have `μ (interior s) ≠ ∞`, hence it suffices to prove
`μ (closure s) ≤ μ (interior s)`. -/
replace hb : μ (interior s) ≠ ∞, from (hb.mono interior_subset).measure_lt_top.ne,
suffices : μ (closure s) ≤ μ (interior s),
{ rwa [frontier, measure_diff interior_subset_closure is_open_interior.measurable_set hb,
tsub_eq_zero_iff_le] },
/- Due to `convex.closure_subset_image_homothety_interior_of_one_lt`, for any `r > 1` we have
`closure s ⊆ homothety x r '' interior s`, hence `μ (closure s) ≤ r ^ d * μ (interior s)`,
where `d = finrank ℝ E`. -/
set d : ℕ := finite_dimensional.finrank ℝ E,
have : ∀ r : ℝ≥0, 1 < r → μ (closure s) ≤ ↑(r ^ d) * μ (interior s),
{ intros r hr,
refine (measure_mono $ hs.closure_subset_image_homothety_interior_of_one_lt hx r hr).trans_eq _,
rw [add_haar_image_homothety, ← nnreal.coe_pow, nnreal.abs_eq, ennreal.of_real_coe_nnreal] },
have : ∀ᶠ r in 𝓝[>] (1 : ℝ≥0), μ (closure s) ≤ ↑(r ^ d) * μ (interior s),
from mem_of_superset self_mem_nhds_within this,
/- Taking the limit as `r → 1`, we get `μ (closure s) ≤ μ (interior s)`. -/
refine ge_of_tendsto _ this,
refine (((ennreal.continuous_mul_const hb).comp
(ennreal.continuous_coe.comp (continuous_pow d))).tendsto' _ _ _).mono_left nhds_within_le_nhds,
simp
end
/-- A convex set in a finite dimensional real vector space is null measurable with respect to an
additive Haar measure on this space. -/
protected lemma null_measurable_set (hs : convex ℝ s) : null_measurable_set s μ :=
null_measurable_set_of_null_frontier (hs.add_haar_frontier μ)
end convex
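/- Putting the two results together: a convex set `s` differs from a Borel set only by a
`μ`-null set, and since its frontier is null one also gets
`μ (interior s) = μ s = μ (closure s)` for every convex `s`. -/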
| 5d703b894993a1b380a00e51dc54f550b644eea6 | 36938939954e91f23dec66a02728db08a7acfcf9 | /lean/deps/galois_stdlib/src/galois/data/array/lex_order.lean | 4e97a311745452ee34c223b32b9def4f32f8fd17 | ["Apache-2.0"] | permissive | pnwamk/reopt-vcg | f8b56dd0279392a5e1c6aee721be8138e6b558d3 | c9f9f185fbefc25c36c4b506bbc85fd1a03c3b6d | refs/heads/master | 1,631,145,017,772 | 1,593,549,019,000 | 1,593,549,143,000 | 254,191,418 | 0 | 0 | null | 1,586,377,077,000 | 1,586,377,077,000 | null | UTF-8 | Lean | false | false | 6,258 | lean |
-- Defines predicates for strict and non-strict lexicographic orderings over arrays.
namespace array
/--
This compares two arrays lexicographically, with an extra argument giving the value of the
predicate when the two arrays agree on all indices they share (in particular when they are equal).
-/
inductive lex_compare {α} [h:linear_order α] (are_eq : Prop) {m n} (x : array m α) (y : array n α) : Prop
| has_lt_index {}
(j : ℕ)
(j_m : j < m) (j_n : j < n)
(is_lt : x.read ⟨j,j_m⟩ < y.read ⟨j, j_n⟩)
(eq : ∀(k : ℕ) (k_j : k < j), x.read ⟨k,trans k_j j_m⟩ = y.read ⟨k, trans k_j j_n⟩)
: lex_compare
| is_prefix
(shorter_array : are_eq)
(eq : ∀(k : ℕ) (k_m : k < m) (k_n : k < n), x.read ⟨k,k_m⟩ = y.read ⟨k,k_n⟩)
: lex_compare
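/- A small worked example: for arrays `x` with entries `1, 2, 3` and `y` with entries `1, 2, 4`,
`lex_compare are_eq x y` holds via `has_lt_index` with `j = 2`, because the entries at indices
`0` and `1` agree and `3 < 4` at index `2`. If instead `x` and `y` agree on every index they
share (in particular if `x = y`), only `is_prefix` can apply, so the relation reduces to the
extra proposition `are_eq`. -/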
namespace lex_compare
/- This decides the lex_compare relation given decidability of the underlying relation. -/
def decide {α:Type _}
[h:linear_order α] [r:decidable_rel (h.lt)] (when_eq : Prop) {m} (x:array m α) {n} (y:array n α)
[decidable when_eq]
: Π(i:ℕ) -- Index in array to check next
(j:ℕ) -- Number of elements remaining to check
(p:i + j = min m n)
(q : ∀(k:ℕ) (k_i : k < i) (k_lt_m : k < m) (k_lt_n : k < n), x.read ⟨k, k_lt_m⟩ = y.read ⟨k, k_lt_n⟩),
decidable (lex_compare when_eq x y)
| i 0 p q :=
if m_le_n : when_eq then
decidable.is_true
begin
apply lex_compare.is_prefix m_le_n,
intros k k_lt_m k_lt_n,
simp only [nat.add_zero] at p,
have k_lt_i : k < i,
{ simp only [p], exact (lt_min k_lt_m k_lt_n), },
apply q k k_lt_i k_lt_m k_lt_n,
end
else
decidable.is_false
begin
intro c,
cases c,
case lex_compare.is_prefix { contradiction, },
case lex_compare.has_lt_index : j j_lt_m j_lt_n read_x_lt_read_y read_eq {
simp only [nat.add_zero] at p,
have j_lt_i : j < i, { simp only [p], exact (lt_min j_lt_m j_lt_n), },
have r : read x ⟨j, j_lt_m⟩ = read y ⟨j, j_lt_n⟩ := q j j_lt_i j_lt_m j_lt_n,
simp only [r] at read_x_lt_read_y,
exact (lt_irrefl _ read_x_lt_read_y),
},
end
| i (nat.succ j) p q :=
let i_lt_min : i < min m n :=
begin
simp only [eq.symm p],
apply nat.lt_add_of_zero_lt_left _ _ (nat.zero_lt_succ _),
end in
let i_lt_m : i < m := lt_of_lt_of_le i_lt_min (min_le_left m n) in
let i_lt_n : i < n := lt_of_lt_of_le i_lt_min (min_le_right m n) in
if read_x_lt_read_y : x.read ⟨i, i_lt_m⟩ < y.read ⟨i, i_lt_n⟩ then
decidable.is_true
begin
apply lex_compare.has_lt_index i i_lt_m i_lt_n read_x_lt_read_y,
intros k k_lt_i,
apply q k k_lt_i,
end
else if read_y_lt_read_x : y.read ⟨i, i_lt_n⟩ < x.read ⟨i, i_lt_m⟩ then
decidable.is_false
begin
intro c,
cases c,
case lex_compare.is_prefix : m_le_n c_eq {
have read_i_eq := c_eq i i_lt_m i_lt_n,
simp only [read_i_eq] at read_y_lt_read_x,
apply lt_irrefl _ read_y_lt_read_x,
},
case lex_compare.has_lt_index : j j_lt_m j_lt_n read_j_lt k_eq {
have tri := lt_trichotomy i j,
cases tri,
case or.inl : i_lt_j {
have read_i_eq := k_eq i i_lt_j,
simp only [read_i_eq] at read_y_lt_read_x,
apply lt_irrefl _ read_y_lt_read_x,
},
cases tri,
case or.inl : i_eq_j {
have fin_m_i_eq_j : fin.mk i i_lt_m = fin.mk j j_lt_m := fin.eq_of_veq i_eq_j,
have fin_n_i_eq_j : fin.mk i i_lt_n = fin.mk j j_lt_n := fin.eq_of_veq i_eq_j,
simp only [fin_m_i_eq_j, fin_n_i_eq_j] at read_y_lt_read_x,
exact (not_lt_of_gt read_y_lt_read_x read_j_lt),
},
case or.inr : j_lt_i {
have read_j_eq := q j j_lt_i j_lt_m j_lt_n,
simp only [read_j_eq] at read_j_lt,
apply lt_irrefl _ read_j_lt,
},
},
end
else
let idx_inv : i + 1 + j = min m n := by simp [symm p] in
-- Prove we checked all the elements less than i+1
let prev_checked : ∀ (k : ℕ) (k_lt : k < i + 1) (k_m : k < m) (k_n : k < n),
read x ⟨k, k_m⟩ = read y ⟨k, k_n⟩ :=
begin
intros k k_lt_succ_i k_lt_m k_lt_n,
have k_choices : k < i ∨ k = i := lt_or_eq_of_le (nat.le_of_lt_succ k_lt_succ_i ),
cases k_choices,
case or.inl : k_lt_i {
exact (q _ k_lt_i k_lt_m k_lt_n),
},
case or.inr : k_eq_i {
-- Replace k with i in goal
have fin_m_k_eq_i : fin.mk k k_lt_m = fin.mk i i_lt_m := fin.eq_of_veq k_eq_i,
have fin_n_k_eq_i : fin.mk k k_lt_n = fin.mk i i_lt_n := fin.eq_of_veq k_eq_i,
simp [fin_m_k_eq_i, fin_n_k_eq_i],
-- Prove x.read i = y.read i
have read_tri := lt_trichotomy (read x ⟨i, i_lt_m⟩) (read y ⟨i, i_lt_n⟩),
simp [read_x_lt_read_y, read_y_lt_read_x] at read_tri,
exact read_tri,
},
end in
decide (i+1) j idx_inv prev_checked
instance {α:Type _} [h:linear_order α] [r:decidable_rel (h.lt)] {m} (x:array m α) {n} (y:array n α)
(when_eq : Prop) [decidable when_eq]
: decidable (lex_compare when_eq x y) :=
let p : ∀(k:ℕ) (k_i : k < 0) (k_m : k < m) (k_n : k < n), x.read ⟨k, k_m⟩ = y.read ⟨k, k_n⟩ :=
begin
intros k k_lt_zero,
exact false.elim (nat.not_lt_zero k k_lt_zero),
end in
decide when_eq x y 0 (min m n) (nat.zero_add (min m n)) p
end lex_compare
/-- One array is lexicographically less than or equal to another. -/
def lex_le {α} [h:linear_order α] {m n} (x : array m α) (y : array n α) : Prop :=
lex_compare (m ≤ n) x y
namespace lex_le
instance {α:Type _} [h:linear_order α] [r:decidable_rel (h.lt)] {m} (x:array m α) {n} (y:array n α)
: decidable (lex_le x y) :=
begin unfold lex_le, apply_instance, end
end lex_le
/-- One array is lexicographically strictly less than another. -/
def lex_lt {α} [h:linear_order α] {m n} (x : array m α) (y : array n α) : Prop :=
lex_compare (m < n) x y
namespace lex_lt
instance {α:Type _} [h:linear_order α] [r:decidable_rel (h.lt)] {m} (x:array m α) {n} (y:array n α)
: decidable (lex_lt x y) :=
begin unfold lex_lt, apply_instance, end
end lex_lt
end array
| fe635e204896dfe8727323c016062120a10606c0 | 54f4ad05b219d444b709f56c2f619dd87d14ec29 | /my_project/src/love07_metaprogramming_demo.lean | 266b31bf7f01ff979f0d6168f347be3f6723da7b | [] | no_license | yizhou7/learning-lean | 8efcf838c7276e235a81bd291f467fa43ce56e0a | 91fb366c624df6e56e19555b2e482ce767cd8224 | refs/heads/master | 1,675,649,087,737 | 1,609,022,281,000 | 1,609,022,281,000 | 272,072,779 | 0 | 0 | null | null | null | null | UTF-8 | Lean | false | false | 14,382 | lean |
import .love05_inductive_predicates_demo
/- # LoVe Demo 7: Metaprogramming
Users can extend Lean with custom monadic tactics and tools. This kind of
programming—programming the prover—is called metaprogramming.
Lean's metaprogramming framework uses mostly the same notions and syntax as
Lean's input language itself.
Abstract syntax trees __reflect__ internal data structures, e.g., for
expressions (terms).
The prover's C++ internals are exposed through Lean interfaces, which we can
use for accessing the current context and goal, unifying expressions, querying
and modifying the environment, and setting attributes (e.g., `@[simp]`).
Most of Lean's predefined tactics are implemented in Lean (and not in C++).
Example applications:
* proof goal transformations;
* heuristic proof search;
* decision procedures;
* definition generators;
* advisor tools;
* exporters;
* ad hoc automation.
Advantages of Lean's metaprogramming framework:
* Users do not need to learn another programming language to write
metaprograms; they can work with the same constructs and notation used to
define ordinary objects in the prover's library.
* Everything in that library is available for metaprogramming purposes.
* Metaprograms can be written and debugged in the same interactive environment,
encouraging a style where formal libraries and supporting automation are
developed at the same time. -/
set_option pp.beta true
set_option pp.generalized_field_notation false
namespace LoVe
/- ## Tactics and Tactic Combinators
When programming our own tactics, we often need to repeat some actions on
several goals, or to recover if a tactic fails. Tactic combinators help in such
case.
`repeat` applies its argument repeatedly on all (sub…sub)goals until it cannot
be applied any further. -/
lemma repeat_example :
even 4 ∧ even 7 ∧ even 3 ∧ even 0 :=
begin
repeat { apply and.intro },
repeat { apply even.add_two },
repeat { sorry }
end
/- The "orelse" combinator `<|>` tries its first argument and applies its
second argument in case of failure. -/
lemma repeat_orelse_example :
even 4 ∧ even 7 ∧ even 3 ∧ even 0 :=
begin
repeat { apply and.intro },
repeat {
apply even.add_two
<|> apply even.zero },
repeat { sorry }
end
/- `iterate` works repeatedly on the first goal until it fails; then it
stops. -/
lemma iterate_orelse_example :
even 4 ∧ even 7 ∧ even 3 ∧ even 0 :=
begin
repeat { apply and.intro },
iterate {
apply even.add_two
<|> apply even.zero },
repeat { sorry }
end
/- `all_goals` applies its argument exactly once to each goal. It succeeds only
if the argument succeeds on **all** goals. -/
lemma all_goals_example :
even 4 ∧ even 7 ∧ even 3 ∧ even 0 :=
begin
repeat { apply and.intro },
all_goals { apply even.add_two }, -- fails
repeat { sorry }
end
/- `try` transforms its argument into a tactic that never fails. -/
lemma all_goals_try_example :
even 4 ∧ even 7 ∧ even 3 ∧ even 0 :=
begin
repeat { apply and.intro },
all_goals { try { apply even.add_two } },
repeat { sorry }
end
/- `any_goals` applies its argument exactly once to each goal. It succeeds
if the argument succeeds on **any** goal. -/
lemma any_goals_example :
even 4 ∧ even 7 ∧ even 3 ∧ even 0 :=
begin
repeat { apply and.intro },
any_goals { apply even.add_two },
repeat { sorry }
end
/- `solve1` transforms its argument into an all-or-nothing tactic. If the
argument does not prove the goal, `solve1` fails. -/
lemma any_goals_solve1_repeat_orelse_example :
even 4 ∧ even 7 ∧ even 3 ∧ even 0 :=
begin
repeat { apply and.intro },
any_goals { solve1 { repeat {
apply even.add_two
<|> apply even.zero } } },
repeat { sorry }
end
/- The combinators `repeat`, `iterate`, `all_goals`, and `any_goals` can easily
lead to infinite looping: -/
/-
lemma repeat_not_example :
¬ even 1 :=
begin
repeat { apply not.intro },
sorry
end
-/
/- Let us start with the actual metaprogramming, by coding a custom tactic. The
tactic embodies the behavior we hardcoded in the `solve1` example above: -/
meta def intro_and_even : tactic unit :=
do
tactic.repeat (tactic.applyc ``and.intro),
tactic.any_goals (tactic.solve1 (tactic.repeat
(tactic.applyc ``even.add_two
<|> tactic.applyc ``even.zero))),
pure ()
/- The `meta` keyword makes it possible for the function to call other
metafunctions. The `do` keyword enters a monad, and the `<|>` operator is the
"orelse" operator of alternative monads. At the end, we return `()`, of type
`unit`, to ensure the metaprogram has the desired type.
Any executable Lean definition can be used as a metaprogram. In addition, we can
put `meta` in front of a definition to indicate that it is a metadefinition. Such
definitions need not terminate but cannot be used in non-`meta` contexts.
Let us apply our custom tactic: -/
lemma any_goals_solve1_repeat_orelse_example₂ :
even 4 ∧ even 7 ∧ even 3 ∧ even 0 :=
begin
intro_and_even,
repeat { sorry }
end
/- ## The Metaprogramming Monad
Tactics have access to
* the list of **goals** as metavariables (each metavariable has a type and a
  local context of hypotheses; they can optionally be instantiated);
* the **elaborator** (to elaborate expressions and compute their type);
* the **environment**, containing all declarations and inductive types;
* the **attributes** (e.g., the list of `@[simp]` rules).
The tactic monad is an alternative monad, with `fail` and `<|>`. Tactics can
also produce trace messages. -/
lemma even_14 :
even 14 :=
by do
tactic.trace "Proving evenness …",
intro_and_even
meta def hello_then_intro_and_even : tactic unit :=
do
tactic.trace "Proving evenness …",
intro_and_even
lemma even_16 :
even 16 :=
by hello_then_intro_and_even
run_cmd tactic.trace "Hello, Metaworld!"
meta def trace_goals : tactic unit :=
do
tactic.trace "local context:",
ctx ← tactic.local_context,
tactic.trace ctx,
tactic.trace "target:",
P ← tactic.target,
tactic.trace P,
tactic.trace "all missing proofs:",
Hs ← tactic.get_goals,
tactic.trace Hs,
τs ← list.mmap tactic.infer_type Hs,
tactic.trace τs
lemma even_18_and_even_20 (α : Type) (a : α) :
even 18 ∧ even 20 :=
by do
trace_goals,
tactic.applyc ``and.intro,
intro_and_even
lemma triv_imp (a : Prop) (h : a) :
a :=
by do
h ← tactic.get_local `h,
tactic.trace "h:",
tactic.trace h,
tactic.trace "raw h:",
tactic.trace (expr.to_raw_fmt h),
tactic.trace "type of h:",
τ ← tactic.infer_type h,
tactic.trace τ,
tactic.trace "type of type of h:",
υ ← tactic.infer_type τ,
tactic.trace υ,
tactic.apply h
meta def exact_list : list expr → tactic unit
| [] := tactic.fail "no matching expression found"
| (h :: hs) :=
do {
tactic.trace "trying",
tactic.trace h,
tactic.exact h }
<|> exact_list hs
meta def hypothesis : tactic unit :=
do
hs ← tactic.local_context,
exact_list hs
lemma app_of_app {α : Type} {p : α → Prop} {a : α}
(h : p a) :
p a :=
by hypothesis
/- ## Names, Expressions, Declarations, and Environments
The metaprogramming framework is articulated around five main types:
* `tactic` manages the proof state, the global context, and more;
* `name` represents a structured name (e.g., `x`, `even.add_two`);
* `expr` represents an expression (a term) as an abstract syntax tree;
* `declaration` represents a constant declaration, a definition, an axiom, or a
lemma;
* `environment` stores all the declarations and notations that make up the
global context. -/
#print expr
#check expr tt -- elaborated expressions
#check expr ff -- unelaborated expressions (pre-expressions)
#print name
#check (expr.const `ℕ [] : expr)
#check expr.sort level.zero -- Sort 0, i.e., Prop
#check expr.sort (level.succ level.zero)
-- Sort 1, i.e., Type
#check expr.var 0 -- bound variable with De Bruijn index 0
#check (expr.local_const `uniq_name `pp_name binder_info.default
`(ℕ) : expr)
#check (expr.mvar `uniq_name `pp_name `(ℕ) : expr)
#check (expr.pi `pp_name binder_info.default `(ℕ)
(expr.sort level.zero) : expr)
#check (expr.lam `pp_name binder_info.default `(ℕ)
(expr.var 0) : expr)
#check expr.elet
#check expr.macro
/- We can create literal expressions conveniently using backticks and
parentheses:
* Expressions with a single backtick must be fully elaborated.
* Expressions with two backticks are __pre-expressions__: They may contain some
holes to be filled in later, based on some context.
* Expressions with three backticks are pre-expressions without name checking. -/
run_cmd do
let e : expr := `(list.map (λn : ℕ, n + 1) [1, 2, 3]),
tactic.trace e
run_cmd do
let e : expr := `(list.map _ [1, 2, 3]), -- fails
tactic.trace e
run_cmd do
let e₁ : pexpr := ``(list.map (λn, n + 1) [1, 2, 3]),
let e₂ : pexpr := ``(list.map _ [1, 2, 3]),
tactic.trace e₁,
tactic.trace e₂
run_cmd do
let e : pexpr := ```(seattle.washington),
tactic.trace e
/- We can also create literal names with backticks:
* Names with a single backtick, `n, are not checked for existence.
* Names with two backticks, ``n, are resolved and checked. -/
run_cmd tactic.trace `and.intro
run_cmd tactic.trace `intro_and_even
run_cmd tactic.trace `seattle.washington
run_cmd tactic.trace ``and.intro
run_cmd tactic.trace ``intro_and_even
run_cmd tactic.trace ``seattle.washington -- fails
/- __Antiquotations__ embed an existing expression in a larger expression. They
are announced by the prefix `%%` followed by a name from the current context.
Antiquotations are available with one, two, and three backticks: -/
run_cmd do
let x : expr := `(2 : ℕ),
let e : expr := `(%%x + 1),
tactic.trace e
run_cmd do
let x : expr := `(@id ℕ),
let e : pexpr := ``(list.map %%x),
tactic.trace e
run_cmd do
let x : expr := `(@id ℕ),
let e : pexpr := ```(a _ %%x),
tactic.trace e
lemma one_add_two_eq_three :
1 + 2 = 3 :=
by do
`(%%a + %%b = %%c) ← tactic.target,
tactic.trace a,
tactic.trace b,
tactic.trace c,
`(@eq %%α %%l %%r) ← tactic.target,
tactic.trace α,
tactic.trace l,
tactic.trace r,
tactic.exact `(refl _ : 3 = 3)
#print declaration
/- The `environment` type is presented as an abstract type, equipped with some
operations to query and modify it. The `environment.fold` metafunction iterates
over all declarations making up the environment. -/
run_cmd do
env ← tactic.get_env,
tactic.trace (environment.fold env 0 (λdecl n, n + 1))
/- ## First Example: A Conjunction-Destructing Tactic
We define a `destruct_and` tactic that automates the elimination of `∧` in
premises, automating proofs such as these: -/
lemma abcd_a (a b c d : Prop) (h : a ∧ (b ∧ c) ∧ d) :
a :=
and.elim_left h
lemma abcd_b (a b c d : Prop) (h : a ∧ (b ∧ c) ∧ d) :
b :=
and.elim_left (and.elim_left (and.elim_right h))
lemma abcd_bc (a b c d : Prop) (h : a ∧ (b ∧ c) ∧ d) :
b ∧ c :=
and.elim_left (and.elim_right h)
/- Our tactic relies on a helper metafunction, which takes as argument the
hypothesis `h` to use as an expression rather than as a name: -/
meta def destruct_and_helper : expr → tactic unit
| h :=
do
t ← tactic.infer_type h,
match t with
| `(%%a ∧ %%b) :=
tactic.exact h
<|>
do {
ha ← tactic.to_expr ``(and.elim_left %%h),
destruct_and_helper ha }
<|>
do {
hb ← tactic.to_expr ``(and.elim_right %%h),
destruct_and_helper hb }
| _ := tactic.exact h
end
meta def destruct_and (nam : name) : tactic unit :=
do
h ← tactic.get_local nam,
destruct_and_helper h
/- Let us check that our tactic works: -/
lemma abc_a (a b c : Prop) (h : a ∧ b ∧ c) :
a :=
by destruct_and `h
lemma abc_b (a b c : Prop) (h : a ∧ b ∧ c) :
b :=
by destruct_and `h
lemma abc_bc (a b c : Prop) (h : a ∧ b ∧ c) :
b ∧ c :=
by destruct_and `h
lemma abc_ac (a b c : Prop) (h : a ∧ b ∧ c) :
a ∧ c :=
by destruct_and `h -- fails
/- ## Second Example: A Provability Advisor
Next, we implement a `prove_direct` tool that traverses all lemmas in the
database and checks whether one of them can be used to prove the current goal. A
similar tactic is available in `mathlib` under the name `library_search`. -/
meta def is_theorem : declaration → bool
| (declaration.defn _ _ _ _ _ _) := ff
| (declaration.thm _ _ _ _) := tt
| (declaration.cnst _ _ _ _) := ff
| (declaration.ax _ _ _) := tt
meta def get_all_theorems : tactic (list name) :=
do
env ← tactic.get_env,
pure (environment.fold env [] (λdecl nams,
if is_theorem decl then declaration.to_name decl :: nams
else nams))
meta def prove_with_name (nam : name) : tactic unit :=
do
tactic.applyc nam
({ md := tactic.transparency.reducible, unify := ff }
: tactic.apply_cfg),
tactic.all_goals tactic.assumption,
pure ()
meta def prove_direct : tactic unit :=
do
nams ← get_all_theorems,
list.mfirst (λnam,
do
prove_with_name nam,
tactic.trace ("directly proved by " ++ to_string nam))
nams
lemma nat.eq_symm (x y : ℕ) (h : x = y) :
y = x :=
by prove_direct
lemma nat.eq_symm₂ (x y : ℕ) (h : x = y) :
y = x :=
by library_search
lemma list.reverse_twice (xs : list ℕ) :
list.reverse (list.reverse xs) = xs :=
by prove_direct
lemma list.reverse_twice_symm (xs : list ℕ) :
xs = list.reverse (list.reverse xs) :=
by prove_direct -- fails
/- As a small refinement, we propose a version of `prove_direct` that also
looks for equalities stated in symmetric form. -/
meta def prove_direct_symm : tactic unit :=
prove_direct
<|>
do {
tactic.applyc `eq.symm,
prove_direct }
lemma list.reverse_twice₂ (xs : list ℕ) :
list.reverse (list.reverse xs) = xs :=
by prove_direct_symm
lemma list.reverse_twice_symm₂ (xs : list ℕ) :
xs = list.reverse (list.reverse xs) :=
by prove_direct_symm
/- ## A Look at Two Predefined Tactics
Quite a few of Lean's predefined tactics are implemented as metaprograms and
not in C++. We can find these definitions by clicking the name of a construct
in Visual Studio Code while holding the control or command key. -/
#check tactic.intro
#check tactic.assumption
end LoVe
| 604741988895f22382dabd6fa3f6a18cd34a5095 | 57c233acf9386e610d99ed20ef139c5f97504ba3 | /src/linear_algebra/lagrange.lean | dcbc5907f444443c3f5112ff88af6efdf629907b | ["Apache-2.0"] | permissive | robertylewis/mathlib | 3d16e3e6daf5ddde182473e03a1b601d2810952c | 1d13f5b932f5e40a8308e3840f96fc882fae01f0 | refs/heads/master | 1,651,379,945,369 | 1,644,276,960,000 | 1,644,276,960,000 | 98,875,504 | 0 | 0 | Apache-2.0 | 1,644,253,514,000 | 1,501,495,700,000 | Lean | UTF-8 | Lean | false | false | 9,818 | lean |
/-
Copyright (c) 2020 Kenny Lau. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kenny Lau
-/
import algebra.big_operators.basic
import ring_theory.polynomial.basic
/-!
# Lagrange interpolation
## Main definitions
* `lagrange.basis s x` where `s : finset F` and `x : F`: the Lagrange basis polynomial
that evaluates to `1` at `x` and `0` at other elements of `s`.
* `lagrange.interpolate s f` where `s : finset F` and `f : F → F`: the Lagrange interpolant
that evaluates to `f x` at `x` for `x ∈ s`.
-/
noncomputable theory
open_locale big_operators classical
universe u
namespace lagrange
variables {F : Type u} [decidable_eq F] [field F] (s : finset F)
variables {F' : Type u} [field F'] (s' : finset F')
open polynomial
/-- Lagrange basis polynomials that evaluate to 1 at `x` and 0 at other elements of `s`. -/
def basis (x : F) : polynomial F :=
∏ y in s.erase x, C (x - y)⁻¹ * (X - C y)
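/- Unfolding the definition, `basis s x` is the classical Lagrange basis polynomial
`∏ y in s.erase x, (X - y) / (x - y)`. For example, with `s = {0, 1}` one gets
`basis s 1 = C (1 - 0)⁻¹ * (X - C 0) = X` and `basis s 0 = C (0 - 1)⁻¹ * (X - C 1) = 1 - X`. -/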
@[simp] theorem basis_empty (x : F) : basis ∅ x = 1 :=
rfl
@[simp] theorem basis_singleton_self (x : F) : basis {x} x = 1 :=
by rw [basis, finset.erase_singleton, finset.prod_empty]
@[simp] theorem eval_basis_self (x : F) : (basis s x).eval x = 1 :=
begin
rw [basis, ← coe_eval_ring_hom, (eval_ring_hom x).map_prod, coe_eval_ring_hom,
finset.prod_eq_one],
intros y hy, simp_rw [eval_mul, eval_sub, eval_C, eval_X],
exact inv_mul_cancel (sub_ne_zero_of_ne (finset.ne_of_mem_erase hy).symm)
end
@[simp] theorem eval_basis_ne (x y : F) (h1 : y ∈ s) (h2 : y ≠ x) : (basis s x).eval y = 0 :=
begin
rw [basis,
← coe_eval_ring_hom, (eval_ring_hom y).map_prod, coe_eval_ring_hom,
finset.prod_eq_zero (finset.mem_erase.2 ⟨h2, h1⟩)],
simp_rw [eval_mul, eval_sub, eval_C, eval_X, sub_self, mul_zero]
end
theorem eval_basis (x y : F) (h : y ∈ s) : (basis s x).eval y = if y = x then 1 else 0 :=
by { split_ifs with H, { subst H, apply eval_basis_self }, { exact eval_basis_ne s x y h H } }
@[simp] theorem nat_degree_basis (x : F) (hx : x ∈ s) : (basis s x).nat_degree = s.card - 1 :=
begin
unfold basis, generalize hsx : s.erase x = sx,
have : x ∉ sx := hsx ▸ finset.not_mem_erase x s,
rw [← finset.insert_erase hx, hsx, finset.card_insert_of_not_mem this, add_tsub_cancel_right],
clear hx hsx s, revert this, apply sx.induction_on,
{ intros hx, rw [finset.prod_empty, nat_degree_one], refl },
{ intros y s hys ih hx, rw [finset.mem_insert, not_or_distrib] at hx,
have h1 : C (x - y)⁻¹ ≠ C 0 := λ h, hx.1 (eq_of_sub_eq_zero $ inv_eq_zero.1 $ C_inj.1 h),
have h2 : X ^ 1 - C y ≠ 0 := by convert X_pow_sub_C_ne_zero zero_lt_one y,
rw C_0 at h1, rw pow_one at h2,
rw [finset.prod_insert hys, nat_degree_mul (mul_ne_zero h1 h2), ih hx.2,
finset.card_insert_of_not_mem hys, nat_degree_mul h1 h2,
nat_degree_C, zero_add, nat_degree, degree_X_sub_C, add_comm], refl,
rw [ne, finset.prod_eq_zero_iff], rintro ⟨z, hzs, hz⟩,
rw mul_eq_zero at hz, cases hz with hz hz,
{ rw [← C_0, C_inj, inv_eq_zero, sub_eq_zero] at hz, exact hx.2 (hz.symm ▸ hzs) },
{ rw ← pow_one (X : polynomial F) at hz, exact X_pow_sub_C_ne_zero zero_lt_one _ hz } }
end
variables (f : F → F)
/-- Lagrange interpolation: given a finset `s` and a function `f : F → F`,
`interpolate s f` is the unique polynomial of degree `< s.card`
that takes value `f x` on all `x` in `s`. -/
def interpolate : polynomial F :=
∑ x in s, C (f x) * basis s x
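/- A worked example continuing the computation above: for `s = {0, 1}`,
`interpolate s f = C (f 0) * (1 - X) + C (f 1) * X`, the unique polynomial of degree `< 2`
taking the value `f 0` at `0` and `f 1` at `1`. -/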
@[simp] theorem interpolate_empty (f) : interpolate (∅ : finset F) f = 0 :=
rfl
@[simp] theorem interpolate_singleton (f) (x : F) : interpolate {x} f = C (f x) :=
by rw [interpolate, finset.sum_singleton, basis_singleton_self, mul_one]
@[simp] theorem eval_interpolate (x) (H : x ∈ s) : eval x (interpolate s f) = f x :=
begin
rw [interpolate, ←coe_eval_ring_hom, ring_hom.map_sum, coe_eval_ring_hom, finset.sum_eq_single x],
{ simp },
{ intros y hy hxy, simp [eval_basis_ne s y x H hxy.symm] },
{ intro h, exact (h H).elim }
end
theorem degree_interpolate_lt : (interpolate s f).degree < s.card :=
if H : s = ∅ then by { subst H, rw [interpolate_empty, degree_zero], exact with_bot.bot_lt_coe _ }
else (degree_sum_le _ _).trans_lt $ (finset.sup_lt_iff $ with_bot.bot_lt_coe s.card).2 $ λ b _,
calc (C (f b) * basis s b).degree
≤ (C (f b)).degree + (basis s b).degree : degree_mul_le _ _
... ≤ 0 + (basis s b).degree : add_le_add_right degree_C_le _
... = (basis s b).degree : zero_add _
... ≤ (basis s b).nat_degree : degree_le_nat_degree
... = (s.card - 1 : ℕ) : by { rwa nat_degree_basis }
... < s.card : with_bot.coe_lt_coe.2 (nat.pred_lt $ mt finset.card_eq_zero.1 H)
theorem degree_interpolate_erase {x} (hx : x ∈ s) :
(interpolate (s.erase x) f).degree < (s.card - 1 : ℕ) :=
begin
convert degree_interpolate_lt (s.erase x) f,
rw [finset.card_erase_of_mem hx, nat.pred_eq_sub_one]
end
theorem interpolate_eq_of_eval_eq (f g : F → F) {s : finset F} (hs : ∀ x ∈ s, f x = g x) :
interpolate s f = interpolate s g :=
begin
rw [interpolate, interpolate],
refine finset.sum_congr rfl (λ x hx, _),
rw hs x hx,
end
/-- Linear version of `interpolate`. -/
def linterpolate : (F → F) →ₗ[F] polynomial F :=
{ to_fun := interpolate s,
map_add' := λ f g, by { simp_rw [interpolate, ← finset.sum_add_distrib, ← add_mul, ← C_add],
refl },
map_smul' := λ c f, by { simp_rw [interpolate, finset.smul_sum, C_mul', smul_smul], refl } }
@[simp] lemma interpolate_add (f g) : interpolate s (f + g) = interpolate s f + interpolate s g :=
(linterpolate s).map_add f g
@[simp] lemma interpolate_zero : interpolate s 0 = 0 :=
(linterpolate s).map_zero
@[simp] lemma interpolate_neg (f) : interpolate s (-f) = -interpolate s f :=
(linterpolate s).map_neg f
@[simp] lemma interpolate_sub (f g) : interpolate s (f - g) = interpolate s f - interpolate s g :=
(linterpolate s).map_sub f g
@[simp] lemma interpolate_smul (c : F) (f) : interpolate s (c • f) = c • interpolate s f :=
(linterpolate s).map_smul c f
theorem eq_zero_of_eval_eq_zero {f : polynomial F'} (hf1 : f.degree < s'.card)
(hf2 : ∀ x ∈ s', f.eval x = 0) : f = 0 :=
by_contradiction $ λ hf3, not_le_of_lt hf1 $
calc (s'.card : with_bot ℕ)
≤ f.roots.to_finset.card : with_bot.coe_le_coe.2 $ finset.card_le_of_subset $ λ x hx,
(multiset.mem_to_finset).mpr $ (mem_roots hf3).2 $ hf2 x hx
... ≤ f.roots.card : with_bot.coe_le_coe.2 $ f.roots.to_finset_card_le
... ≤ f.degree : card_roots hf3
theorem eq_of_eval_eq {f g : polynomial F'} (hf : f.degree < s'.card) (hg : g.degree < s'.card)
(hfg : ∀ x ∈ s', f.eval x = g.eval x) : f = g :=
eq_of_sub_eq_zero $ eq_zero_of_eval_eq_zero s'
(lt_of_le_of_lt (degree_sub_le f g) $ max_lt hf hg)
(λ x hx, by rw [eval_sub, hfg x hx, sub_self])
theorem eq_interpolate_of_eval_eq {g : polynomial F} (hg : g.degree < s.card)
(hgf : ∀ x ∈ s, g.eval x = f x) : interpolate s f = g :=
eq_of_eval_eq s (degree_interpolate_lt _ _) hg $ λ x hx, begin
rw hgf x hx,
exact eval_interpolate _ _ _ hx,
end
theorem eq_interpolate (f : polynomial F) (hf : f.degree < s.card) :
interpolate s (λ x, f.eval x) = f :=
eq_of_eval_eq s (degree_interpolate_lt s _) hf $ λ x hx, eval_interpolate s _ x hx
/-- Lagrange interpolation induces an isomorphism between functions from `s` and polynomials
of degree less than `s.card`. -/
def fun_equiv_degree_lt : degree_lt F s.card ≃ₗ[F] (s → F) :=
{ to_fun := λ f x, f.1.eval x,
map_add' := λ f g, funext $ λ x, eval_add,
map_smul' := λ c f, funext $ by simp,
inv_fun := λ f, ⟨interpolate s (λ x, if hx : x ∈ s then f ⟨x, hx⟩ else 0),
mem_degree_lt.2 $ degree_interpolate_lt _ _⟩,
left_inv := λ f, begin apply subtype.eq,
simp only [subtype.coe_mk, subtype.val_eq_coe, dite_eq_ite],
convert eq_interpolate s f (mem_degree_lt.1 f.2) using 1,
rw interpolate_eq_of_eval_eq,
intros x hx,
rw if_pos hx end,
right_inv := λ f, funext $ λ ⟨x, hx⟩, begin
convert eval_interpolate s _ x hx,
simp_rw dif_pos hx end }
theorem interpolate_eq_interpolate_erase_add {x y : F} (hx : x ∈ s) (hy : y ∈ s) (hxy : x ≠ y) :
interpolate s f =
C (y - x)⁻¹ * ((X - C x) * interpolate (s.erase x) f + (C y - X) * interpolate (s.erase y) f) :=
begin
refine eq_interpolate_of_eval_eq _ _ _ (λ z hz, _),
{ rw [degree_mul, degree_C (inv_ne_zero (sub_ne_zero.2 hxy.symm)), zero_add],
refine lt_of_le_of_lt (degree_add_le _ _) (max_lt _ _),
{ rw [degree_mul, degree_X_sub_C],
convert (with_bot.add_lt_add_iff_left (with_bot.coe_ne_bot _)).2
(degree_interpolate_erase s f hx),
simp [nat.one_add, nat.sub_one, nat.succ_pred_eq_of_pos (finset.card_pos.2 ⟨x, hx⟩)] },
{ rw [degree_mul, ←neg_sub, degree_neg, degree_X_sub_C],
convert (with_bot.add_lt_add_iff_left (with_bot.coe_ne_bot _)).2
(degree_interpolate_erase s f hy),
simp [nat.one_add, nat.sub_one, nat.succ_pred_eq_of_pos (finset.card_pos.2 ⟨y, hy⟩)] } },
{ by_cases hzx : z = x,
{ simp [hzx, eval_interpolate (s.erase y) f x (finset.mem_erase_of_ne_of_mem hxy hx),
inv_mul_eq_iff_eq_mul₀ (sub_ne_zero_of_ne hxy.symm)] },
{ by_cases hzy : z = y,
{ simp [hzy, eval_interpolate (s.erase x) f y (finset.mem_erase_of_ne_of_mem hxy.symm hy),
inv_mul_eq_iff_eq_mul₀ (sub_ne_zero_of_ne hxy.symm)] },
{ simp only [eval_interpolate (s.erase x) f z (finset.mem_erase_of_ne_of_mem hzx hz),
eval_interpolate (s.erase y) f z (finset.mem_erase_of_ne_of_mem hzy hz),
inv_mul_eq_iff_eq_mul₀ (sub_ne_zero_of_ne hxy.symm), eval_mul, eval_C, eval_add,
eval_sub, eval_X],
ring } } }
end
end lagrange
| 5ad9bb25a8df911c18d0df322cf63941a2096610 | cf39355caa609c0f33405126beee2739aa3cb77e | /tests/lean/run/div_wf.lean | 1063e153f653eda7fbb84af941686a18474a996a | ["Apache-2.0"] | permissive | leanprover-community/lean | 12b87f69d92e614daea8bcc9d4de9a9ace089d0e | cce7990ea86a78bdb383e38ed7f9b5ba93c60ce0 | refs/heads/master | 1,687,508,156,644 | 1,684,951,104,000 | 1,684,951,104,000 | 169,960,991 | 457 | 107 | Apache-2.0 | 1,686,744,372,000 | 1,549,790,268,000 | C++ | UTF-8 | Lean | false | false | 1,743 | lean |
open nat well_founded decidable prod
set_option pp.all true
-- Auxiliary lemma used to justify recursive call
private definition lt_aux {x y : nat} (H : 0 < y ∧ y ≤ x) : x - y < x :=
and.rec_on H (λ ypos ylex,
nat.sub_lt (nat.lt_of_lt_of_le ypos ylex) ypos)
definition wdiv.F (x : nat) (f : Π x₁, x₁ < x → nat → nat) (y : nat) : nat :=
if H : 0 < y ∧ y ≤ x then f (x - y) (lt_aux H) y + 1 else 0
definition wdiv (x y : nat) :=
fix lt_wf wdiv.F x y
theorem wdiv_def (x y : nat) : wdiv x y = if H : 0 < y ∧ y ≤ x then wdiv (x - y) y + 1 else 0 :=
congr_fun (well_founded.fix_eq lt_wf wdiv.F x) y
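/- `wdiv_def` lets one unfold the fixpoint by hand, e.g.
`wdiv 5 2 = wdiv 3 2 + 1 = (wdiv 1 2 + 1) + 1 = (0 + 1) + 1 = 2`,
where the innermost call returns `0` because `2 ≤ 1` fails. -/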
/-
See comment at fib_wrec.
example : wdiv 5 2 = 2 :=
rfl
example : wdiv 9 3 = 3 :=
rfl
-/
-- There is a little bit of cheating in the definition above.
-- I avoid the packing/unpacking into tuples.
-- The actual definitional package would not do that.
-- It will always pack things.
definition pair_nat.lt := lex nat.lt nat.lt -- Could also be (lex lt empty_rel)
definition pair_nat.lt.wf : well_founded pair_nat.lt :=
prod.lex_wf lt_wf lt_wf
infixl `≺`:50 := pair_nat.lt
-- Auxiliary lemma used to justify the recursive call
definition plt_aux (x y : nat) (H : 0 < y ∧ y ≤ x) : (x - y, y) ≺ (x, y) :=
lex.left _ _ (lt_aux H)
definition pdiv.F (p₁ : nat × nat) : (Π p₂ : nat × nat, p₂ ≺ p₁ → nat) → nat :=
prod.cases_on p₁ (λ x y f,
if H : 0 < y ∧ y ≤ x then f (x - y, y) (plt_aux x y H) + 1 else 0)
definition pdiv (x y : nat) :=
fix pair_nat.lt.wf pdiv.F (x, y)
theorem pdiv_def (x y : nat) : pdiv x y = if H : 0 < y ∧ y ≤ x then pdiv (x - y) y + 1 else 0 :=
well_founded.fix_eq pair_nat.lt.wf pdiv.F (x, y)
/-
See comment at fib_wrec.
example : pdiv 17 2 = 8 :=
rfl
-/
| db84a11e5dbbab6cd722f0b33462e16274558ae9 | bbecf0f1968d1fba4124103e4f6b55251d08e9c4 | /src/topology/instances/ennreal.lean | bffd0fccc86661219a1a8fe775d64066206fd193 | ["Apache-2.0"] | permissive | waynemunro/mathlib | e3fd4ff49f4cb43d4a8ded59d17be407bc5ee552 | 065a70810b5480d584033f7bbf8e0409480c2118 | refs/heads/master | 1,693,417,182,397 | 1,634,644,781,000 | 1,634,644,781,000 | null | 0 | 0 | null | null | null | null | UTF-8 | Lean | false | false | 55,375 | lean |
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl
-/
import topology.instances.nnreal
import topology.algebra.ordered.liminf_limsup
import topology.metric_space.lipschitz
/-!
# Extended non-negative reals
-/
noncomputable theory
open classical set filter metric
open_locale classical topological_space ennreal nnreal big_operators filter
variables {α : Type*} {β : Type*} {γ : Type*}
namespace ennreal
variables {a b c d : ℝ≥0∞} {r p q : ℝ≥0}
variables {x y z : ℝ≥0∞} {ε ε₁ ε₂ : ℝ≥0∞} {s : set ℝ≥0∞}
section topological_space
open topological_space
/-- Topology on `ℝ≥0∞`.
Note: this is different from the `emetric_space` topology. The `emetric_space` topology has
`is_open {⊤}`, while in the order topology used here the singleton `{⊤}` is not open. -/
instance : topological_space ℝ≥0∞ := preorder.topology ℝ≥0∞
instance : order_topology ℝ≥0∞ := ⟨rfl⟩
instance : t2_space ℝ≥0∞ := by apply_instance -- short-circuit type class inference
instance : second_countable_topology ℝ≥0∞ :=
⟨⟨⋃q ≥ (0:ℚ), {{a : ℝ≥0∞ | a < real.to_nnreal q}, {a : ℝ≥0∞ | ↑(real.to_nnreal q) < a}},
(countable_encodable _).bUnion $ assume a ha, (countable_singleton _).insert _,
le_antisymm
(le_generate_from $ by simp [or_imp_distrib, is_open_lt', is_open_gt'] {contextual := tt})
(le_generate_from $ λ s h, begin
rcases h with ⟨a, hs | hs⟩;
[ rw show s = ⋃q∈{q:ℚ | 0 ≤ q ∧ a < real.to_nnreal q}, {b | ↑(real.to_nnreal q) < b},
from set.ext (assume b, by simp [hs, @ennreal.lt_iff_exists_rat_btwn a b, and_assoc]),
rw show s = ⋃q∈{q:ℚ | 0 ≤ q ∧ ↑(real.to_nnreal q) < a}, {b | b < ↑(real.to_nnreal q)},
from set.ext (assume b,
by simp [hs, @ennreal.lt_iff_exists_rat_btwn b a, and_comm, and_assoc])];
{ apply is_open_Union, intro q,
apply is_open_Union, intro hq,
exact generate_open.basic _ (mem_bUnion hq.1 $ by simp) }
end)⟩⟩
lemma embedding_coe : embedding (coe : ℝ≥0 → ℝ≥0∞) :=
⟨⟨begin
refine le_antisymm _ _,
{ rw [@order_topology.topology_eq_generate_intervals ℝ≥0∞ _,
← coinduced_le_iff_le_induced],
refine le_generate_from (assume s ha, _),
rcases ha with ⟨a, rfl | rfl⟩,
show is_open {b : ℝ≥0 | a < ↑b},
{ cases a; simp [none_eq_top, some_eq_coe, is_open_lt'] },
show is_open {b : ℝ≥0 | ↑b < a},
{ cases a; simp [none_eq_top, some_eq_coe, is_open_gt', is_open_const] } },
{ rw [@order_topology.topology_eq_generate_intervals ℝ≥0 _],
refine le_generate_from (assume s ha, _),
rcases ha with ⟨a, rfl | rfl⟩,
exact ⟨Ioi a, is_open_Ioi, by simp [Ioi]⟩,
exact ⟨Iio a, is_open_Iio, by simp [Iio]⟩ }
end⟩,
assume a b, coe_eq_coe.1⟩
lemma is_open_ne_top : is_open {a : ℝ≥0∞ | a ≠ ⊤} := is_open_ne
lemma is_open_Ico_zero : is_open (Ico 0 b) := by { rw ennreal.Ico_eq_Iio, exact is_open_Iio}
lemma open_embedding_coe : open_embedding (coe : ℝ≥0 → ℝ≥0∞) :=
⟨embedding_coe, by { convert is_open_ne_top, ext (x|_); simp [none_eq_top, some_eq_coe] }⟩
lemma coe_range_mem_nhds : range (coe : ℝ≥0 → ℝ≥0∞) ∈ 𝓝 (r : ℝ≥0∞) :=
is_open.mem_nhds open_embedding_coe.open_range $ mem_range_self _
@[norm_cast] lemma tendsto_coe {f : filter α} {m : α → ℝ≥0} {a : ℝ≥0} :
tendsto (λa, (m a : ℝ≥0∞)) f (𝓝 ↑a) ↔ tendsto m f (𝓝 a) :=
embedding_coe.tendsto_nhds_iff.symm
lemma continuous_coe : continuous (coe : ℝ≥0 → ℝ≥0∞) :=
embedding_coe.continuous
lemma continuous_coe_iff {α} [topological_space α] {f : α → ℝ≥0} :
continuous (λa, (f a : ℝ≥0∞)) ↔ continuous f :=
embedding_coe.continuous_iff.symm
lemma nhds_coe {r : ℝ≥0} : 𝓝 (r : ℝ≥0∞) = (𝓝 r).map coe :=
(open_embedding_coe.map_nhds_eq r).symm
lemma tendsto_nhds_coe_iff {α : Type*} {l : filter α} {x : ℝ≥0} {f : ℝ≥0∞ → α} :
tendsto f (𝓝 ↑x) l ↔ tendsto (f ∘ coe : ℝ≥0 → α) (𝓝 x) l :=
show _ ≤ _ ↔ _ ≤ _, by rw [nhds_coe, filter.map_map]
lemma continuous_at_coe_iff {α : Type*} [topological_space α] {x : ℝ≥0} {f : ℝ≥0∞ → α} :
continuous_at f (↑x) ↔ continuous_at (f ∘ coe : ℝ≥0 → α) x :=
tendsto_nhds_coe_iff
lemma nhds_coe_coe {r p : ℝ≥0} :
𝓝 ((r : ℝ≥0∞), (p : ℝ≥0∞)) = (𝓝 (r, p)).map (λp:ℝ≥0×ℝ≥0, (p.1, p.2)) :=
((open_embedding_coe.prod open_embedding_coe).map_nhds_eq (r, p)).symm
lemma continuous_of_real : continuous ennreal.of_real :=
(continuous_coe_iff.2 continuous_id).comp nnreal.continuous_of_real
lemma tendsto_of_real {f : filter α} {m : α → ℝ} {a : ℝ} (h : tendsto m f (𝓝 a)) :
tendsto (λa, ennreal.of_real (m a)) f (𝓝 (ennreal.of_real a)) :=
tendsto.comp (continuous.tendsto continuous_of_real _) h
lemma tendsto_to_nnreal {a : ℝ≥0∞} (ha : a ≠ ⊤) :
tendsto ennreal.to_nnreal (𝓝 a) (𝓝 a.to_nnreal) :=
begin
lift a to ℝ≥0 using ha,
rw [nhds_coe, tendsto_map'_iff],
exact tendsto_id
end
lemma eventually_eq_of_to_real_eventually_eq {l : filter α} {f g : α → ℝ≥0∞}
(hfi : ∀ᶠ x in l, f x ≠ ∞) (hgi : ∀ᶠ x in l, g x ≠ ∞)
(hfg : (λ x, (f x).to_real) =ᶠ[l] (λ x, (g x).to_real)) :
f =ᶠ[l] g :=
begin
filter_upwards [hfi, hgi, hfg],
intros x hfx hgx hfgx,
rwa ← ennreal.to_real_eq_to_real hfx hgx,
end
lemma continuous_on_to_nnreal : continuous_on ennreal.to_nnreal {a | a ≠ ∞} :=
λ a ha, continuous_at.continuous_within_at (tendsto_to_nnreal ha)
lemma tendsto_to_real {a : ℝ≥0∞} (ha : a ≠ ⊤) : tendsto ennreal.to_real (𝓝 a) (𝓝 a.to_real) :=
nnreal.tendsto_coe.2 $ tendsto_to_nnreal ha
/-- The set of finite `ℝ≥0∞` numbers is homeomorphic to `ℝ≥0`. -/
def ne_top_homeomorph_nnreal : {a | a ≠ ∞} ≃ₜ ℝ≥0 :=
{ continuous_to_fun := continuous_on_iff_continuous_restrict.1 continuous_on_to_nnreal,
continuous_inv_fun := continuous_subtype_mk _ continuous_coe,
.. ne_top_equiv_nnreal }
/-- The set of finite `ℝ≥0∞` numbers is homeomorphic to `ℝ≥0`. -/
def lt_top_homeomorph_nnreal : {a | a < ∞} ≃ₜ ℝ≥0 :=
by refine (homeomorph.set_congr $ set.ext $ λ x, _).trans ne_top_homeomorph_nnreal;
simp only [mem_set_of_eq, lt_top_iff_ne_top]
lemma nhds_top : 𝓝 ∞ = ⨅ a ≠ ∞, 𝓟 (Ioi a) :=
nhds_top_order.trans $ by simp [lt_top_iff_ne_top, Ioi]
lemma nhds_top' : 𝓝 ∞ = ⨅ r : ℝ≥0, 𝓟 (Ioi r) :=
nhds_top.trans $ infi_ne_top _
lemma nhds_top_basis : (𝓝 ∞).has_basis (λ a, a < ∞) (λ a, Ioi a) := nhds_top_basis
lemma tendsto_nhds_top_iff_nnreal {m : α → ℝ≥0∞} {f : filter α} :
tendsto m f (𝓝 ⊤) ↔ ∀ x : ℝ≥0, ∀ᶠ a in f, ↑x < m a :=
by simp only [nhds_top', tendsto_infi, tendsto_principal, mem_Ioi]
lemma tendsto_nhds_top_iff_nat {m : α → ℝ≥0∞} {f : filter α} :
tendsto m f (𝓝 ⊤) ↔ ∀ n : ℕ, ∀ᶠ a in f, ↑n < m a :=
tendsto_nhds_top_iff_nnreal.trans ⟨λ h n, by simpa only [ennreal.coe_nat] using h n,
λ h x, let ⟨n, hn⟩ := exists_nat_gt x in
(h n).mono (λ y, lt_trans $ by rwa [← ennreal.coe_nat, coe_lt_coe])⟩
lemma tendsto_nhds_top {m : α → ℝ≥0∞} {f : filter α}
(h : ∀ n : ℕ, ∀ᶠ a in f, ↑n < m a) : tendsto m f (𝓝 ⊤) :=
tendsto_nhds_top_iff_nat.2 h
lemma tendsto_nat_nhds_top : tendsto (λ n : ℕ, ↑n) at_top (𝓝 ∞) :=
tendsto_nhds_top $ λ n, mem_at_top_sets.2
⟨n+1, λ m hm, ennreal.coe_nat_lt_coe_nat.2 $ nat.lt_of_succ_le hm⟩
@[simp, norm_cast] lemma tendsto_coe_nhds_top {f : α → ℝ≥0} {l : filter α} :
tendsto (λ x, (f x : ℝ≥0∞)) l (𝓝 ∞) ↔ tendsto f l at_top :=
by rw [tendsto_nhds_top_iff_nnreal, at_top_basis_Ioi.tendsto_right_iff];
[simp, apply_instance, apply_instance]
lemma nhds_zero : 𝓝 (0 : ℝ≥0∞) = ⨅a ≠ 0, 𝓟 (Iio a) :=
nhds_bot_order.trans $ by simp [bot_lt_iff_ne_bot, Iio]
lemma nhds_zero_basis : (𝓝 (0 : ℝ≥0∞)).has_basis (λ a : ℝ≥0∞, 0 < a) (λ a, Iio a) := nhds_bot_basis
lemma nhds_zero_basis_Iic : (𝓝 (0 : ℝ≥0∞)).has_basis (λ a : ℝ≥0∞, 0 < a) Iic := nhds_bot_basis_Iic
@[instance] lemma nhds_within_Ioi_coe_ne_bot {r : ℝ≥0} : (𝓝[Ioi r] (r : ℝ≥0∞)).ne_bot :=
nhds_within_Ioi_self_ne_bot' ennreal.coe_lt_top
@[instance] lemma nhds_within_Ioi_zero_ne_bot : (𝓝[Ioi 0] (0 : ℝ≥0∞)).ne_bot :=
nhds_within_Ioi_coe_ne_bot
-- using Icc because
-- • don't have 'Ioo (x - ε) (x + ε) ∈ 𝓝 x' unless x > 0
-- • (x - y ≤ ε ↔ x ≤ ε + y) is true, while (x - y < ε ↔ x < ε + y) is not
lemma Icc_mem_nhds (xt : x ≠ ⊤) (ε0 : ε ≠ 0) : Icc (x - ε) (x + ε) ∈ 𝓝 x :=
begin
rw _root_.mem_nhds_iff,
by_cases x0 : x = 0,
{ use Iio (x + ε),
have : Iio (x + ε) ⊆ Icc (x - ε) (x + ε), assume a, rw x0, simpa using le_of_lt,
use this, exact ⟨is_open_Iio, mem_Iio_self_add xt ε0⟩ },
{ use Ioo (x - ε) (x + ε), use Ioo_subset_Icc_self,
exact ⟨is_open_Ioo, mem_Ioo_self_sub_add xt x0 ε0 ε0 ⟩ }
end
lemma nhds_of_ne_top (xt : x ≠ ⊤) : 𝓝 x = ⨅ ε > 0, 𝓟 (Icc (x - ε) (x + ε)) :=
begin
refine le_antisymm _ _,
-- first direction
simp only [le_infi_iff, le_principal_iff], assume ε ε0, exact Icc_mem_nhds xt ε0.lt.ne',
-- second direction
rw nhds_generate_from, refine le_infi (assume s, le_infi $ assume hs, _),
rcases hs with ⟨xs, ⟨a, (rfl : s = Ioi a)|(rfl : s = Iio a)⟩⟩,
{ rcases exists_between xs with ⟨b, ab, bx⟩,
have xb_pos : 0 < x - b := ennreal.sub_pos.2 bx,
have xxb : x - (x - b) = b := sub_sub_cancel xt bx.le,
refine infi_le_of_le (x - b) (infi_le_of_le xb_pos _),
simp only [mem_principal, le_principal_iff],
assume y, rintros ⟨h₁, h₂⟩, rw xxb at h₁, calc a < b : ab ... ≤ y : h₁ },
{ rcases exists_between xs with ⟨b, xb, ba⟩,
have bx_pos : 0 < b - x := ennreal.sub_pos.2 xb,
have xbx : x + (b - x) = b := add_sub_cancel_of_le xb.le,
refine infi_le_of_le (b - x) (infi_le_of_le bx_pos _),
simp only [mem_principal, le_principal_iff],
assume y, rintros ⟨h₁, h₂⟩, rw xbx at h₂, calc y ≤ b : h₂ ... < a : ba },
end
/-- Characterization of neighborhoods for `ℝ≥0∞` numbers. See also `tendsto_order`
for a version with strict inequalities. -/
protected theorem tendsto_nhds {f : filter α} {u : α → ℝ≥0∞} {a : ℝ≥0∞} (ha : a ≠ ⊤) :
tendsto u f (𝓝 a) ↔ ∀ ε > 0, ∀ᶠ x in f, (u x) ∈ Icc (a - ε) (a + ε) :=
by simp only [nhds_of_ne_top ha, tendsto_infi, tendsto_principal, mem_Icc]
protected lemma tendsto_at_top [nonempty β] [semilattice_sup β] {f : β → ℝ≥0∞} {a : ℝ≥0∞}
(ha : a ≠ ⊤) : tendsto f at_top (𝓝 a) ↔ ∀ε>0, ∃N, ∀n≥N, (f n) ∈ Icc (a - ε) (a + ε) :=
by simp only [ennreal.tendsto_nhds ha, mem_at_top_sets, mem_set_of_eq, filter.eventually]
instance : has_continuous_add ℝ≥0∞ :=
begin
refine ⟨continuous_iff_continuous_at.2 _⟩,
rintro ⟨(_|a), b⟩,
{ exact tendsto_nhds_top_mono' continuous_at_fst (λ p, le_add_right le_rfl) },
rcases b with (_|b),
{ exact tendsto_nhds_top_mono' continuous_at_snd (λ p, le_add_left le_rfl) },
simp only [continuous_at, some_eq_coe, nhds_coe_coe, ← coe_add, tendsto_map'_iff, (∘),
tendsto_coe, tendsto_add]
end
protected lemma tendsto_at_top_zero [hβ : nonempty β] [semilattice_sup β] {f : β → ℝ≥0∞} :
filter.at_top.tendsto f (𝓝 0) ↔ ∀ ε > 0, ∃ N, ∀ n ≥ N, f n ≤ ε :=
begin
rw ennreal.tendsto_at_top zero_ne_top,
{ simp_rw [set.mem_Icc, zero_add, zero_sub, zero_le _, true_and], },
{ exact hβ, },
end
protected lemma tendsto_mul (ha : a ≠ 0 ∨ b ≠ ⊤) (hb : b ≠ 0 ∨ a ≠ ⊤) :
tendsto (λp:ℝ≥0∞×ℝ≥0∞, p.1 * p.2) (𝓝 (a, b)) (𝓝 (a * b)) :=
have ht : ∀b:ℝ≥0∞, b ≠ 0 → tendsto (λp:ℝ≥0∞×ℝ≥0∞, p.1 * p.2) (𝓝 ((⊤:ℝ≥0∞), b)) (𝓝 ⊤),
begin
refine assume b hb, tendsto_nhds_top_iff_nnreal.2 $ assume n, _,
rcases lt_iff_exists_nnreal_btwn.1 (pos_iff_ne_zero.2 hb) with ⟨ε, hε, hεb⟩,
replace hε : 0 < ε, from coe_pos.1 hε,
filter_upwards [prod_is_open.mem_nhds (lt_mem_nhds $ @coe_lt_top (n / ε)) (lt_mem_nhds hεb)],
rintros ⟨a₁, a₂⟩ ⟨h₁, h₂⟩,
dsimp at h₁ h₂ ⊢,
rw [← div_mul_cancel n hε.ne', coe_mul],
exact mul_lt_mul h₁ h₂
end,
begin
cases a, {simp [none_eq_top] at hb, simp [none_eq_top, ht b hb, top_mul, hb] },
cases b, {
simp [none_eq_top] at ha,
simp [*, nhds_swap (a : ℝ≥0∞) ⊤, none_eq_top, some_eq_coe, top_mul, tendsto_map'_iff, (∘),
mul_comm] },
simp [some_eq_coe, nhds_coe_coe, tendsto_map'_iff, (∘)],
simp only [coe_mul.symm, tendsto_coe, tendsto_mul]
end
protected lemma tendsto.mul {f : filter α} {ma : α → ℝ≥0∞} {mb : α → ℝ≥0∞} {a b : ℝ≥0∞}
(hma : tendsto ma f (𝓝 a)) (ha : a ≠ 0 ∨ b ≠ ⊤) (hmb : tendsto mb f (𝓝 b)) (hb : b ≠ 0 ∨ a ≠ ⊤) :
tendsto (λa, ma a * mb a) f (𝓝 (a * b)) :=
show tendsto ((λp:ℝ≥0∞×ℝ≥0∞, p.1 * p.2) ∘ (λa, (ma a, mb a))) f (𝓝 (a * b)), from
tendsto.comp (ennreal.tendsto_mul ha hb) (hma.prod_mk_nhds hmb)
protected lemma tendsto.const_mul {f : filter α} {m : α → ℝ≥0∞} {a b : ℝ≥0∞}
(hm : tendsto m f (𝓝 b)) (hb : b ≠ 0 ∨ a ≠ ⊤) : tendsto (λb, a * m b) f (𝓝 (a * b)) :=
by_cases
(assume : a = 0, by simp [this, tendsto_const_nhds])
(assume ha : a ≠ 0, ennreal.tendsto.mul tendsto_const_nhds (or.inl ha) hm hb)
protected lemma tendsto.mul_const {f : filter α} {m : α → ℝ≥0∞} {a b : ℝ≥0∞}
(hm : tendsto m f (𝓝 a)) (ha : a ≠ 0 ∨ b ≠ ⊤) : tendsto (λx, m x * b) f (𝓝 (a * b)) :=
by simpa only [mul_comm] using ennreal.tendsto.const_mul hm ha
lemma tendsto_finset_prod_of_ne_top {ι : Type*} {f : ι → α → ℝ≥0∞} {x : filter α} {a : ι → ℝ≥0∞}
(s : finset ι) (h : ∀ i ∈ s, tendsto (f i) x (𝓝 (a i))) (h' : ∀ i ∈ s, a i ≠ ∞):
tendsto (λ b, ∏ c in s, f c b) x (𝓝 (∏ c in s, a c)) :=
begin
induction s using finset.induction with a s has IH, { simp [tendsto_const_nhds] },
simp only [finset.prod_insert has],
apply tendsto.mul (h _ (finset.mem_insert_self _ _)),
{ right,
exact (prod_lt_top (λ i hi, h' _ (finset.mem_insert_of_mem hi))).ne },
{ exact IH (λ i hi, h _ (finset.mem_insert_of_mem hi))
(λ i hi, h' _ (finset.mem_insert_of_mem hi)) },
{ exact or.inr (h' _ (finset.mem_insert_self _ _)) }
end
protected lemma continuous_at_const_mul {a b : ℝ≥0∞} (h : a ≠ ⊤ ∨ b ≠ 0) :
continuous_at ((*) a) b :=
tendsto.const_mul tendsto_id h.symm
protected lemma continuous_at_mul_const {a b : ℝ≥0∞} (h : a ≠ ⊤ ∨ b ≠ 0) :
continuous_at (λ x, x * a) b :=
tendsto.mul_const tendsto_id h.symm
protected lemma continuous_const_mul {a : ℝ≥0∞} (ha : a ≠ ⊤) : continuous ((*) a) :=
continuous_iff_continuous_at.2 $ λ x, ennreal.continuous_at_const_mul (or.inl ha)
protected lemma continuous_mul_const {a : ℝ≥0∞} (ha : a ≠ ⊤) : continuous (λ x, x * a) :=
continuous_iff_continuous_at.2 $ λ x, ennreal.continuous_at_mul_const (or.inl ha)
lemma le_of_forall_lt_one_mul_le {x y : ℝ≥0∞} (h : ∀ a < 1, a * x ≤ y) : x ≤ y :=
begin
have : tendsto (* x) (𝓝[Iio 1] 1) (𝓝 (1 * x)) :=
(ennreal.continuous_at_mul_const (or.inr one_ne_zero)).mono_left inf_le_left,
rw one_mul at this,
haveI : (𝓝[Iio 1] (1 : ℝ≥0∞)).ne_bot := nhds_within_Iio_self_ne_bot' ennreal.zero_lt_one,
exact le_of_tendsto this (eventually_nhds_within_iff.2 $ eventually_of_forall h)
end
lemma infi_mul_left' {ι} {f : ι → ℝ≥0∞} {a : ℝ≥0∞}
(h : a = ⊤ → (⨅ i, f i) = 0 → ∃ i, f i = 0) (h0 : a = 0 → nonempty ι) :
(⨅ i, a * f i) = a * ⨅ i, f i :=
begin
by_cases H : a = ⊤ ∧ (⨅ i, f i) = 0,
{ rcases h H.1 H.2 with ⟨i, hi⟩,
rw [H.2, mul_zero, ← bot_eq_zero, infi_eq_bot],
exact λ b hb, ⟨i, by rwa [hi, mul_zero, ← bot_eq_zero]⟩ },
{ rw not_and_distrib at H,
casesI is_empty_or_nonempty ι,
{ rw [infi_of_empty, infi_of_empty, mul_top, if_neg],
exact mt h0 (not_nonempty_iff.2 ‹_›) },
{ exact (map_infi_of_continuous_at_of_monotone' (ennreal.continuous_at_const_mul H)
ennreal.mul_left_mono).symm } }
end
lemma infi_mul_left {ι} [nonempty ι] {f : ι → ℝ≥0∞} {a : ℝ≥0∞}
(h : a = ⊤ → (⨅ i, f i) = 0 → ∃ i, f i = 0) :
(⨅ i, a * f i) = a * ⨅ i, f i :=
infi_mul_left' h (λ _, ‹nonempty ι›)
lemma infi_mul_right' {ι} {f : ι → ℝ≥0∞} {a : ℝ≥0∞}
(h : a = ⊤ → (⨅ i, f i) = 0 → ∃ i, f i = 0) (h0 : a = 0 → nonempty ι) :
(⨅ i, f i * a) = (⨅ i, f i) * a :=
by simpa only [mul_comm a] using infi_mul_left' h h0
lemma infi_mul_right {ι} [nonempty ι] {f : ι → ℝ≥0∞} {a : ℝ≥0∞}
(h : a = ⊤ → (⨅ i, f i) = 0 → ∃ i, f i = 0) :
(⨅ i, f i * a) = (⨅ i, f i) * a :=
infi_mul_right' h (λ _, ‹nonempty ι›)
protected lemma continuous_inv : continuous (has_inv.inv : ℝ≥0∞ → ℝ≥0∞) :=
continuous_iff_continuous_at.2 $ λ a, tendsto_order.2
⟨begin
assume b hb,
simp only [@ennreal.lt_inv_iff_lt_inv b],
exact gt_mem_nhds (ennreal.lt_inv_iff_lt_inv.1 hb),
end,
begin
assume b hb,
simp only [gt_iff_lt, @ennreal.inv_lt_iff_inv_lt _ b],
exact lt_mem_nhds (ennreal.inv_lt_iff_inv_lt.1 hb)
end⟩
@[simp] protected lemma tendsto_inv_iff {f : filter α} {m : α → ℝ≥0∞} {a : ℝ≥0∞} :
tendsto (λ x, (m x)⁻¹) f (𝓝 a⁻¹) ↔ tendsto m f (𝓝 a) :=
⟨λ h, by simpa only [function.comp, ennreal.inv_inv]
using (ennreal.continuous_inv.tendsto a⁻¹).comp h,
(ennreal.continuous_inv.tendsto a).comp⟩
protected lemma tendsto.div {f : filter α} {ma : α → ℝ≥0∞} {mb : α → ℝ≥0∞} {a b : ℝ≥0∞}
(hma : tendsto ma f (𝓝 a)) (ha : a ≠ 0 ∨ b ≠ 0) (hmb : tendsto mb f (𝓝 b)) (hb : b ≠ ⊤ ∨ a ≠ ⊤) :
tendsto (λa, ma a / mb a) f (𝓝 (a / b)) :=
by { apply tendsto.mul hma _ (ennreal.tendsto_inv_iff.2 hmb) _; simp [ha, hb] }
protected lemma tendsto.const_div {f : filter α} {m : α → ℝ≥0∞} {a b : ℝ≥0∞}
(hm : tendsto m f (𝓝 b)) (hb : b ≠ ⊤ ∨ a ≠ ⊤) : tendsto (λb, a / m b) f (𝓝 (a / b)) :=
by { apply tendsto.const_mul (ennreal.tendsto_inv_iff.2 hm), simp [hb] }
protected lemma tendsto.div_const {f : filter α} {m : α → ℝ≥0∞} {a b : ℝ≥0∞}
(hm : tendsto m f (𝓝 a)) (ha : a ≠ 0 ∨ b ≠ 0) : tendsto (λx, m x / b) f (𝓝 (a / b)) :=
by { apply tendsto.mul_const hm, simp [ha] }
protected lemma tendsto_inv_nat_nhds_zero : tendsto (λ n : ℕ, (n : ℝ≥0∞)⁻¹) at_top (𝓝 0) :=
ennreal.inv_top ▸ ennreal.tendsto_inv_iff.2 tendsto_nat_nhds_top
lemma bsupr_add {ι} {s : set ι} (hs : s.nonempty) {f : ι → ℝ≥0∞} :
(⨆ i ∈ s, f i) + a = ⨆ i ∈ s, f i + a :=
begin
simp only [← Sup_image], symmetry,
rw [image_comp (+ a)],
refine is_lub.Sup_eq ((is_lub_Sup $ f '' s).is_lub_of_tendsto _ (hs.image _) _),
exacts [λ x _ y _ hxy, add_le_add hxy le_rfl,
tendsto.add (tendsto_id' inf_le_left) tendsto_const_nhds]
end
lemma Sup_add {s : set ℝ≥0∞} (hs : s.nonempty) : Sup s + a = ⨆b∈s, b + a :=
by rw [Sup_eq_supr, bsupr_add hs]
lemma supr_add {ι : Sort*} {s : ι → ℝ≥0∞} [h : nonempty ι] : supr s + a = ⨆b, s b + a :=
let ⟨x⟩ := h in
calc supr s + a = Sup (range s) + a : by rw Sup_range
... = (⨆b∈range s, b + a) : Sup_add ⟨s x, x, rfl⟩
... = _ : supr_range
lemma add_supr {ι : Sort*} {s : ι → ℝ≥0∞} [h : nonempty ι] : a + supr s = ⨆b, a + s b :=
by rw [add_comm, supr_add]; simp [add_comm]
lemma supr_add_supr {ι : Sort*} {f g : ι → ℝ≥0∞} (h : ∀i j, ∃k, f i + g j ≤ f k + g k) :
supr f + supr g = (⨆ a, f a + g a) :=
begin
by_cases hι : nonempty ι,
{ letI := hι,
refine le_antisymm _ (supr_le $ λ a, add_le_add (le_supr _ _) (le_supr _ _)),
simpa [add_supr, supr_add] using
λ i j:ι, show f i + g j ≤ ⨆ a, f a + g a, from
let ⟨k, hk⟩ := h i j in le_supr_of_le k hk },
{ have : ∀f:ι → ℝ≥0∞, (⨆i, f i) = 0 := λ f, supr_eq_zero.mpr (λ i, (hι ⟨i⟩).elim),
rw [this, this, this, zero_add] }
end
lemma supr_add_supr_of_monotone {ι : Sort*} [semilattice_sup ι]
{f g : ι → ℝ≥0∞} (hf : monotone f) (hg : monotone g) :
supr f + supr g = (⨆ a, f a + g a) :=
supr_add_supr $ assume i j, ⟨i ⊔ j, add_le_add (hf $ le_sup_left) (hg $ le_sup_right)⟩
lemma finset_sum_supr_nat {α} {ι} [semilattice_sup ι] {s : finset α} {f : α → ι → ℝ≥0∞}
(hf : ∀a, monotone (f a)) :
∑ a in s, supr (f a) = (⨆ n, ∑ a in s, f a n) :=
begin
refine finset.induction_on s _ _,
{ simp, },
{ assume a s has ih,
simp only [finset.sum_insert has],
rw [ih, supr_add_supr_of_monotone (hf a)],
assume i j h,
exact (finset.sum_le_sum $ assume a ha, hf a h) }
end
lemma mul_Sup {s : set ℝ≥0∞} {a : ℝ≥0∞} : a * Sup s = ⨆i∈s, a * i :=
begin
by_cases hs : ∀x∈s, x = (0:ℝ≥0∞),
{ have h₁ : Sup s = 0 := (bot_unique $ Sup_le $ assume a ha, (hs a ha).symm ▸ le_refl 0),
have h₂ : (⨆i ∈ s, a * i) = 0 :=
(bot_unique $ supr_le $ assume a, supr_le $ assume ha, by simp [hs a ha]),
rw [h₁, h₂, mul_zero] },
{ simp only [not_forall] at hs,
rcases hs with ⟨x, hx, hx0⟩,
have s₁ : Sup s ≠ 0 :=
pos_iff_ne_zero.1 (lt_of_lt_of_le (pos_iff_ne_zero.2 hx0) (le_Sup hx)),
have : Sup ((λb, a * b) '' s) = a * Sup s :=
is_lub.Sup_eq ((is_lub_Sup s).is_lub_of_tendsto
(assume x _ y _ h, mul_le_mul_left' h _)
⟨x, hx⟩
(ennreal.tendsto.const_mul (tendsto_id' inf_le_left) (or.inl s₁))),
rw [this.symm, Sup_image] }
end
lemma mul_supr {ι : Sort*} {f : ι → ℝ≥0∞} {a : ℝ≥0∞} : a * supr f = ⨆i, a * f i :=
by rw [← Sup_range, mul_Sup, supr_range]
lemma supr_mul {ι : Sort*} {f : ι → ℝ≥0∞} {a : ℝ≥0∞} : supr f * a = ⨆i, f i * a :=
by rw [mul_comm, mul_supr]; congr; funext; rw [mul_comm]
lemma supr_div {ι : Sort*} {f : ι → ℝ≥0∞} {a : ℝ≥0∞} : supr f / a = ⨆i, f i / a :=
supr_mul
protected lemma tendsto_coe_sub : ∀{b:ℝ≥0∞}, tendsto (λb:ℝ≥0∞, ↑r - b) (𝓝 b) (𝓝 (↑r - b)) :=
begin
refine forall_ennreal.2 ⟨λ a, _, _⟩,
{ simp [@nhds_coe a, tendsto_map'_iff, (∘), tendsto_coe, ← with_top.coe_sub],
exact tendsto_const_nhds.sub tendsto_id },
simp,
exact (tendsto.congr' (mem_of_superset (lt_mem_nhds $ @coe_lt_top r) $
by simp [le_of_lt] {contextual := tt})) tendsto_const_nhds
end
lemma sub_supr {ι : Sort*} [nonempty ι] {b : ι → ℝ≥0∞} (hr : a < ⊤) :
a - (⨆i, b i) = (⨅i, a - b i) :=
let ⟨r, eq, _⟩ := lt_iff_exists_coe.mp hr in
have Inf ((λb, ↑r - b) '' range b) = ↑r - (⨆i, b i),
from is_glb.Inf_eq $ is_lub_supr.is_glb_of_tendsto
(assume x _ y _, sub_le_sub (le_refl _))
(range_nonempty _)
(ennreal.tendsto_coe_sub.comp (tendsto_id' inf_le_left)),
by rw [eq, ←this]; simp [Inf_image, infi_range, -mem_range]; exact le_refl _
end topological_space
section tsum
variables {f g : α → ℝ≥0∞}
@[norm_cast] protected lemma has_sum_coe {f : α → ℝ≥0} {r : ℝ≥0} :
has_sum (λa, (f a : ℝ≥0∞)) ↑r ↔ has_sum f r :=
have (λs:finset α, ∑ a in s, ↑(f a)) = (coe : ℝ≥0 → ℝ≥0∞) ∘ (λs:finset α, ∑ a in s, f a),
from funext $ assume s, ennreal.coe_finset_sum.symm,
by unfold has_sum; rw [this, tendsto_coe]
protected lemma tsum_coe_eq {f : α → ℝ≥0} (h : has_sum f r) : ∑'a, (f a : ℝ≥0∞) = r :=
(ennreal.has_sum_coe.2 h).tsum_eq
protected lemma coe_tsum {f : α → ℝ≥0} : summable f → ↑(tsum f) = ∑'a, (f a : ℝ≥0∞)
| ⟨r, hr⟩ := by rw [hr.tsum_eq, ennreal.tsum_coe_eq hr]
protected lemma has_sum : has_sum f (⨆s:finset α, ∑ a in s, f a) :=
tendsto_at_top_supr $ λ s t, finset.sum_le_sum_of_subset
@[simp] protected lemma summable : summable f := ⟨_, ennreal.has_sum⟩
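-- Note that in `ℝ≥0∞` *every* function is summable: the finite partial sums form a monotone net,
-- so they converge to their supremum, and divergence is simply recorded as the value `∞`.
-- For instance `∑' n : ℕ, (1 : ℝ≥0∞) = ∞`; see `tsum_const_eq_top_of_ne_zero` below.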
lemma tsum_coe_ne_top_iff_summable {f : β → ℝ≥0} :
∑' b, (f b:ℝ≥0∞) ≠ ∞ ↔ summable f :=
begin
refine ⟨λ h, _, λ h, ennreal.coe_tsum h ▸ ennreal.coe_ne_top⟩,
lift (∑' b, (f b:ℝ≥0∞)) to ℝ≥0 using h with a ha,
refine ⟨a, ennreal.has_sum_coe.1 _⟩,
rw ha,
exact ennreal.summable.has_sum
end
protected lemma tsum_eq_supr_sum : ∑'a, f a = (⨆s:finset α, ∑ a in s, f a) :=
ennreal.has_sum.tsum_eq
protected lemma tsum_eq_supr_sum' {ι : Type*} (s : ι → finset α) (hs : ∀ t, ∃ i, t ⊆ s i) :
∑' a, f a = ⨆ i, ∑ a in s i, f a :=
begin
rw [ennreal.tsum_eq_supr_sum],
symmetry,
change (⨆i:ι, (λ t : finset α, ∑ a in t, f a) (s i)) = ⨆s:finset α, ∑ a in s, f a,
exact (finset.sum_mono_set f).supr_comp_eq hs
end
protected lemma tsum_sigma {β : α → Type*} (f : Πa, β a → ℝ≥0∞) :
∑'p:Σa, β a, f p.1 p.2 = ∑'a b, f a b :=
tsum_sigma' (assume b, ennreal.summable) ennreal.summable
protected lemma tsum_sigma' {β : α → Type*} (f : (Σ a, β a) → ℝ≥0∞) :
∑'p:(Σa, β a), f p = ∑'a b, f ⟨a, b⟩ :=
tsum_sigma' (assume b, ennreal.summable) ennreal.summable
protected lemma tsum_prod {f : α → β → ℝ≥0∞} : ∑'p:α×β, f p.1 p.2 = ∑'a, ∑'b, f a b :=
tsum_prod' ennreal.summable $ λ _, ennreal.summable
protected lemma tsum_comm {f : α → β → ℝ≥0∞} : ∑'a, ∑'b, f a b = ∑'b, ∑'a, f a b :=
tsum_comm' ennreal.summable (λ _, ennreal.summable) (λ _, ennreal.summable)
protected lemma tsum_add : ∑'a, (f a + g a) = (∑'a, f a) + (∑'a, g a) :=
tsum_add ennreal.summable ennreal.summable
protected lemma tsum_le_tsum (h : ∀a, f a ≤ g a) : ∑'a, f a ≤ ∑'a, g a :=
tsum_le_tsum h ennreal.summable ennreal.summable
protected lemma sum_le_tsum {f : α → ℝ≥0∞} (s : finset α) : ∑ x in s, f x ≤ ∑' x, f x :=
sum_le_tsum s (λ x hx, zero_le _) ennreal.summable
protected lemma tsum_eq_supr_nat' {f : ℕ → ℝ≥0∞} {N : ℕ → ℕ} (hN : tendsto N at_top at_top) :
∑'i:ℕ, f i = (⨆i:ℕ, ∑ a in finset.range (N i), f a) :=
ennreal.tsum_eq_supr_sum' _ $ λ t,
let ⟨n, hn⟩ := t.exists_nat_subset_range,
⟨k, _, hk⟩ := exists_le_of_tendsto_at_top hN 0 n in
⟨k, finset.subset.trans hn (finset.range_mono hk)⟩
protected lemma tsum_eq_supr_nat {f : ℕ → ℝ≥0∞} :
∑'i:ℕ, f i = (⨆i:ℕ, ∑ a in finset.range i, f a) :=
ennreal.tsum_eq_supr_sum' _ finset.exists_nat_subset_range
protected lemma tsum_eq_liminf_sum_nat {f : ℕ → ℝ≥0∞} :
∑' i, f i = filter.at_top.liminf (λ n, ∑ i in finset.range n, f i) :=
begin
rw [ennreal.tsum_eq_supr_nat, filter.liminf_eq_supr_infi_of_nat],
congr,
refine funext (λ n, le_antisymm _ _),
{ refine le_binfi (λ i hi, finset.sum_le_sum_of_subset_of_nonneg _ (λ _ _ _, zero_le _)),
simpa only [finset.range_subset, add_le_add_iff_right] using hi, },
{ refine le_trans (infi_le _ n) _,
simp [le_refl n, le_refl ((finset.range n).sum f)], },
end
protected lemma le_tsum (a : α) : f a ≤ ∑'a, f a :=
le_tsum' ennreal.summable a
protected lemma tsum_eq_top_of_eq_top : (∃ a, f a = ∞) → ∑' a, f a = ∞
| ⟨a, ha⟩ := top_unique $ ha ▸ ennreal.le_tsum a
@[simp] protected lemma tsum_top [nonempty α] : ∑' a : α, ∞ = ∞ :=
let ⟨a⟩ := ‹nonempty α› in ennreal.tsum_eq_top_of_eq_top ⟨a, rfl⟩
lemma tsum_const_eq_top_of_ne_zero {α : Type*} [infinite α] {c : ℝ≥0∞} (hc : c ≠ 0) :
(∑' (a : α), c) = ∞ :=
begin
have A : tendsto (λ (n : ℕ), (n : ℝ≥0∞) * c) at_top (𝓝 (∞ * c)),
{ apply ennreal.tendsto.mul_const tendsto_nat_nhds_top,
simp only [true_or, top_ne_zero, ne.def, not_false_iff] },
have B : ∀ (n : ℕ), (n : ℝ≥0∞) * c ≤ (∑' (a : α), c),
{ assume n,
rcases infinite.exists_subset_card_eq α n with ⟨s, hs⟩,
simpa [hs] using @ennreal.sum_le_tsum α (λ i, c) s },
simpa [hc] using le_of_tendsto' A B,
end
protected lemma ne_top_of_tsum_ne_top (h : ∑' a, f a ≠ ∞) (a : α) : f a ≠ ∞ :=
λ ha, h $ ennreal.tsum_eq_top_of_eq_top ⟨a, ha⟩
protected lemma tsum_mul_left : ∑'i, a * f i = a * ∑'i, f i :=
if h : ∀i, f i = 0 then by simp [h] else
let ⟨i, (hi : f i ≠ 0)⟩ := not_forall.mp h in
have sum_ne_0 : ∑'i, f i ≠ 0, from ne_of_gt $
calc 0 < f i : lt_of_le_of_ne (zero_le _) hi.symm
... ≤ ∑'i, f i : ennreal.le_tsum _,
have tendsto (λs:finset α, ∑ j in s, a * f j) at_top (𝓝 (a * ∑'i, f i)),
by rw [← show (*) a ∘ (λs:finset α, ∑ j in s, f j) = λs, ∑ j in s, a * f j,
from funext $ λ s, finset.mul_sum];
exact ennreal.tendsto.const_mul ennreal.summable.has_sum (or.inl sum_ne_0),
has_sum.tsum_eq this
protected lemma tsum_mul_right : (∑'i, f i * a) = (∑'i, f i) * a :=
by simp [mul_comm, ennreal.tsum_mul_left]
@[simp] lemma tsum_supr_eq {α : Type*} (a : α) {f : α → ℝ≥0∞} :
∑'b:α, (⨆ (h : a = b), f b) = f a :=
le_antisymm
(by rw [ennreal.tsum_eq_supr_sum]; exact supr_le (assume s,
calc (∑ b in s, ⨆ (h : a = b), f b) ≤ ∑ b in {a}, ⨆ (h : a = b), f b :
finset.sum_le_sum_of_ne_zero $ assume b _ hb,
suffices a = b, by simpa using this.symm,
classical.by_contradiction $ assume h,
by simpa [h] using hb
... = f a : by simp))
(calc f a ≤ (⨆ (h : a = a), f a) : le_supr (λh:a=a, f a) rfl
... ≤ (∑'b:α, ⨆ (h : a = b), f b) : ennreal.le_tsum _)
lemma has_sum_iff_tendsto_nat {f : ℕ → ℝ≥0∞} (r : ℝ≥0∞) :
has_sum f r ↔ tendsto (λn:ℕ, ∑ i in finset.range n, f i) at_top (𝓝 r) :=
begin
refine ⟨has_sum.tendsto_sum_nat, assume h, _⟩,
rw [← supr_eq_of_tendsto _ h, ← ennreal.tsum_eq_supr_nat],
{ exact ennreal.summable.has_sum },
{ exact assume s t hst, finset.sum_le_sum_of_subset (finset.range_subset.2 hst) }
end
lemma tendsto_nat_tsum (f : ℕ → ℝ≥0∞) :
tendsto (λn:ℕ, ∑ i in finset.range n, f i) at_top (𝓝 (∑' n, f n)) :=
by { rw ← has_sum_iff_tendsto_nat, exact ennreal.summable.has_sum }
lemma to_nnreal_apply_of_tsum_ne_top {α : Type*} {f : α → ℝ≥0∞} (hf : ∑' i, f i ≠ ∞) (x : α) :
(((ennreal.to_nnreal ∘ f) x : ℝ≥0) : ℝ≥0∞) = f x :=
coe_to_nnreal $ ennreal.ne_top_of_tsum_ne_top hf _
lemma summable_to_nnreal_of_tsum_ne_top {α : Type*} {f : α → ℝ≥0∞} (hf : ∑' i, f i ≠ ∞) :
summable (ennreal.to_nnreal ∘ f) :=
by simpa only [←tsum_coe_ne_top_iff_summable, to_nnreal_apply_of_tsum_ne_top hf] using hf
lemma tendsto_cofinite_zero_of_tsum_ne_top {α} {f : α → ℝ≥0∞} (hf : ∑' x, f x ≠ ∞) :
tendsto f cofinite (𝓝 0) :=
begin
have f_ne_top : ∀ n, f n ≠ ∞, from ennreal.ne_top_of_tsum_ne_top hf,
have h_f_coe : f = λ n, ((f n).to_nnreal : ennreal),
from funext (λ n, (coe_to_nnreal (f_ne_top n)).symm),
rw [h_f_coe, ←@coe_zero, tendsto_coe],
exact nnreal.tendsto_cofinite_zero_of_summable (summable_to_nnreal_of_tsum_ne_top hf),
end
lemma tendsto_at_top_zero_of_tsum_ne_top {f : ℕ → ℝ≥0∞} (hf : ∑' x, f x ≠ ∞) :
tendsto f at_top (𝓝 0) :=
by { rw ←nat.cofinite_eq_at_top, exact tendsto_cofinite_zero_of_tsum_ne_top hf }
/-- The sum over the complement of a finset tends to `0` when the finset grows to cover the whole
space. This does not need a summability assumption, as otherwise all sums are zero. -/
lemma tendsto_tsum_compl_at_top_zero {α : Type*} {f : α → ℝ≥0∞} (hf : ∑' x, f x ≠ ∞) :
tendsto (λ (s : finset α), ∑' b : {x // x ∉ s}, f b) at_top (𝓝 0) :=
begin
lift f to α → ℝ≥0 using ennreal.ne_top_of_tsum_ne_top hf,
convert ennreal.tendsto_coe.2 (nnreal.tendsto_tsum_compl_at_top_zero f),
ext1 s,
rw ennreal.coe_tsum,
exact nnreal.summable_comp_injective (tsum_coe_ne_top_iff_summable.1 hf) subtype.coe_injective
end
protected lemma tsum_apply {ι α : Type*} {f : ι → α → ℝ≥0∞} {x : α} :
(∑' i, f i) x = ∑' i, f i x :=
tsum_apply $ pi.summable.mpr $ λ _, ennreal.summable
lemma tsum_sub {f : ℕ → ℝ≥0∞} {g : ℕ → ℝ≥0∞} (h₁ : ∑' i, g i ≠ ∞) (h₂ : g ≤ f) :
∑' i, (f i - g i) = (∑' i, f i) - (∑' i, g i) :=
begin
have h₃: ∑' i, (f i - g i) = ∑' i, (f i - g i + g i) - ∑' i, g i,
{ rw [ennreal.tsum_add, add_sub_self h₁]},
have h₄:(λ i, (f i - g i) + (g i)) = f,
{ ext n, rw ennreal.sub_add_cancel_of_le (h₂ n)},
rw h₄ at h₃, apply h₃,
end
end tsum
lemma tendsto_to_real_iff {ι} {fi : filter ι} {f : ι → ℝ≥0∞} (hf : ∀ i, f i ≠ ∞) {x : ℝ≥0∞}
(hx : x ≠ ∞) :
fi.tendsto (λ n, (f n).to_real) (𝓝 x.to_real) ↔ fi.tendsto f (𝓝 x) :=
begin
refine ⟨λ h, _, λ h, tendsto.comp (ennreal.tendsto_to_real hx) h⟩,
have h_eq : f = (λ n, ennreal.of_real (f n).to_real),
by { ext1 n, rw ennreal.of_real_to_real (hf n), },
rw [h_eq, ← ennreal.of_real_to_real hx],
exact ennreal.tendsto_of_real h,
end
lemma tsum_coe_ne_top_iff_summable_coe {f : α → ℝ≥0} :
∑' a, (f a : ℝ≥0∞) ≠ ∞ ↔ summable (λ a, (f a : ℝ)) :=
begin
rw nnreal.summable_coe,
exact tsum_coe_ne_top_iff_summable,
end
lemma tsum_coe_eq_top_iff_not_summable_coe {f : α → ℝ≥0} :
∑' a, (f a : ℝ≥0∞) = ∞ ↔ ¬ summable (λ a, (f a : ℝ)) :=
begin
rw [← @not_not (∑' a, ↑(f a) = ⊤)],
exact not_congr tsum_coe_ne_top_iff_summable_coe
end
lemma summable_to_real {f : α → ℝ≥0∞} (hsum : ∑' x, f x ≠ ∞) :
summable (λ x, (f x).to_real) :=
begin
lift f to α → ℝ≥0 using ennreal.ne_top_of_tsum_ne_top hsum,
rwa ennreal.tsum_coe_ne_top_iff_summable_coe at hsum,
end
end ennreal
namespace nnreal
open_locale nnreal
lemma tsum_eq_to_nnreal_tsum {f : β → ℝ≥0} :
(∑' b, f b) = (∑' b, (f b : ℝ≥0∞)).to_nnreal :=
begin
by_cases h : summable f,
{ rw [← ennreal.coe_tsum h, ennreal.to_nnreal_coe] },
{ have A := tsum_eq_zero_of_not_summable h,
simp only [← ennreal.tsum_coe_ne_top_iff_summable, not_not] at h,
simp only [h, ennreal.top_to_nnreal, A] }
end
/-- Comparison test of convergence of `ℝ≥0`-valued series. -/
lemma exists_le_has_sum_of_le {f g : β → ℝ≥0} {r : ℝ≥0}
(hgf : ∀b, g b ≤ f b) (hfr : has_sum f r) : ∃p≤r, has_sum g p :=
have ∑'b, (g b : ℝ≥0∞) ≤ r,
begin
refine has_sum_le (assume b, _) ennreal.summable.has_sum (ennreal.has_sum_coe.2 hfr),
exact ennreal.coe_le_coe.2 (hgf _)
end,
let ⟨p, eq, hpr⟩ := ennreal.le_coe_iff.1 this in
⟨p, hpr, ennreal.has_sum_coe.1 $ eq ▸ ennreal.summable.has_sum⟩
/-- Comparison test of convergence of `ℝ≥0`-valued series. -/
lemma summable_of_le {f g : β → ℝ≥0} (hgf : ∀b, g b ≤ f b) : summable f → summable g
| ⟨r, hfr⟩ := let ⟨p, _, hp⟩ := exists_le_has_sum_of_le hgf hfr in hp.summable
/-- A series of non-negative real numbers converges to `r` in the sense of `has_sum` if and only if
the sequence of partial sums converges to `r`. -/
lemma has_sum_iff_tendsto_nat {f : ℕ → ℝ≥0} {r : ℝ≥0} :
has_sum f r ↔ tendsto (λn:ℕ, ∑ i in finset.range n, f i) at_top (𝓝 r) :=
begin
rw [← ennreal.has_sum_coe, ennreal.has_sum_iff_tendsto_nat],
simp only [ennreal.coe_finset_sum.symm],
exact ennreal.tendsto_coe
end
lemma not_summable_iff_tendsto_nat_at_top {f : ℕ → ℝ≥0} :
¬ summable f ↔ tendsto (λ n : ℕ, ∑ i in finset.range n, f i) at_top at_top :=
begin
split,
{ intros h,
refine ((tendsto_of_monotone _).resolve_right h).comp _,
exacts [finset.sum_mono_set _, tendsto_finset_range] },
{ rintros hnat ⟨r, hr⟩,
exact not_tendsto_nhds_of_tendsto_at_top hnat _ (has_sum_iff_tendsto_nat.1 hr) }
end
lemma summable_iff_not_tendsto_nat_at_top {f : ℕ → ℝ≥0} :
summable f ↔ ¬ tendsto (λ n : ℕ, ∑ i in finset.range n, f i) at_top at_top :=
by rw [← not_iff_not, not_not, not_summable_iff_tendsto_nat_at_top]
lemma summable_of_sum_range_le {f : ℕ → ℝ≥0} {c : ℝ≥0}
(h : ∀ n, ∑ i in finset.range n, f i ≤ c) : summable f :=
begin
apply summable_iff_not_tendsto_nat_at_top.2 (λ H, _),
rcases exists_lt_of_tendsto_at_top H 0 c with ⟨n, -, hn⟩,
exact lt_irrefl _ (hn.trans_le (h n)),
end
lemma tsum_le_of_sum_range_le {f : ℕ → ℝ≥0} {c : ℝ≥0}
(h : ∀ n, ∑ i in finset.range n, f i ≤ c) : ∑' n, f n ≤ c :=
le_of_tendsto' (has_sum_iff_tendsto_nat.1 (summable_of_sum_range_le h).has_sum) h
lemma tsum_comp_le_tsum_of_inj {β : Type*} {f : α → ℝ≥0} (hf : summable f)
{i : β → α} (hi : function.injective i) : ∑' x, f (i x) ≤ ∑' x, f x :=
tsum_le_tsum_of_inj i hi (λ c hc, zero_le _) (λ b, le_refl _) (summable_comp_injective hf hi) hf
lemma summable_sigma {β : Π x : α, Type*} {f : (Σ x, β x) → ℝ≥0} :
summable f ↔ (∀ x, summable (λ y, f ⟨x, y⟩)) ∧ summable (λ x, ∑' y, f ⟨x, y⟩) :=
begin
split,
{ simp only [← nnreal.summable_coe, nnreal.coe_tsum],
exact λ h, ⟨h.sigma_factor, h.sigma⟩ },
{ rintro ⟨h₁, h₂⟩,
simpa only [← ennreal.tsum_coe_ne_top_iff_summable, ennreal.tsum_sigma', ennreal.coe_tsum, h₁]
using h₂ }
end
lemma indicator_summable {f : α → ℝ≥0} (hf : summable f) (s : set α) :
summable (s.indicator f) :=
begin
refine nnreal.summable_of_le (λ a, le_trans (le_of_eq (s.indicator_apply f a)) _) hf,
split_ifs,
exact le_refl (f a),
exact zero_le_coe,
end
lemma tsum_indicator_ne_zero {f : α → ℝ≥0} (hf : summable f) {s : set α} (h : ∃ a ∈ s, f a ≠ 0) :
∑' x, (s.indicator f) x ≠ 0 :=
λ h', let ⟨a, ha, hap⟩ := h in
hap (trans (set.indicator_apply_eq_self.mpr (absurd ha)).symm
(((tsum_eq_zero_iff (indicator_summable hf s)).1 h') a))
open finset
/-- For `f : ℕ → ℝ≥0`, the tail sums `∑' k, f (k + i)` tend to zero as `i → ∞`. This does not
require a summability assumption on `f`, since otherwise all these sums are zero. -/
lemma tendsto_sum_nat_add (f : ℕ → ℝ≥0) : tendsto (λ i, ∑' k, f (k + i)) at_top (𝓝 0) :=
begin
rw ← tendsto_coe,
convert tendsto_sum_nat_add (λ i, (f i : ℝ)),
norm_cast,
end
lemma has_sum_lt {f g : α → ℝ≥0} {sf sg : ℝ≥0} {i : α} (h : ∀ (a : α), f a ≤ g a) (hi : f i < g i)
(hf : has_sum f sf) (hg : has_sum g sg) : sf < sg :=
begin
have A : ∀ (a : α), (f a : ℝ) ≤ g a := λ a, nnreal.coe_le_coe.2 (h a),
have : (sf : ℝ) < sg :=
has_sum_lt A (nnreal.coe_lt_coe.2 hi) (has_sum_coe.2 hf) (has_sum_coe.2 hg),
exact nnreal.coe_lt_coe.1 this
end
@[mono] lemma has_sum_strict_mono
{f g : α → ℝ≥0} {sf sg : ℝ≥0} (hf : has_sum f sf) (hg : has_sum g sg) (h : f < g) : sf < sg :=
let ⟨hle, i, hi⟩ := pi.lt_def.mp h in has_sum_lt hle hi hf hg
lemma tsum_lt_tsum {f g : α → ℝ≥0} {i : α} (h : ∀ (a : α), f a ≤ g a) (hi : f i < g i)
(hg : summable g) : ∑' n, f n < ∑' n, g n :=
has_sum_lt h hi (summable_of_le h hg).has_sum hg.has_sum
@[mono] lemma tsum_strict_mono {f g : α → ℝ≥0} (hg : summable g) (h : f < g) :
∑' n, f n < ∑' n, g n :=
let ⟨hle, i, hi⟩ := pi.lt_def.mp h in tsum_lt_tsum hle hi hg
lemma tsum_pos {g : α → ℝ≥0} (hg : summable g) (i : α) (hi : 0 < g i) :
0 < ∑' b, g b :=
by { rw ← tsum_zero, exact tsum_lt_tsum (λ a, zero_le _) hi hg }
end nnreal
namespace ennreal
lemma tsum_to_real_eq
{f : α → ℝ≥0∞} (hf : ∀ a, f a ≠ ∞) :
(∑' a, f a).to_real = ∑' a, (f a).to_real :=
begin
lift f to α → ℝ≥0 using hf,
have : (∑' (a : α), (f a : ℝ≥0∞)).to_real =
((∑' (a : α), (f a : ℝ≥0∞)).to_nnreal : ℝ≥0∞).to_real,
{ rw [ennreal.coe_to_real], refl },
rw [this, ← nnreal.tsum_eq_to_nnreal_tsum, ennreal.coe_to_real],
exact nnreal.coe_tsum
end
lemma tendsto_sum_nat_add (f : ℕ → ℝ≥0∞) (hf : ∑' i, f i ≠ ∞) :
tendsto (λ i, ∑' k, f (k + i)) at_top (𝓝 0) :=
begin
lift f to ℕ → ℝ≥0 using ennreal.ne_top_of_tsum_ne_top hf,
replace hf : summable f := tsum_coe_ne_top_iff_summable.1 hf,
simp only [← ennreal.coe_tsum, nnreal.summable_nat_add _ hf, ← ennreal.coe_zero],
exact_mod_cast nnreal.tendsto_sum_nat_add f
end
end ennreal
lemma tsum_comp_le_tsum_of_inj {β : Type*} {f : α → ℝ} (hf : summable f) (hn : ∀ a, 0 ≤ f a)
{i : β → α} (hi : function.injective i) : tsum (f ∘ i) ≤ tsum f :=
begin
lift f to α → ℝ≥0 using hn,
rw nnreal.summable_coe at hf,
simpa only [(∘), ← nnreal.coe_tsum] using nnreal.tsum_comp_le_tsum_of_inj hf hi
end
/-- Comparison test of convergence of series of non-negative real numbers. -/
lemma summable_of_nonneg_of_le {f g : β → ℝ}
(hg : ∀b, 0 ≤ g b) (hgf : ∀b, g b ≤ f b) (hf : summable f) : summable g :=
begin
lift f to β → ℝ≥0 using λ b, (hg b).trans (hgf b),
lift g to β → ℝ≥0 using hg,
rw nnreal.summable_coe at hf ⊢,
exact nnreal.summable_of_le (λ b, nnreal.coe_le_coe.1 (hgf b)) hf
end
/-- A series of non-negative real numbers converges to `r` in the sense of `has_sum` if and only if
the sequence of partial sums converges to `r`. -/
lemma has_sum_iff_tendsto_nat_of_nonneg {f : ℕ → ℝ} (hf : ∀i, 0 ≤ f i) (r : ℝ) :
has_sum f r ↔ tendsto (λ n : ℕ, ∑ i in finset.range n, f i) at_top (𝓝 r) :=
begin
lift f to ℕ → ℝ≥0 using hf,
simp only [has_sum, ← nnreal.coe_sum, nnreal.tendsto_coe'],
exact exists_congr (λ hr, nnreal.has_sum_iff_tendsto_nat)
end
lemma ennreal.of_real_tsum_of_nonneg {f : α → ℝ} (hf_nonneg : ∀ n, 0 ≤ f n) (hf : summable f) :
ennreal.of_real (∑' n, f n) = ∑' n, ennreal.of_real (f n) :=
by simp_rw [ennreal.of_real, ennreal.tsum_coe_eq (nnreal.has_sum_of_real_of_nonneg hf_nonneg hf)]
lemma not_summable_iff_tendsto_nat_at_top_of_nonneg {f : ℕ → ℝ} (hf : ∀ n, 0 ≤ f n) :
¬ summable f ↔ tendsto (λ n : ℕ, ∑ i in finset.range n, f i) at_top at_top :=
begin
lift f to ℕ → ℝ≥0 using hf,
exact_mod_cast nnreal.not_summable_iff_tendsto_nat_at_top
end
lemma summable_iff_not_tendsto_nat_at_top_of_nonneg {f : ℕ → ℝ} (hf : ∀ n, 0 ≤ f n) :
summable f ↔ ¬ tendsto (λ n : ℕ, ∑ i in finset.range n, f i) at_top at_top :=
by rw [← not_iff_not, not_not, not_summable_iff_tendsto_nat_at_top_of_nonneg hf]
lemma summable_sigma_of_nonneg {β : Π x : α, Type*} {f : (Σ x, β x) → ℝ} (hf : ∀ x, 0 ≤ f x) :
summable f ↔ (∀ x, summable (λ y, f ⟨x, y⟩)) ∧ summable (λ x, ∑' y, f ⟨x, y⟩) :=
by { lift f to (Σ x, β x) → ℝ≥0 using hf, exact_mod_cast nnreal.summable_sigma }
lemma summable_of_sum_le {ι : Type*} {f : ι → ℝ} {c : ℝ} (hf : 0 ≤ f)
(h : ∀ u : finset ι, ∑ x in u, f x ≤ c) :
summable f :=
⟨ ⨆ u : finset ι, ∑ x in u, f x,
tendsto_at_top_csupr (finset.sum_mono_set_of_nonneg hf) ⟨c, λ y ⟨u, hu⟩, hu ▸ h u⟩ ⟩
lemma summable_of_sum_range_le {f : ℕ → ℝ} {c : ℝ} (hf : ∀ n, 0 ≤ f n)
(h : ∀ n, ∑ i in finset.range n, f i ≤ c) : summable f :=
begin
apply (summable_iff_not_tendsto_nat_at_top_of_nonneg hf).2 (λ H, _),
rcases exists_lt_of_tendsto_at_top H 0 c with ⟨n, -, hn⟩,
exact lt_irrefl _ (hn.trans_le (h n)),
end
lemma tsum_le_of_sum_range_le {f : ℕ → ℝ} {c : ℝ} (hf : ∀ n, 0 ≤ f n)
(h : ∀ n, ∑ i in finset.range n, f i ≤ c) : ∑' n, f n ≤ c :=
le_of_tendsto' ((has_sum_iff_tendsto_nat_of_nonneg hf _).1
(summable_of_sum_range_le hf h).has_sum) h
/-- If a sequence `f` with non-negative terms is dominated by a summable sequence `g`,
and at least one term of `f` is strictly smaller than the corresponding term of `g`,
then the sum of `f` is strictly smaller than the sum of `g`. -/
lemma tsum_lt_tsum_of_nonneg {i : ℕ} {f g : ℕ → ℝ}
(h0 : ∀ (b : ℕ), 0 ≤ f b) (h : ∀ (b : ℕ), f b ≤ g b) (hi : f i < g i) (hg : summable g) :
∑' n, f n < ∑' n, g n :=
tsum_lt_tsum h hi (summable_of_nonneg_of_le h0 h hg) hg
section
variables [emetric_space β]
open ennreal filter emetric
/-- In an emetric ball, the distance between points is everywhere finite -/
lemma edist_ne_top_of_mem_ball {a : β} {r : ℝ≥0∞} (x y : ball a r) : edist x.1 y.1 ≠ ⊤ :=
lt_top_iff_ne_top.1 $
calc edist x y ≤ edist a x + edist a y : edist_triangle_left x.1 y.1 a
... < r + r : by rw [edist_comm a x, edist_comm a y]; exact add_lt_add x.2 y.2
... ≤ ⊤ : le_top
/-- Each ball in an extended metric space gives us a metric space, as the edist
is everywhere finite. -/
def metric_space_emetric_ball (a : β) (r : ℝ≥0∞) : metric_space (ball a r) :=
emetric_space.to_metric_space edist_ne_top_of_mem_ball
local attribute [instance] metric_space_emetric_ball
lemma nhds_eq_nhds_emetric_ball (a x : β) (r : ℝ≥0∞) (h : x ∈ ball a r) :
𝓝 x = map (coe : ball a r → β) (𝓝 ⟨x, h⟩) :=
(map_nhds_subtype_coe_eq _ $ is_open.mem_nhds emetric.is_open_ball h).symm
end
section
variable [pseudo_emetric_space α]
open emetric
lemma tendsto_iff_edist_tendsto_0 {l : filter β} {f : β → α} {y : α} :
tendsto f l (𝓝 y) ↔ tendsto (λ x, edist (f x) y) l (𝓝 0) :=
by simp only [emetric.nhds_basis_eball.tendsto_right_iff, emetric.mem_ball,
@tendsto_order ℝ≥0∞ β _ _, forall_prop_of_false ennreal.not_lt_zero, forall_const, true_and]
/-- Yet another metric characterization of Cauchy sequences. This one is often the
most efficient. -/
lemma emetric.cauchy_seq_iff_le_tendsto_0 [nonempty β] [semilattice_sup β] {s : β → α} :
cauchy_seq s ↔ (∃ (b: β → ℝ≥0∞), (∀ n m N : β, N ≤ n → N ≤ m → edist (s n) (s m) ≤ b N)
∧ (tendsto b at_top (𝓝 0))) :=
⟨begin
assume hs,
rw emetric.cauchy_seq_iff at hs,
/- `s` is a Cauchy sequence. The sequence `b` will be constructed by taking
the supremum of the distances between `s n` and `s m` for `n, m ≥ N`. -/
let b := λN, Sup ((λ(p : β × β), edist (s p.1) (s p.2))''{p | p.1 ≥ N ∧ p.2 ≥ N}),
--Prove that it bounds the distances of points in the Cauchy sequence
have C : ∀ n m N, N ≤ n → N ≤ m → edist (s n) (s m) ≤ b N,
{ refine λm n N hm hn, le_Sup _,
use (prod.mk m n),
simp only [and_true, eq_self_iff_true, set.mem_set_of_eq],
exact ⟨hm, hn⟩ },
--Prove that it tends to `0`, by using the Cauchy property of `s`
have D : tendsto b at_top (𝓝 0),
{ refine tendsto_order.2 ⟨λa ha, absurd ha (ennreal.not_lt_zero), λε εpos, _⟩,
rcases exists_between εpos with ⟨δ, δpos, δlt⟩,
rcases hs δ δpos with ⟨N, hN⟩,
refine filter.mem_at_top_sets.2 ⟨N, λn hn, _⟩,
have : b n ≤ δ := Sup_le begin
simp only [and_imp, set.mem_image, set.mem_set_of_eq, exists_imp_distrib, prod.exists],
intros d p q hp hq hd,
rw ← hd,
exact le_of_lt (hN p q (le_trans hn hp) (le_trans hn hq))
end,
simpa using lt_of_le_of_lt this δlt },
-- Conclude
exact ⟨b, ⟨C, D⟩⟩
end,
begin
rintros ⟨b, ⟨b_bound, b_lim⟩⟩,
/- b : β → ℝ≥0∞, b_bound : ∀ n m N : β, N ≤ n → N ≤ m → edist (s n) (s m) ≤ b N,
b_lim : tendsto b at_top (𝓝 0) -/
refine emetric.cauchy_seq_iff.2 (λε εpos, _),
have : ∀ᶠ n in at_top, b n < ε := (tendsto_order.1 b_lim ).2 _ εpos,
rcases filter.mem_at_top_sets.1 this with ⟨N, hN⟩,
exact ⟨N, λm n hm hn, calc
edist (s m) (s n) ≤ b N : b_bound m n N hm hn
... < ε : (hN _ (le_refl N)) ⟩
end⟩
lemma continuous_of_le_add_edist {f : α → ℝ≥0∞} (C : ℝ≥0∞)
(hC : C ≠ ⊤) (h : ∀x y, f x ≤ f y + C * edist x y) : continuous f :=
begin
rcases eq_or_ne C 0 with (rfl|C0),
{ simp only [zero_mul, add_zero] at h,
exact continuous_of_const (λ x y, le_antisymm (h _ _) (h _ _)) },
{ refine continuous_iff_continuous_at.2 (λ x, _),
by_cases hx : f x = ∞,
{ have : f =ᶠ[𝓝 x] (λ _, ∞),
{ filter_upwards [emetric.ball_mem_nhds x ennreal.coe_lt_top],
refine λ y (hy : edist y x < ⊤), _, rw edist_comm at hy,
simpa [hx, hC, hy.ne] using h x y },
exact this.continuous_at },
{ refine (ennreal.tendsto_nhds hx).2 (λ ε (ε0 : 0 < ε), _),
filter_upwards [emetric.closed_ball_mem_nhds x (ennreal.div_pos_iff.2 ⟨ε0.ne', hC⟩)],
have hεC : C * (ε / C) = ε := ennreal.mul_div_cancel' C0 hC,
refine λ y (hy : edist y x ≤ ε / C), ⟨sub_le_iff_right.2 _, _⟩,
{ rw edist_comm at hy,
calc f x ≤ f y + C * edist x y : h x y
... ≤ f y + C * (ε / C) : add_le_add_left (mul_le_mul_left' hy C) (f y)
... = f y + ε : by rw hεC },
{ calc f y ≤ f x + C * edist y x : h y x
... ≤ f x + C * (ε / C) : add_le_add_left (mul_le_mul_left' hy C) (f x)
... = f x + ε : by rw hεC } } }
end
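-- `continuous_edist` below is a typical application: it instantiates the lemma above with
-- `C = 2` and `f = λ p : α × α, edist p.1 p.2`.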
theorem continuous_edist : continuous (λp:α×α, edist p.1 p.2) :=
begin
apply continuous_of_le_add_edist 2 (by norm_num),
rintros ⟨x, y⟩ ⟨x', y'⟩,
calc edist x y ≤ edist x x' + edist x' y' + edist y' y : edist_triangle4 _ _ _ _
... = edist x' y' + (edist x x' + edist y y') : by simp [edist_comm]; cc
... ≤ edist x' y' + (edist (x, y) (x', y') + edist (x, y) (x', y')) :
add_le_add_left (add_le_add (le_max_left _ _) (le_max_right _ _)) _
... = edist x' y' + 2 * edist (x, y) (x', y') : by rw [← mul_two, mul_comm]
end
@[continuity] theorem continuous.edist [topological_space β] {f g : β → α}
(hf : continuous f) (hg : continuous g) : continuous (λb, edist (f b) (g b)) :=
continuous_edist.comp (hf.prod_mk hg : _)
theorem filter.tendsto.edist {f g : β → α} {x : filter β} {a b : α}
(hf : tendsto f x (𝓝 a)) (hg : tendsto g x (𝓝 b)) :
tendsto (λx, edist (f x) (g x)) x (𝓝 (edist a b)) :=
(continuous_edist.tendsto (a, b)).comp (hf.prod_mk_nhds hg)
lemma cauchy_seq_of_edist_le_of_tsum_ne_top {f : ℕ → α} (d : ℕ → ℝ≥0∞)
(hf : ∀ n, edist (f n) (f n.succ) ≤ d n) (hd : tsum d ≠ ∞) :
cauchy_seq f :=
begin
lift d to (ℕ → nnreal) using (λ i, ennreal.ne_top_of_tsum_ne_top hd i),
rw ennreal.tsum_coe_ne_top_iff_summable at hd,
exact cauchy_seq_of_edist_le_of_summable d hf hd
end
lemma emetric.is_closed_ball {a : α} {r : ℝ≥0∞} : is_closed (closed_ball a r) :=
is_closed_le (continuous_id.edist continuous_const) continuous_const
@[simp] lemma emetric.diam_closure (s : set α) : diam (closure s) = diam s :=
begin
refine le_antisymm (diam_le $ λ x hx y hy, _) (diam_mono subset_closure),
have : edist x y ∈ closure (Iic (diam s)),
from map_mem_closure2 (@continuous_edist α _) hx hy (λ _ _, edist_le_diam_of_mem),
rwa closure_Iic at this
end
@[simp] lemma metric.diam_closure {α : Type*} [pseudo_metric_space α] (s : set α) :
metric.diam (closure s) = diam s :=
by simp only [metric.diam, emetric.diam_closure]
lemma is_closed_set_of_lipschitz_on_with {α β} [pseudo_emetric_space α] [pseudo_emetric_space β]
(K : ℝ≥0) (s : set α) :
is_closed {f : α → β | lipschitz_on_with K f s} :=
begin
simp only [lipschitz_on_with, set_of_forall],
refine is_closed_bInter (λ x hx, is_closed_bInter $ λ y hy, is_closed_le _ _),
exacts [continuous.edist (continuous_apply x) (continuous_apply y), continuous_const]
end
lemma is_closed_set_of_lipschitz_with {α β} [pseudo_emetric_space α] [pseudo_emetric_space β]
(K : ℝ≥0) :
is_closed {f : α → β | lipschitz_with K f} :=
by simp only [← lipschitz_on_univ, is_closed_set_of_lipschitz_on_with]
namespace real
/-- For a bounded set `s : set ℝ`, its `emetric.diam` is equal to `Sup s - Inf s` reinterpreted as
`ℝ≥0∞`. -/
lemma ediam_eq {s : set ℝ} (h : bounded s) :
emetric.diam s = ennreal.of_real (Sup s - Inf s) :=
begin
rcases eq_empty_or_nonempty s with rfl|hne, { simp },
refine le_antisymm (metric.ediam_le_of_forall_dist_le $ λ x hx y hy, _) _,
{ have := real.subset_Icc_Inf_Sup_of_bounded h,
exact real.dist_le_of_mem_Icc (this hx) (this hy) },
{ apply ennreal.of_real_le_of_le_to_real,
rw [← metric.diam, ← metric.diam_closure],
have h' := real.bounded_iff_bdd_below_bdd_above.1 h,
calc Sup s - Inf s ≤ dist (Sup s) (Inf s) : le_abs_self _
... ≤ diam (closure s) :
dist_le_diam_of_mem h.closure (cSup_mem_closure hne h'.2) (cInf_mem_closure hne h'.1) }
end
/-- For a bounded set `s : set ℝ`, its `metric.diam` is equal to `Sup s - Inf s`. -/
lemma diam_eq {s : set ℝ} (h : bounded s) : metric.diam s = Sup s - Inf s :=
begin
rw [metric.diam, real.ediam_eq h, ennreal.to_real_of_real],
rw real.bounded_iff_bdd_below_bdd_above at h,
exact sub_nonneg.2 (real.Inf_le_Sup s h.1 h.2)
end
@[simp] lemma ediam_Ioo (a b : ℝ) :
emetric.diam (Ioo a b) = ennreal.of_real (b - a) :=
begin
rcases le_or_lt b a with h|h,
{ simp [h] },
{ rw [real.ediam_eq (bounded_Ioo _ _), cSup_Ioo h, cInf_Ioo h] },
end
@[simp] lemma ediam_Icc (a b : ℝ) :
emetric.diam (Icc a b) = ennreal.of_real (b - a) :=
begin
rcases le_or_lt a b with h|h,
{ rw [real.ediam_eq (bounded_Icc _ _), cSup_Icc h, cInf_Icc h] },
{ simp [h, h.le] }
end
@[simp] lemma ediam_Ico (a b : ℝ) :
emetric.diam (Ico a b) = ennreal.of_real (b - a) :=
le_antisymm (ediam_Icc a b ▸ diam_mono Ico_subset_Icc_self)
(ediam_Ioo a b ▸ diam_mono Ioo_subset_Ico_self)
@[simp] lemma ediam_Ioc (a b : ℝ) :
emetric.diam (Ioc a b) = ennreal.of_real (b - a) :=
le_antisymm (ediam_Icc a b ▸ diam_mono Ioc_subset_Icc_self)
(ediam_Ioo a b ▸ diam_mono Ioo_subset_Ioc_self)
end real
/-- If `edist (f n) (f (n+1))` is bounded above by a function `d : ℕ → ℝ≥0∞`,
then the distance from `f n` to the limit is bounded by `∑'_{k=n}^∞ d k`. -/
lemma edist_le_tsum_of_edist_le_of_tendsto {f : ℕ → α} (d : ℕ → ℝ≥0∞)
(hf : ∀ n, edist (f n) (f n.succ) ≤ d n)
{a : α} (ha : tendsto f at_top (𝓝 a)) (n : ℕ) :
edist (f n) a ≤ ∑' m, d (n + m) :=
begin
refine le_of_tendsto (tendsto_const_nhds.edist ha)
(mem_at_top_sets.2 ⟨n, λ m hnm, _⟩),
refine le_trans (edist_le_Ico_sum_of_edist_le hnm (λ k _ _, hf k)) _,
rw [finset.sum_Ico_eq_sum_range],
exact sum_le_tsum _ (λ _ _, zero_le _) ennreal.summable
end
/-- If `edist (f n) (f (n+1))` is bounded above by a function `d : ℕ → ℝ≥0∞`,
then the distance from `f 0` to the limit is bounded by `∑'_{k=0}^∞ d k`. -/
lemma edist_le_tsum_of_edist_le_of_tendsto₀ {f : ℕ → α} (d : ℕ → ℝ≥0∞)
(hf : ∀ n, edist (f n) (f n.succ) ≤ d n)
{a : α} (ha : tendsto f at_top (𝓝 a)) :
edist (f 0) a ≤ ∑' m, d m :=
by simpa using edist_le_tsum_of_edist_le_of_tendsto d hf ha 0
end --section
|
9141d8f2b94384b236ea7991f921b340ff228b81
|
ff5230333a701471f46c57e8c115a073ebaaa448
|
/library/init/meta/smt/smt_tactic.lean
|
ad179c959e4536ea985062c4db76cb754bba1a47
|
[
"Apache-2.0"
] |
permissive
|
stanford-cs242/lean
|
f81721d2b5d00bc175f2e58c57b710d465e6c858
|
7bd861261f4a37326dcf8d7a17f1f1f330e4548c
|
refs/heads/master
| 1,600,957,431,849
| 1,576,465,093,000
| 1,576,465,093,000
| 225,779,423
| 0
| 3
|
Apache-2.0
| 1,575,433,936,000
| 1,575,433,935,000
| null |
UTF-8
|
Lean
| false
| false
| 15,126
|
lean
|
/-
Copyright (c) 2016 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
prelude
import init.category
import init.meta.simp_tactic
import init.meta.smt.congruence_closure
import init.meta.smt.ematch
universe u
run_cmd mk_simp_attr `pre_smt
run_cmd mk_hinst_lemma_attr_set `ematch [] [`ematch_lhs]
/--
Configuration for the smt tactic preprocessor. The preprocessor
is applied whenever a new hypothesis is introduced.
- simp_attr: the attribute name for the simplification lemmas
that are used during the preprocessing step.
- max_steps: the maximum number of steps performed by the simplifier.
- zeta: if tt, then zeta reduction (i.e., unfolding let-expressions)
is used during preprocessing.
-/
structure smt_pre_config :=
(simp_attr : name := `pre_smt)
(max_steps : nat := 1000000)
(zeta : bool := ff)
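-- All fields carry default values, so a configuration can be built by overriding only the
-- fields of interest, e.g. `({zeta := tt} : smt_pre_config)` keeps the remaining defaults.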
/--
Configuration for the smt_state object.
- em_attr: the attribute name for the hinst_lemmas
that are used for E-matching. -/
structure smt_config :=
(cc_cfg : cc_config := {})
(em_cfg : ematch_config := {})
(pre_cfg : smt_pre_config := {})
(em_attr : name := `ematch)
meta def smt_config.set_classical (c : smt_config) (b : bool) : smt_config :=
{ cc_cfg := { em := b, ..c.cc_cfg }, ..c }
meta constant smt_goal : Type
meta def smt_state :=
list smt_goal
meta constant smt_state.mk : smt_config → tactic smt_state
meta constant smt_state.to_format : smt_state → tactic_state → format
/-- Return tt iff classical excluded middle was enabled at smt_state.mk -/
meta constant smt_state.classical : smt_state → bool
meta def smt_tactic :=
state_t smt_state tactic
meta instance : has_append smt_state :=
list.has_append
section
local attribute [reducible] smt_tactic
meta instance : monad smt_tactic := by apply_instance
meta instance : alternative smt_tactic := by apply_instance
meta instance : monad_state smt_state smt_tactic := by apply_instance
end
/- We don't use the default state_t lift operation because only
tactics that do not change hypotheses can be automatically lifted to smt_tactic. -/
meta constant tactic_to_smt_tactic (α : Type) : tactic α → smt_tactic α
meta instance : has_monad_lift tactic smt_tactic :=
⟨tactic_to_smt_tactic⟩
meta instance (α : Type) : has_coe (tactic α) (smt_tactic α) :=
⟨monad_lift⟩
meta instance : monad_fail smt_tactic :=
{ fail := λ α s, (tactic.fail (to_fmt s) : smt_tactic α), ..smt_tactic.monad }
namespace smt_tactic
open tactic (transparency)
meta constant intros : smt_tactic unit
meta constant intron : nat → smt_tactic unit
meta constant intro_lst : list name → smt_tactic unit
/--
Try to close the main goal by using equalities implied by the congruence
closure module.
-/
meta constant close : smt_tactic unit
/--
Produce new facts using heuristic lemma instantiation based on E-matching.
This tactic tries to match patterns from lemmas in the main goal with terms
in the main goal. The set of lemmas is populated with theorems
tagged with the attribute specified at smt_config.em_attr, and lemmas
added using tactics such as `smt_tactic.add_lemmas`.
The current set of lemmas can be retrieved using the tactic `smt_tactic.get_lemmas`.
Remark: the given predicate is applied to every new instance. The instance
is only added to the state if the predicate returns tt.
-/
meta constant ematch_core : (expr → bool) → smt_tactic unit
/--
Produce new facts using heuristic lemma instantiation based on E-matching.
This tactic tries to match patterns from the given lemmas with terms in
the main goal.
-/
meta constant ematch_using : hinst_lemmas → smt_tactic unit
meta constant mk_ematch_eqn_lemmas_for_core : transparency → name → smt_tactic hinst_lemmas
meta constant to_cc_state : smt_tactic cc_state
meta constant to_em_state : smt_tactic ematch_state
meta constant get_config : smt_tactic smt_config
/--
Preprocess the given term using the same simplification rules used when
we introduce a new hypothesis. The result is a pair containing the resulting
term and a proof that it is equal to the given one.
-/
meta constant preprocess : expr → smt_tactic (expr × expr)
meta constant get_lemmas : smt_tactic hinst_lemmas
meta constant set_lemmas : hinst_lemmas → smt_tactic unit
meta constant add_lemmas : hinst_lemmas → smt_tactic unit
meta def add_ematch_lemma_core (md : transparency) (as_simp : bool) (e : expr) : smt_tactic unit :=
do h ← hinst_lemma.mk_core md e as_simp,
add_lemmas (mk_hinst_singleton h)
meta def add_ematch_lemma_from_decl_core (md : transparency) (as_simp : bool) (n : name) : smt_tactic unit :=
do h ← hinst_lemma.mk_from_decl_core md n as_simp,
add_lemmas (mk_hinst_singleton h)
meta def add_ematch_eqn_lemmas_for_core (md : transparency) (n : name) : smt_tactic unit :=
do hs ← mk_ematch_eqn_lemmas_for_core md n,
add_lemmas hs
meta def ematch : smt_tactic unit :=
ematch_core (λ _, tt)
meta def failed {α} : smt_tactic α :=
tactic.failed
meta def fail {α : Type} {β : Type u} [has_to_format β] (msg : β) : smt_tactic α :=
tactic.fail msg
meta def try {α : Type} (t : smt_tactic α) : smt_tactic unit :=
⟨λ ss ts, result.cases_on (t.run ss ts)
(λ ⟨a, new_ss⟩, result.success ((), new_ss))
(λ e ref s', result.success ((), ss) ts)⟩
/-- `iterate_at_most n t`: repeat the given tactic at most n times or until t fails -/
meta def iterate_at_most : nat → smt_tactic unit → smt_tactic unit
| 0 t := return ()
| (n+1) t := (do t, iterate_at_most n t) <|> return ()
/-- `iterate_exactly n t` : execute t n times -/
meta def iterate_exactly : nat → smt_tactic unit → smt_tactic unit
| 0 t := return ()
| (n+1) t := do t, iterate_exactly n t
meta def iterate : smt_tactic unit → smt_tactic unit :=
iterate_at_most 100000
meta def eblast : smt_tactic unit :=
iterate (ematch >> try close)
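-- `eblast` is the usual saturation loop: e-match to produce new facts, try to close the goal,
-- and repeat until `ematch >> try close` fails (or the bound in `iterate` is reached). It is
-- exposed, for instance, through the interactive `eblast` tactic inside `begin [smt] ... end`.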
open tactic
protected meta def read : smt_tactic (smt_state × tactic_state) :=
do s₁ ← get,
s₂ ← tactic.read,
return (s₁, s₂)
protected meta def write : smt_state × tactic_state → smt_tactic unit :=
λ ⟨ss, ts⟩, ⟨λ _ _, result.success ((), ss) ts⟩
private meta def mk_smt_goals_for (cfg : smt_config) : list expr → list smt_goal → list expr
→ tactic (list smt_goal × list expr)
| [] sr tr := return (sr.reverse, tr.reverse)
| (tg::tgs) sr tr := do
tactic.set_goals [tg],
[new_sg] ← smt_state.mk cfg | tactic.failed,
[new_tg] ← get_goals | tactic.failed,
mk_smt_goals_for tgs (new_sg::sr) (new_tg::tr)
/- See slift -/
meta def slift_aux {α : Type} (t : tactic α) (cfg : smt_config) : smt_tactic α :=
⟨λ ss, do
_::sgs ← return ss | tactic.fail "slift tactic failed, there are no smt goals to be solved",
tg::tgs ← tactic.get_goals | tactic.failed,
tactic.set_goals [tg], a ← t,
new_tgs ← tactic.get_goals,
(new_sgs, new_tgs) ← mk_smt_goals_for cfg new_tgs [] [],
tactic.set_goals (new_tgs ++ tgs),
return (a, new_sgs ++ sgs)⟩
/--
This lift operation will restart the SMT state.
It is useful for using tactics that change the set of hypotheses. -/
meta def slift {α : Type} (t : tactic α) : smt_tactic α :=
get_config >>= slift_aux t
meta def trace_state : smt_tactic unit :=
do (s₁, s₂) ← smt_tactic.read,
trace (smt_state.to_format s₁ s₂)
meta def trace {α : Type} [has_to_tactic_format α] (a : α) : smt_tactic unit :=
tactic.trace a
meta def to_expr (q : pexpr) (allow_mvars := tt) : smt_tactic expr :=
tactic.to_expr q allow_mvars
meta def classical : smt_tactic bool :=
do s ← get,
return s.classical
meta def num_goals : smt_tactic nat :=
list.length <$> get
/- Low level primitives for managing set of goals -/
meta def get_goals : smt_tactic (list smt_goal × list expr) :=
do (g₁, _) ← smt_tactic.read,
g₂ ← tactic.get_goals,
return (g₁, g₂)
meta def set_goals : list smt_goal → list expr → smt_tactic unit :=
λ g₁ g₂, ⟨λ ss, tactic.set_goals g₂ >> return ((), g₁)⟩
private meta def all_goals_core (tac : smt_tactic unit) : list smt_goal → list expr → list smt_goal → list expr → smt_tactic unit
| [] ts acs act := set_goals acs (ts ++ act)
| (s :: ss) [] acs act := fail "ill-formed smt_state"
| (s :: ss) (t :: ts) acs act :=
do set_goals [s] [t],
tac,
(new_ss, new_ts) ← get_goals,
all_goals_core ss ts (acs ++ new_ss) (act ++ new_ts)
/-- Apply the given tactic to all goals. -/
meta def all_goals (tac : smt_tactic unit) : smt_tactic unit :=
do (ss, ts) ← get_goals,
all_goals_core tac ss ts [] []
/-- LCF-style AND_THEN tactic. It applies tac1, and if it succeeds, applies tac2 to each subgoal produced by tac1. -/
meta def seq (tac1 : smt_tactic unit) (tac2 : smt_tactic unit) : smt_tactic unit :=
do (s::ss, t::ts) ← get_goals,
set_goals [s] [t],
tac1, all_goals tac2,
(new_ss, new_ts) ← get_goals,
set_goals (new_ss ++ ss) (new_ts ++ ts)
meta instance : has_andthen (smt_tactic unit) (smt_tactic unit) (smt_tactic unit) :=
⟨seq⟩
meta def focus1 {α} (tac : smt_tactic α) : smt_tactic α :=
do (s::ss, t::ts) ← get_goals,
match ss with
| [] := tac
| _ := do
set_goals [s] [t],
a ← tac,
(ss', ts') ← get_goals,
set_goals (ss' ++ ss) (ts' ++ ts),
return a
end
meta def solve1 (tac : smt_tactic unit) : smt_tactic unit :=
do (ss, gs) ← get_goals,
match ss, gs with
| [], _ := fail "solve1 tactic failed, there isn't any goal left to focus"
| _, [] := fail "solve1 tactic failed, there isn't any smt goal left to focus"
| s::ss, g::gs :=
do set_goals [s] [g],
tac,
(ss', gs') ← get_goals,
match ss', gs' with
| [], [] := set_goals ss gs
| _, _ := fail "solve1 tactic failed, focused goal has not been solved"
end
end
meta def swap : smt_tactic unit :=
do (ss, ts) ← get_goals,
match ss, ts with
| (s₁ :: s₂ :: ss), (t₁ :: t₂ :: ts) := set_goals (s₂ :: s₁ :: ss) (t₂ :: t₁ :: ts)
| _, _ := failed
end
/-- Add a new goal for t, and the hypothesis (h : t) in the current goal. -/
meta def assert (h : name) (t : expr) : smt_tactic unit :=
tactic.assert_core h t >> swap >> intros >> swap >> try close
/-- Add the hypothesis (h : t) in the current goal if v has type t. -/
meta def assertv (h : name) (t : expr) (v : expr) : smt_tactic unit :=
tactic.assertv_core h t v >> intros >> return ()
/-- Add a new goal for t, and the hypothesis (h : t := ?M) in the current goal. -/
meta def define (h : name) (t : expr) : smt_tactic unit :=
tactic.define_core h t >> swap >> intros >> swap >> try close
/-- Add the hypothesis (h : t := v) in the current goal if v has type t. -/
meta def definev (h : name) (t : expr) (v : expr) : smt_tactic unit :=
tactic.definev_core h t v >> intros >> return ()
/-- Add (h : t := pr) to the current goal -/
meta def pose (h : name) (t : option expr := none) (pr : expr) : smt_tactic unit :=
match t with
| none := do t ← infer_type pr, definev h t pr
| some t := definev h t pr
end
/-- Add (h : t) to the current goal, given a proof (pr : t) -/
meta def note (h : name) (t : option expr := none) (pr : expr) : smt_tactic unit :=
match t with
| none := do t ← infer_type pr, assertv h t pr
| some t := assertv h t pr
end
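-- For example (illustrative), `note `h none pr` adds the fact `h : t`, inferring `t` from `pr`,
-- while `pose `h none pr` adds it as a local definition `h : t := pr` instead.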
meta def destruct (e : expr) : smt_tactic unit :=
smt_tactic.seq (tactic.destruct e) smt_tactic.intros
meta def by_cases (e : expr) : smt_tactic unit :=
do c ← classical,
if c then
destruct (expr.app (expr.const `classical.em []) e)
else do
dec_e ← (mk_app `decidable [e] <|> fail "by_cases smt_tactic failed, type is not a proposition"),
inst ← (mk_instance dec_e <|> fail "by_cases smt_tactic failed, type of given expression is not decidable"),
em ← mk_app `decidable.em [e, inst],
destruct em
meta def by_contradiction : smt_tactic unit :=
do t ← target,
c ← classical,
if t.is_false then skip
else if c then do
apply (expr.app (expr.const `classical.by_contradiction []) t),
intros
else do
dec_t ← (mk_app `decidable [t] <|> fail "by_contradiction smt_tactic failed, target is not a proposition"),
inst ← (mk_instance dec_t <|> fail "by_contradiction smt_tactic failed, target is not decidable"),
a ← mk_mapp `decidable.by_contradiction [some t, some inst],
apply a,
intros
/-- Return a proof for e, if 'e' is a known fact in the main goal. -/
meta def proof_for (e : expr) : smt_tactic expr :=
do cc ← to_cc_state, cc.proof_for e
/-- Return a refutation for e (i.e., a proof for (not e)), if 'e' has been refuted in the main goal. -/
meta def refutation_for (e : expr) : smt_tactic expr :=
do cc ← to_cc_state, cc.refutation_for e
meta def get_facts : smt_tactic (list expr) :=
do cc ← to_cc_state,
return $ cc.eqc_of expr.mk_true
meta def get_refuted_facts : smt_tactic (list expr) :=
do cc ← to_cc_state,
return $ cc.eqc_of expr.mk_false
meta def add_ematch_lemma : expr → smt_tactic unit :=
add_ematch_lemma_core reducible ff
meta def add_ematch_lhs_lemma : expr → smt_tactic unit :=
add_ematch_lemma_core reducible tt
meta def add_ematch_lemma_from_decl : name → smt_tactic unit :=
add_ematch_lemma_from_decl_core reducible ff
meta def add_ematch_lhs_lemma_from_decl : name → smt_tactic unit :=
add_ematch_lemma_from_decl_core reducible tt
meta def add_ematch_eqn_lemmas_for : name → smt_tactic unit :=
add_ematch_eqn_lemmas_for_core reducible
meta def add_lemmas_from_facts_core : list expr → smt_tactic unit
| [] := return ()
| (f::fs) := do
try (is_prop f >> guard (f.is_pi && bnot (f.is_arrow)) >> proof_for f >>= add_ematch_lemma_core reducible ff),
add_lemmas_from_facts_core fs
meta def add_lemmas_from_facts : smt_tactic unit :=
get_facts >>= add_lemmas_from_facts_core
meta def induction (e : expr) (ids : list name := []) (rec : option name := none) : smt_tactic unit :=
slift (tactic.induction e ids rec >> return ()) -- pass on the information?
meta def when (c : Prop) [decidable c] (tac : smt_tactic unit) : smt_tactic unit :=
if c then tac else skip
meta def when_tracing (n : name) (tac : smt_tactic unit) : smt_tactic unit :=
when (is_trace_enabled_for n = tt) tac
end smt_tactic
open smt_tactic
meta def using_smt {α} (t : smt_tactic α) (cfg : smt_config := {}) : tactic α :=
do ss ← smt_state.mk cfg,
(a, _) ← (do a ← t, iterate close, return a).run ss,
return a
meta def using_smt_with {α} (cfg : smt_config) (t : smt_tactic α) : tactic α :=
using_smt t cfg
|
04bee2bacb953f3edec6ca0ad6f25949b8edffdc
|
6432ea7a083ff6ba21ea17af9ee47b9c371760f7
|
/src/Lean/Elab/Structure.lean
|
f776f61f4a4fe3a2ff653749a44d44248e34fd11
|
[
"Apache-2.0",
"LLVM-exception",
"NCSA",
"LGPL-3.0-only",
"LicenseRef-scancode-inner-net-2.0",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"Spencer-94",
"LGPL-2.1-or-later",
"HPND",
"LicenseRef-scancode-pcre",
"ISC",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"SunPro",
"CMU-Mach"
] |
permissive
|
leanprover/lean4
|
4bdf9790294964627eb9be79f5e8f6157780b4cc
|
f1f9dc0f2f531af3312398999d8b8303fa5f096b
|
refs/heads/master
| 1,693,360,665,786
| 1,693,350,868,000
| 1,693,350,868,000
| 129,571,436
| 2,827
| 311
|
Apache-2.0
| 1,694,716,156,000
| 1,523,760,560,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 46,061
|
lean
|
/-
Copyright (c) 2020 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
import Lean.Class
import Lean.Parser.Command
import Lean.Meta.Closure
import Lean.Meta.SizeOf
import Lean.Meta.Injective
import Lean.Meta.Structure
import Lean.Meta.AppBuilder
import Lean.Elab.Command
import Lean.Elab.DeclModifiers
import Lean.Elab.DeclUtil
import Lean.Elab.Inductive
import Lean.Elab.DeclarationRange
import Lean.Elab.Binders
namespace Lean.Elab.Command
open Meta
open TSyntax.Compat
/-! Recall that the `structure` command syntax is
```
leading_parser (structureTk <|> classTk) >> declId >> many Term.bracketedBinder >> optional «extends» >> Term.optType >> optional (" := " >> optional structCtor >> structFields)
```
-/
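/- A small illustrative declaration of the shape handled by this elaborator (not part of this file):

     structure Point where
       x : Nat := 0
       y : Nat := 0

     structure Point3D extends Point where
       z : Nat := 0

   `Point3D` gets a subobject field `toPoint`, and the default field values become auxiliary
   `_default` definitions (see `addDefaults` below). -/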
structure StructCtorView where
ref : Syntax
modifiers : Modifiers
name : Name
declName : Name
structure StructFieldView where
ref : Syntax
modifiers : Modifiers
binderInfo : BinderInfo
declName : Name
name : Name -- The field name as it is going to be registered in the kernel. It does not include macro scopes.
rawName : Name -- Same as `name` but including macro scopes.
binders : Syntax
type? : Option Syntax
value? : Option Syntax
structure StructView where
ref : Syntax
modifiers : Modifiers
scopeLevelNames : List Name -- All `universe` declarations in the current scope
allUserLevelNames : List Name -- `scopeLevelNames` ++ explicit universe parameters provided in the `structure` command
isClass : Bool
declName : Name
scopeVars : Array Expr -- All `variable` declarations in the current scope
params : Array Expr -- Explicit parameters provided in the `structure` command
parents : Array Syntax
type : Syntax
ctor : StructCtorView
fields : Array StructFieldView
inductive StructFieldKind where
| newField | copiedField | fromParent | subobject
deriving Inhabited, DecidableEq, Repr
structure StructFieldInfo where
name : Name
declName : Name -- Remark: for `fromParent` fields, `declName` is only relevant in the generation of auxiliary "default value" functions.
fvar : Expr
kind : StructFieldKind
value? : Option Expr := none
deriving Inhabited, Repr
def StructFieldInfo.isFromParent (info : StructFieldInfo) : Bool :=
match info.kind with
| StructFieldKind.fromParent => true
| _ => false
def StructFieldInfo.isSubobject (info : StructFieldInfo) : Bool :=
match info.kind with
| StructFieldKind.subobject => true
| _ => false
structure ElabStructResult where
decl : Declaration
projInfos : List ProjectionInfo
projInstances : List Name -- projections (to parent classes) that must be marked as instances.
mctx : MetavarContext
lctx : LocalContext
localInsts : LocalInstances
defaultAuxDecls : Array (Name × Expr × Expr)
private def defaultCtorName := `mk
/-
The structure constructor syntax is
```
leading_parser try (declModifiers >> ident >> " :: ")
```
-/
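-- For example (illustrative):
--   structure Foo where
--     mkFoo ::
--     x : Nat
-- declares the constructor `Foo.mkFoo` instead of the default `Foo.mk`.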
private def expandCtor (structStx : Syntax) (structModifiers : Modifiers) (structDeclName : Name) : TermElabM StructCtorView := do
let useDefault := do
let declName := structDeclName ++ defaultCtorName
addAuxDeclarationRanges declName structStx[2] structStx[2]
pure { ref := structStx, modifiers := {}, name := defaultCtorName, declName }
if structStx[5].isNone then
useDefault
else
let optCtor := structStx[5][1]
if optCtor.isNone then
useDefault
else
let ctor := optCtor[0]
withRef ctor do
let ctorModifiers ← elabModifiers ctor[0]
checkValidCtorModifier ctorModifiers
if ctorModifiers.isPrivate && structModifiers.isPrivate then
throwError "invalid 'private' constructor in a 'private' structure"
if ctorModifiers.isProtected && structModifiers.isPrivate then
throwError "invalid 'protected' constructor in a 'private' structure"
let name := ctor[1].getId
let declName := structDeclName ++ name
let declName ← applyVisibility ctorModifiers.visibility declName
addDocString' declName ctorModifiers.docString?
addAuxDeclarationRanges declName ctor[1] ctor[1]
pure { ref := ctor, name, modifiers := ctorModifiers, declName }
def checkValidFieldModifier (modifiers : Modifiers) : TermElabM Unit := do
if modifiers.isNoncomputable then
throwError "invalid use of 'noncomputable' in field declaration"
if modifiers.isPartial then
throwError "invalid use of 'partial' in field declaration"
if modifiers.isUnsafe then
throwError "invalid use of 'unsafe' in field declaration"
if modifiers.attrs.size != 0 then
throwError "invalid use of attributes in field declaration"
/-
```
def structExplicitBinder := leading_parser atomic (declModifiers true >> "(") >> many1 ident >> optDeclSig >> optional (Term.binderTactic <|> Term.binderDefault) >> ")"
def structImplicitBinder := leading_parser atomic (declModifiers true >> "{") >> many1 ident >> declSig >> "}"
def structInstBinder := leading_parser atomic (declModifiers true >> "[") >> many1 ident >> declSig >> "]"
def structSimpleBinder := leading_parser atomic (declModifiers true >> ident) >> optDeclSig >> optional (Term.binderTactic <|> Term.binderDefault)
def structFields := leading_parser many (structExplicitBinder <|> structImplicitBinder <|> structInstBinder)
```
-/
private def expandFields (structStx : Syntax) (structModifiers : Modifiers) (structDeclName : Name) : TermElabM (Array StructFieldView) :=
let fieldBinders := if structStx[5].isNone then #[] else structStx[5][2][0].getArgs
fieldBinders.foldlM (init := #[]) fun (views : Array StructFieldView) fieldBinder => withRef fieldBinder do
let mut fieldBinder := fieldBinder
if fieldBinder.getKind == ``Parser.Command.structSimpleBinder then
fieldBinder := mkNode ``Parser.Command.structExplicitBinder
#[ fieldBinder[0], mkAtomFrom fieldBinder "(", mkNullNode #[ fieldBinder[1] ], fieldBinder[2], fieldBinder[3], fieldBinder[4], mkAtomFrom fieldBinder ")" ]
let k := fieldBinder.getKind
let binfo ←
if k == ``Parser.Command.structExplicitBinder then pure BinderInfo.default
else if k == ``Parser.Command.structImplicitBinder then pure BinderInfo.implicit
else if k == ``Parser.Command.structInstBinder then pure BinderInfo.instImplicit
else throwError "unexpected kind of structure field"
let fieldModifiers ← elabModifiers fieldBinder[0]
checkValidFieldModifier fieldModifiers
if fieldModifiers.isPrivate && structModifiers.isPrivate then
throwError "invalid 'private' field in a 'private' structure"
if fieldModifiers.isProtected && structModifiers.isPrivate then
throwError "invalid 'protected' field in a 'private' structure"
let (binders, type?) ←
if binfo == BinderInfo.default then
let (binders, type?) := expandOptDeclSig fieldBinder[3]
let optBinderTacticDefault := fieldBinder[4]
if optBinderTacticDefault.isNone then
pure (binders, type?)
else if optBinderTacticDefault[0].getKind != ``Parser.Term.binderTactic then
pure (binders, type?)
else
let binderTactic := optBinderTacticDefault[0]
match type? with
| none => throwErrorAt binderTactic "invalid field declaration, type must be provided when auto-param (tactic) is used"
| some type =>
let tac := binderTactic[2]
let name ← Term.declareTacticSyntax tac
-- The tactic should be for binders+type.
-- It is safe to reset the binders to a "null" node since there is no value to be elaborated
let type ← `(forall $(binders.getArgs):bracketedBinder*, $type)
let type ← `(autoParam $type $(mkIdentFrom tac name))
pure (mkNullNode, some type.raw)
else
let (binders, type) := expandDeclSig fieldBinder[3]
pure (binders, some type)
let value? ← if binfo != BinderInfo.default then
pure none
else
let optBinderTacticDefault := fieldBinder[4]
-- trace[Elab.struct] ">>> {optBinderTacticDefault}"
if optBinderTacticDefault.isNone then
pure none
else if optBinderTacticDefault[0].getKind == ``Parser.Term.binderTactic then
pure none
else
-- binderDefault := leading_parser " := " >> termParser
pure (some optBinderTacticDefault[0][1])
let idents := fieldBinder[2].getArgs
idents.foldlM (init := views) fun (views : Array StructFieldView) ident => withRef ident do
let rawName := ident.getId
let name := rawName.eraseMacroScopes
unless name.isAtomic do
throwErrorAt ident "invalid field name '{name.eraseMacroScopes}', field names must be atomic"
let declName := structDeclName ++ name
let declName ← applyVisibility fieldModifiers.visibility declName
addDocString' declName fieldModifiers.docString?
return views.push {
ref := ident
modifiers := fieldModifiers
binderInfo := binfo
declName
name
rawName
binders
type?
value?
}
private def validStructType (type : Expr) : Bool :=
match type with
| Expr.sort .. => true
| _ => false
private def findFieldInfo? (infos : Array StructFieldInfo) (fieldName : Name) : Option StructFieldInfo :=
infos.find? fun info => info.name == fieldName
private def containsFieldName (infos : Array StructFieldInfo) (fieldName : Name) : Bool :=
(findFieldInfo? infos fieldName).isSome
private def updateFieldInfoVal (infos : Array StructFieldInfo) (fieldName : Name) (value : Expr) : Array StructFieldInfo :=
infos.map fun info =>
if info.name == fieldName then
{ info with value? := value }
else
info
register_builtin_option structureDiamondWarning : Bool := {
defValue := false
descr := "enable/disable warning messages for structure diamonds"
}
/-- Return `some fieldName` if field `fieldName` of the parent structure `parentStructName` is already in `infos` -/
private def findExistingField? (infos : Array StructFieldInfo) (parentStructName : Name) : CoreM (Option Name) := do
let fieldNames := getStructureFieldsFlattened (← getEnv) parentStructName
for fieldName in fieldNames do
if containsFieldName infos fieldName then
return some fieldName
return none
private partial def processSubfields (structDeclName : Name) (parentFVar : Expr) (parentStructName : Name) (subfieldNames : Array Name)
(infos : Array StructFieldInfo) (k : Array StructFieldInfo → TermElabM α) : TermElabM α :=
go 0 infos
where
go (i : Nat) (infos : Array StructFieldInfo) := do
if h : i < subfieldNames.size then
let subfieldName := subfieldNames.get ⟨i, h⟩
if containsFieldName infos subfieldName then
throwError "field '{subfieldName}' from '{parentStructName}' has already been declared"
let val ← mkProjection parentFVar subfieldName
let type ← inferType val
withLetDecl subfieldName type val fun subfieldFVar =>
/- The following `declName` is only used for creating the `_default` auxiliary declaration name when
its default value is overwritten in the structure. If the default value is not overwritten, then its value is irrelevant. -/
let declName := structDeclName ++ subfieldName
let infos := infos.push { name := subfieldName, declName, fvar := subfieldFVar, kind := StructFieldKind.fromParent }
go (i+1) infos
else
k infos
/-- Given `obj.foo.bar.baz`, return `obj`. -/
private partial def getNestedProjectionArg (e : Expr) : MetaM Expr := do
if let Expr.const subProjName .. := e.getAppFn then
if let some { numParams, .. } ← getProjectionFnInfo? subProjName then
if e.getAppNumArgs == numParams + 1 then
return ← getNestedProjectionArg e.appArg!
return e
/--
Get field type of `fieldName` in `parentType`, but replace references
to other fields of that structure by existing field fvars.
Auxiliary method for `copyNewFieldsFrom`.
-/
private def getFieldType (infos : Array StructFieldInfo) (parentType : Expr) (fieldName : Name) : MetaM Expr := do
withLocalDeclD (← mkFreshId) parentType fun parent => do
let proj ← mkProjection parent fieldName
let projType ← inferType proj
/- Eliminate occurrences of `parent.field`. This happens when the structure contains dependent fields.
If the copied parent extended another structure via a subobject,
then the occurrence can also look like `parent.toGrandparent.field`
(where `toGrandparent` is not a field of the current structure). -/
let visit (e : Expr) : MetaM TransformStep := do
if let Expr.const subProjName .. := e.getAppFn then
if let some { numParams, .. } ← getProjectionFnInfo? subProjName then
let Name.str _ subFieldName .. := subProjName
| throwError "invalid projection name {subProjName}"
let args := e.getAppArgs
if let some major := args.get? numParams then
if (← getNestedProjectionArg major) == parent then
if let some existingFieldInfo := findFieldInfo? infos subFieldName then
return TransformStep.done <| mkAppN existingFieldInfo.fvar args[numParams+1:args.size]
return TransformStep.done e
let projType ← Meta.transform projType (post := visit)
if projType.containsFVar parent.fvarId! then
throwError "unsupported dependent field in {fieldName} : {projType}"
if let some info := getFieldInfo? (← getEnv) (← getStructureName parentType) fieldName then
if let some autoParamExpr := info.autoParam? then
return (← mkAppM ``autoParam #[projType, autoParamExpr])
return projType
private def toVisibility (fieldInfo : StructureFieldInfo) : CoreM Visibility := do
if isProtected (← getEnv) fieldInfo.projFn then
return Visibility.protected
else if isPrivateName fieldInfo.projFn then
return Visibility.private
else
return Visibility.regular
abbrev FieldMap := NameMap Expr -- Map from field name to expression representing the field
/-- Reduce projections of the structures in `structNames` -/
private def reduceProjs (e : Expr) (structNames : NameSet) : MetaM Expr :=
let reduce (e : Expr) : MetaM TransformStep := do
match (← reduceProjOf? e structNames.contains) with
| some v => return TransformStep.done v
| _ => return TransformStep.done e
transform e (post := reduce)
/--
Copy the default value for field `fieldName` set at structure `structName`.
The arguments for the `_default` auxiliary function are provided by `fieldMap`.
Recall that some of the entries in `fieldMap` are constructor applications, and they need
to be reduced using `reduceProjs`. Otherwise, the produced default value may be "cyclic".
That is, we reduce projections of the structures in `expandedStructNames`. Here is
an example that shows why the reduction is needed.
```
structure A where
a : Nat
structure B where
a : Nat
b : Nat
c : Nat
structure C extends B where
d : Nat
c := b + d
structure D extends A, C
#print D.c._default
```
Without the reduction, it produces
```
def D.c._default : A → Nat → Nat → Nat → Nat :=
fun toA b c d => id ({ a := toA.a, b := b, c := c : B }.b + d)
```
-/
private partial def copyDefaultValue? (fieldMap : FieldMap) (expandedStructNames : NameSet) (structName : Name) (fieldName : Name) : TermElabM (Option Expr) := do
match getDefaultFnForField? (← getEnv) structName fieldName with
| none => return none
| some defaultFn =>
let cinfo ← getConstInfo defaultFn
let us ← mkFreshLevelMVarsFor cinfo
go? (← instantiateValueLevelParams cinfo us)
where
failed : TermElabM (Option Expr) := do
logWarning s!"ignoring default value for field '{fieldName}' defined at '{structName}'"
return none
go? (e : Expr) : TermElabM (Option Expr) := do
match e with
| Expr.lam n d b c =>
if c.isExplicit then
match fieldMap.find? n with
| none => failed
| some val =>
let valType ← inferType val
if (← isDefEq valType d) then
go? (b.instantiate1 val)
else
failed
else
let arg ← mkFreshExprMVar d
go? (b.instantiate1 arg)
| e =>
let r := if e.isAppOfArity ``id 2 then e.appArg! else e
return some (← reduceProjs (← instantiateMVars r) expandedStructNames)
private partial def copyNewFieldsFrom (structDeclName : Name) (infos : Array StructFieldInfo) (parentType : Expr) (k : Array StructFieldInfo → TermElabM α) : TermElabM α := do
copyFields infos {} parentType fun infos _ _ => k infos
where
copyFields (infos : Array StructFieldInfo) (expandedStructNames : NameSet) (parentType : Expr) (k : Array StructFieldInfo → FieldMap → NameSet → TermElabM α) : TermElabM α := do
let parentStructName ← getStructureName parentType
let fieldNames := getStructureFields (← getEnv) parentStructName
let rec copy (i : Nat) (infos : Array StructFieldInfo) (fieldMap : FieldMap) (expandedStructNames : NameSet) : TermElabM α := do
if h : i < fieldNames.size then
let fieldName := fieldNames.get ⟨i, h⟩
let fieldType ← getFieldType infos parentType fieldName
match findFieldInfo? infos fieldName with
| some existingFieldInfo =>
let existingFieldType ← inferType existingFieldInfo.fvar
unless (← isDefEq fieldType existingFieldType) do
throwError "parent field type mismatch, field '{fieldName}' from parent '{parentStructName}' {← mkHasTypeButIsExpectedMsg fieldType existingFieldType}"
/- Remark: if structure has a default value for this field, it will be set at the `processOveriddenDefaultValues` below. -/
copy (i+1) infos (fieldMap.insert fieldName existingFieldInfo.fvar) expandedStructNames
| none =>
let some fieldInfo := getFieldInfo? (← getEnv) parentStructName fieldName | unreachable!
let addNewField : TermElabM α := do
let value? ← copyDefaultValue? fieldMap expandedStructNames parentStructName fieldName
withLocalDecl fieldName fieldInfo.binderInfo fieldType fun fieldFVar => do
let fieldDeclName := structDeclName ++ fieldName
let fieldDeclName ← applyVisibility (← toVisibility fieldInfo) fieldDeclName
addDocString' fieldDeclName (← findDocString? (← getEnv) fieldInfo.projFn)
let infos := infos.push { name := fieldName, declName := fieldDeclName, fvar := fieldFVar, value?,
kind := StructFieldKind.copiedField }
copy (i+1) infos (fieldMap.insert fieldName fieldFVar) expandedStructNames
if fieldInfo.subobject?.isSome then
let fieldParentStructName ← getStructureName fieldType
if (← findExistingField? infos fieldParentStructName).isSome then
-- See comment at `copyDefaultValue?`
let expandedStructNames := expandedStructNames.insert fieldParentStructName
copyFields infos expandedStructNames fieldType fun infos nestedFieldMap expandedStructNames => do
let fieldVal ← mkCompositeField fieldType nestedFieldMap
copy (i+1) infos (fieldMap.insert fieldName fieldVal) expandedStructNames
else
let subfieldNames := getStructureFieldsFlattened (← getEnv) fieldParentStructName
let fieldName := fieldInfo.fieldName
withLocalDecl fieldName fieldInfo.binderInfo fieldType fun parentFVar =>
let infos := infos.push { name := fieldName, declName := structDeclName ++ fieldName, fvar := parentFVar, kind := StructFieldKind.subobject }
processSubfields structDeclName parentFVar fieldParentStructName subfieldNames infos fun infos =>
copy (i+1) infos (fieldMap.insert fieldName parentFVar) expandedStructNames
else
addNewField
else
let infos ← processOveriddenDefaultValues infos fieldMap expandedStructNames parentStructName
k infos fieldMap expandedStructNames
copy 0 infos {} expandedStructNames
processOveriddenDefaultValues (infos : Array StructFieldInfo) (fieldMap : FieldMap) (expandedStructNames : NameSet) (parentStructName : Name) : TermElabM (Array StructFieldInfo) :=
infos.mapM fun info => do
match (← copyDefaultValue? fieldMap expandedStructNames parentStructName info.name) with
| some value => return { info with value? := value }
| none => return info
mkCompositeField (parentType : Expr) (fieldMap : FieldMap) : TermElabM Expr := do
let env ← getEnv
let Expr.const parentStructName us ← pure parentType.getAppFn | unreachable!
let parentCtor := getStructureCtor env parentStructName
let mut result := mkAppN (mkConst parentCtor.name us) parentType.getAppArgs
for fieldName in getStructureFields env parentStructName do
match fieldMap.find? fieldName with
| some val => result := mkApp result val
| none => throwError "failed to copy fields from parent structure{indentExpr parentType}" -- TODO improve error message
return result
private partial def mkToParentName (parentStructName : Name) (p : Name → Bool) : Name := Id.run do
let base := Name.mkSimple $ "to" ++ parentStructName.eraseMacroScopes.getString!
if p base then
base
else
let rec go (i : Nat) : Name :=
let curr := base.appendIndexAfter i
if p curr then curr else go (i+1)
go 1
private partial def withParents (view : StructView) (k : Array StructFieldInfo → Array Expr → TermElabM α) : TermElabM α := do
go 0 #[] #[]
where
go (i : Nat) (infos : Array StructFieldInfo) (copiedParents : Array Expr) : TermElabM α := do
if h : i < view.parents.size then
let parentStx := view.parents.get ⟨i, h⟩
withRef parentStx do
let parentType ← Term.elabType parentStx
let parentStructName ← getStructureName parentType
if let some existingFieldName ← findExistingField? infos parentStructName then
if structureDiamondWarning.get (← getOptions) then
logWarning s!"field '{existingFieldName}' from '{parentStructName}' has already been declared"
copyNewFieldsFrom view.declName infos parentType fun infos => go (i+1) infos (copiedParents.push parentType)
-- TODO: if `class`, then we need to create a let-decl that stores the local instance for the `parentStructure`
else
let env ← getEnv
let subfieldNames := getStructureFieldsFlattened env parentStructName
let toParentName := mkToParentName parentStructName fun n => !containsFieldName infos n && !subfieldNames.contains n
let binfo := if view.isClass && isClass env parentStructName then BinderInfo.instImplicit else BinderInfo.default
withLocalDecl toParentName binfo parentType fun parentFVar =>
let infos := infos.push { name := toParentName, declName := view.declName ++ toParentName, fvar := parentFVar, kind := StructFieldKind.subobject }
processSubfields view.declName parentFVar parentStructName subfieldNames infos fun infos => go (i+1) infos copiedParents
else
k infos copiedParents
private def elabFieldTypeValue (view : StructFieldView) : TermElabM (Option Expr × Option Expr) :=
Term.withAutoBoundImplicit <| Term.withAutoBoundImplicitForbiddenPred (fun n => view.name == n) <| Term.elabBinders view.binders.getArgs fun params => do
match view.type? with
| none =>
match view.value? with
| none => return (none, none)
| some valStx =>
Term.synthesizeSyntheticMVarsNoPostponing
-- TODO: add forbidden predicate using `shortDeclName` from `view`
let params ← Term.addAutoBoundImplicits params
let value ← Term.withoutAutoBoundImplicit <| Term.elabTerm valStx none
let value ← mkLambdaFVars params value
return (none, value)
| some typeStx =>
let type ← Term.elabType typeStx
Term.synthesizeSyntheticMVarsNoPostponing
let params ← Term.addAutoBoundImplicits params
match view.value? with
| none =>
let type ← mkForallFVars params type
return (type, none)
| some valStx =>
let value ← Term.withoutAutoBoundImplicit <| Term.elabTermEnsuringType valStx type
Term.synthesizeSyntheticMVarsNoPostponing
let type ← mkForallFVars params type
let value ← mkLambdaFVars params value
return (type, value)
private partial def withFields (views : Array StructFieldView) (infos : Array StructFieldInfo) (k : Array StructFieldInfo → TermElabM α) : TermElabM α := do
go 0 {} infos
where
go (i : Nat) (defaultValsOverridden : NameSet) (infos : Array StructFieldInfo) : TermElabM α := do
if h : i < views.size then
let view := views.get ⟨i, h⟩
withRef view.ref do
match findFieldInfo? infos view.name with
| none =>
let (type?, value?) ← elabFieldTypeValue view
match type?, value? with
| none, none => throwError "invalid field, type expected"
| some type, _ =>
withLocalDecl view.rawName view.binderInfo type fun fieldFVar =>
let infos := infos.push { name := view.name, declName := view.declName, fvar := fieldFVar, value? := value?,
kind := StructFieldKind.newField }
go (i+1) defaultValsOverridden infos
| none, some value =>
let type ← inferType value
withLocalDecl view.rawName view.binderInfo type fun fieldFVar =>
let infos := infos.push { name := view.name, declName := view.declName, fvar := fieldFVar, value? := value,
kind := StructFieldKind.newField }
go (i+1) defaultValsOverridden infos
| some info =>
let updateDefaultValue : TermElabM α := do
match view.value? with
| none => throwError "field '{view.name}' has been declared in parent structure"
| some valStx =>
if let some type := view.type? then
throwErrorAt type "omit field '{view.name}' type to set default value"
else
if defaultValsOverridden.contains info.name then
throwError "field '{view.name}' new default value has already been set"
let defaultValsOverridden := defaultValsOverridden.insert info.name
let mut valStx := valStx
if view.binders.getArgs.size > 0 then
valStx ← `(fun $(view.binders.getArgs)* => $valStx:term)
let fvarType ← inferType info.fvar
let value ← Term.elabTermEnsuringType valStx fvarType
pushInfoLeaf <| .ofFieldRedeclInfo { stx := view.ref }
let infos := updateFieldInfoVal infos info.name value
go (i+1) defaultValsOverridden infos
match info.kind with
| StructFieldKind.newField => throwError "field '{view.name}' has already been declared"
| StructFieldKind.subobject => throwError "unexpected subobject field reference" -- improve error message
| StructFieldKind.copiedField => updateDefaultValue
| StructFieldKind.fromParent => updateDefaultValue
else
k infos
private def getResultUniverse (type : Expr) : TermElabM Level := do
let type ← whnf type
match type with
| Expr.sort u => pure u
| _ => throwError "unexpected structure resulting type"
private def collectUsed (params : Array Expr) (fieldInfos : Array StructFieldInfo) : StateRefT CollectFVars.State MetaM Unit := do
params.forM fun p => do
let type ← inferType p
type.collectFVars
fieldInfos.forM fun info => do
let fvarType ← inferType info.fvar
fvarType.collectFVars
match info.value? with
| none => pure ()
| some value => value.collectFVars
private def removeUnused (scopeVars : Array Expr) (params : Array Expr) (fieldInfos : Array StructFieldInfo)
: TermElabM (LocalContext × LocalInstances × Array Expr) := do
let (_, used) ← (collectUsed params fieldInfos).run {}
Meta.removeUnused scopeVars used
private def withUsed {α} (scopeVars : Array Expr) (params : Array Expr) (fieldInfos : Array StructFieldInfo) (k : Array Expr → TermElabM α)
: TermElabM α := do
let (lctx, localInsts, vars) ← removeUnused scopeVars params fieldInfos
withLCtx lctx localInsts <| k vars
private def levelMVarToParam (scopeVars : Array Expr) (params : Array Expr) (fieldInfos : Array StructFieldInfo) (univToInfer? : Option LMVarId) : TermElabM (Array StructFieldInfo) := do
levelMVarToParamFVars scopeVars
levelMVarToParamFVars params
fieldInfos.mapM fun info => do
levelMVarToParamFVar info.fvar
match info.value? with
| none => pure info
| some value =>
let value ← levelMVarToParam' value
pure { info with value? := value }
where
levelMVarToParam' (type : Expr) : TermElabM Expr := do
Term.levelMVarToParam type (except := fun mvarId => univToInfer? == some mvarId)
levelMVarToParamFVars (fvars : Array Expr) : TermElabM Unit :=
fvars.forM levelMVarToParamFVar
levelMVarToParamFVar (fvar : Expr) : TermElabM Unit := do
let type ← inferType fvar
discard <| levelMVarToParam' type
private partial def collectUniversesFromFields (r : Level) (rOffset : Nat) (fieldInfos : Array StructFieldInfo) : TermElabM (Array Level) := do
let (_, us) ← go |>.run #[]
return us
where
go : StateRefT (Array Level) TermElabM Unit :=
for info in fieldInfos do
let type ← inferType info.fvar
let u ← getLevel type
let u ← instantiateLevelMVars u
match (← modifyGet fun s => accLevel u r rOffset |>.run |>.run s) with
| some _ => pure ()
| none =>
let typeType ← inferType type
let mut msg := m!"failed to compute resulting universe level of structure, field '{info.declName}' has type{indentD m!"{type} : {typeType}"}\nstructure resulting type{indentExpr (mkSort (r.addOffset rOffset))}"
if r.isMVar then
msg := msg ++ "\nrecall that Lean only infers the resulting universe level automatically when there is a unique solution for the universe level constraints, consider explicitly providing the structure resulting universe level"
throwError msg
private def updateResultingUniverse (fieldInfos : Array StructFieldInfo) (type : Expr) : TermElabM Expr := do
let r ← getResultUniverse type
let rOffset : Nat := r.getOffset
let r : Level := r.getLevelOffset
match r with
| Level.mvar mvarId =>
let us ← collectUniversesFromFields r rOffset fieldInfos
let rNew := mkResultUniverse us rOffset
assignLevelMVar mvarId rNew
instantiateMVars type
| _ => throwError "failed to compute resulting universe level of structure, provide universe explicitly"
private def collectLevelParamsInFVar (s : CollectLevelParams.State) (fvar : Expr) : TermElabM CollectLevelParams.State := do
let type ← inferType fvar
let type ← instantiateMVars type
return collectLevelParams s type
private def collectLevelParamsInFVars (fvars : Array Expr) (s : CollectLevelParams.State) : TermElabM CollectLevelParams.State :=
fvars.foldlM collectLevelParamsInFVar s
private def collectLevelParamsInStructure (structType : Expr) (scopeVars : Array Expr) (params : Array Expr) (fieldInfos : Array StructFieldInfo)
: TermElabM (Array Name) := do
let s := collectLevelParams {} structType
let s ← collectLevelParamsInFVars scopeVars s
let s ← collectLevelParamsInFVars params s
let s ← fieldInfos.foldlM (init := s) fun s info => collectLevelParamsInFVar s info.fvar
return s.params
private def addCtorFields (fieldInfos : Array StructFieldInfo) : Nat → Expr → TermElabM Expr
| 0, type => pure type
| i+1, type => do
let info := fieldInfos[i]!
let decl ← Term.getFVarLocalDecl! info.fvar
let type ← instantiateMVars type
let type := type.abstract #[info.fvar]
match info.kind with
| StructFieldKind.fromParent =>
let val := decl.value
addCtorFields fieldInfos i (type.instantiate1 val)
| _ =>
addCtorFields fieldInfos i (mkForall decl.userName decl.binderInfo decl.type type)
private def mkCtor (view : StructView) (levelParams : List Name) (params : Array Expr) (fieldInfos : Array StructFieldInfo) : TermElabM Constructor :=
withRef view.ref do
let type := mkAppN (mkConst view.declName (levelParams.map mkLevelParam)) params
let type ← addCtorFields fieldInfos fieldInfos.size type
let type ← mkForallFVars params type
let type ← instantiateMVars type
let type := type.inferImplicit params.size true
pure { name := view.ctor.declName, type }
@[extern "lean_mk_projections"]
private opaque mkProjections (env : Environment) (structName : Name) (projs : List Name) (isClass : Bool) : Except KernelException Environment
private def addProjections (structName : Name) (projs : List Name) (isClass : Bool) : TermElabM Unit := do
let env ← getEnv
let env ← ofExceptKernelException (mkProjections env structName projs isClass)
setEnv env
private def registerStructure (structName : Name) (infos : Array StructFieldInfo) : TermElabM Unit := do
let fields ← infos.filterMapM fun info => do
if info.kind == StructFieldKind.fromParent then
return none
else
return some {
fieldName := info.name
projFn := info.declName
binderInfo := (← getFVarLocalDecl info.fvar).binderInfo
autoParam? := (← inferType info.fvar).getAutoParamTactic?
subobject? :=
if info.kind == StructFieldKind.subobject then
match (← getEnv).find? info.declName with
| some (ConstantInfo.defnInfo val) =>
match val.type.getForallBody.getAppFn with
| Expr.const parentName .. => some parentName
| _ => panic! "ill-formed structure"
| _ => panic! "ill-formed environment"
else
none
}
modifyEnv fun env => Lean.registerStructure env { structName, fields }
private def mkAuxConstructions (declName : Name) : TermElabM Unit := do
let env ← getEnv
let hasUnit := env.contains `PUnit
let hasEq := env.contains `Eq
let hasHEq := env.contains `HEq
mkRecOn declName
if hasUnit then mkCasesOn declName
if hasUnit && hasEq && hasHEq then mkNoConfusion declName
private def addDefaults (lctx : LocalContext) (defaultAuxDecls : Array (Name × Expr × Expr)) : TermElabM Unit := do
let localInsts ← getLocalInstances
withLCtx lctx localInsts do
defaultAuxDecls.forM fun (declName, type, value) => do
let value ← instantiateMVars value
if value.hasExprMVar then
throwError "invalid default value for field, it contains metavariables{indentExpr value}"
/- The identity function is used as a "marker". -/
let value ← mkId value
discard <| mkAuxDefinition declName type value (zeta := true)
setReducibleAttribute declName
/--
Given `type` of the form `forall ... (source : A), B`, return `forall ... [source : A], B`.
-/
private def setSourceInstImplicit (type : Expr) : Expr :=
match type with
| .forallE _ d b _ =>
if b.isForall then
type.updateForallE! d (setSourceInstImplicit b)
else
type.updateForall! .instImplicit d b
| _ => unreachable!
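-- e.g. (illustrative) `∀ (n : Nat) (source : C α), B` becomes `∀ (n : Nat) [source : C α], B`.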
private partial def mkCoercionToCopiedParent (levelParams : List Name) (params : Array Expr) (view : StructView) (parentType : Expr) : MetaM Unit := do
let env ← getEnv
let structName := view.declName
let sourceFieldNames := getStructureFieldsFlattened env structName
let structType := mkAppN (Lean.mkConst structName (levelParams.map mkLevelParam)) params
let Expr.const parentStructName _ ← pure parentType.getAppFn | unreachable!
let binfo := if view.isClass && isClass env parentStructName then BinderInfo.instImplicit else BinderInfo.default
withLocalDeclD `self structType fun source => do
let mut declType ← instantiateMVars (← mkForallFVars params (← mkForallFVars #[source] parentType))
if view.isClass && isClass env parentStructName then
declType := setSourceInstImplicit declType
declType := declType.inferImplicit params.size true
let rec copyFields (parentType : Expr) : MetaM Expr := do
let Expr.const parentStructName us ← pure parentType.getAppFn | unreachable!
let parentCtor := getStructureCtor env parentStructName
let mut result := mkAppN (mkConst parentCtor.name us) parentType.getAppArgs
for fieldName in getStructureFields env parentStructName do
if sourceFieldNames.contains fieldName then
let fieldVal ← mkProjection source fieldName
result := mkApp result fieldVal
else
-- fieldInfo must be a field of `parentStructName`
let some fieldInfo := getFieldInfo? env parentStructName fieldName | unreachable!
if fieldInfo.subobject?.isNone then throwError "failed to build coercion to parent structure"
let resultType ← whnfD (← inferType result)
unless resultType.isForall do throwError "failed to build coercion to parent structure, unexpected type{indentExpr resultType}"
let fieldVal ← copyFields resultType.bindingDomain!
result := mkApp result fieldVal
return result
let declVal ← instantiateMVars (← mkLambdaFVars params (← mkLambdaFVars #[source] (← copyFields parentType)))
let declName := structName ++ mkToParentName (← getStructureName parentType) fun n => !env.contains (structName ++ n)
addAndCompile <| Declaration.defnDecl {
name := declName
levelParams := levelParams
type := declType
value := declVal
hints := ReducibilityHints.abbrev
safety := if view.modifiers.isUnsafe then DefinitionSafety.unsafe else DefinitionSafety.safe
}
if binfo.isInstImplicit then
addInstance declName AttributeKind.global (eval_prio default)
else
setReducibleAttribute declName
private def elabStructureView (view : StructView) : TermElabM Unit := do
view.fields.forM fun field => do
if field.declName == view.ctor.declName then
throwErrorAt field.ref "invalid field name '{field.name}', it is equal to structure constructor name"
addAuxDeclarationRanges field.declName field.ref field.ref
let type ← Term.elabType view.type
unless validStructType type do throwErrorAt view.type "expected Type"
withRef view.ref do
withParents view fun fieldInfos copiedParents => do
withFields view.fields fieldInfos fun fieldInfos => do
Term.synthesizeSyntheticMVarsNoPostponing
let u ← getResultUniverse type
let univToInfer? ← shouldInferResultUniverse u
withUsed view.scopeVars view.params fieldInfos fun scopeVars => do
let fieldInfos ← levelMVarToParam scopeVars view.params fieldInfos univToInfer?
let type ← withRef view.ref do
if univToInfer?.isSome then
updateResultingUniverse fieldInfos type
else
checkResultingUniverse (← getResultUniverse type)
pure type
trace[Elab.structure] "type: {type}"
let usedLevelNames ← collectLevelParamsInStructure type scopeVars view.params fieldInfos
match sortDeclLevelParams view.scopeLevelNames view.allUserLevelNames usedLevelNames with
| Except.error msg => withRef view.ref <| throwError msg
| Except.ok levelParams =>
let params := scopeVars ++ view.params
let ctor ← mkCtor view levelParams params fieldInfos
let type ← mkForallFVars params type
let type ← instantiateMVars type
let indType := { name := view.declName, type := type, ctors := [ctor] : InductiveType }
let decl := Declaration.inductDecl levelParams params.size [indType] view.modifiers.isUnsafe
Term.ensureNoUnassignedMVars decl
addDecl decl
let projNames := (fieldInfos.filter fun (info : StructFieldInfo) => !info.isFromParent).toList.map fun (info : StructFieldInfo) => info.declName
addProjections view.declName projNames view.isClass
registerStructure view.declName fieldInfos
mkAuxConstructions view.declName
let instParents ← fieldInfos.filterM fun info => do
let decl ← Term.getFVarLocalDecl! info.fvar
pure (info.isSubobject && decl.binderInfo.isInstImplicit)
withSaveInfoContext do -- save new env
Term.addLocalVarInfo view.ref[1] (← mkConstWithLevelParams view.declName)
if let some _ := view.ctor.ref[1].getPos? (canonicalOnly := true) then
Term.addTermInfo' view.ctor.ref[1] (← mkConstWithLevelParams view.ctor.declName) (isBinder := true)
for field in view.fields do
-- may not exist if overriding inherited field
if (← getEnv).contains field.declName then
Term.addTermInfo' field.ref (← mkConstWithLevelParams field.declName) (isBinder := true)
Term.applyAttributesAt view.declName view.modifiers.attrs AttributeApplicationTime.afterTypeChecking
let projInstances := instParents.toList.map fun info => info.declName
projInstances.forM fun declName => addInstance declName AttributeKind.global (eval_prio default)
copiedParents.forM fun parent => mkCoercionToCopiedParent levelParams params view parent
let lctx ← getLCtx
let fieldsWithDefault := fieldInfos.filter fun info => info.value?.isSome
let defaultAuxDecls ← fieldsWithDefault.mapM fun info => do
let type ← inferType info.fvar
pure (mkDefaultFnOfProjFn info.declName, type, info.value?.get!)
/- The `lctx` and `defaultAuxDecls` are used to create the auxiliary "default value" declarations.
The parameters `params` for these definitions must be marked as implicit, and all others as explicit. -/
let lctx :=
params.foldl (init := lctx) fun (lctx : LocalContext) (p : Expr) =>
if p.isFVar then
lctx.setBinderInfo p.fvarId! BinderInfo.implicit
else
lctx
let lctx :=
fieldInfos.foldl (init := lctx) fun (lctx : LocalContext) (info : StructFieldInfo) =>
if info.isFromParent then lctx -- `fromParent` fields are elaborated as let-decls, and are zeta-expanded when creating "default value" auxiliary functions
else lctx.setBinderInfo info.fvar.fvarId! BinderInfo.default
addDefaults lctx defaultAuxDecls
/-
leading_parser (structureTk <|> classTk) >> declId >> many Term.bracketedBinder >> optional «extends» >> Term.optType >> " := " >> optional structCtor >> structFields >> optDeriving
where
def «extends» := leading_parser " extends " >> sepBy1 termParser ", "
def typeSpec := leading_parser " : " >> termParser
def optType : Parser := optional typeSpec
def structFields := leading_parser many (structExplicitBinder <|> structImplicitBinder <|> structInstBinder)
def structCtor := leading_parser try (declModifiers >> ident >> " :: ")
-/
def elabStructure (modifiers : Modifiers) (stx : Syntax) : CommandElabM Unit := do
checkValidInductiveModifier modifiers
let isClass := stx[0].getKind == ``Parser.Command.classTk
let modifiers := if isClass then modifiers.addAttribute { name := `class } else modifiers
let declId := stx[1]
let params := stx[2].getArgs
let exts := stx[3]
let parents := if exts.isNone then #[] else exts[0][1].getSepArgs
let optType := stx[4]
let derivingClassViews ← getOptDerivingClasses stx[6]
let type ← if optType.isNone then `(Sort _) else pure optType[0][1]
let declName ←
runTermElabM fun scopeVars => do
let scopeLevelNames ← Term.getLevelNames
let ⟨name, declName, allUserLevelNames⟩ ← Elab.expandDeclId (← getCurrNamespace) scopeLevelNames declId modifiers
Term.withAutoBoundImplicitForbiddenPred (fun n => name == n) do
addDeclarationRanges declName stx
Term.withDeclName declName do
let ctor ← expandCtor stx modifiers declName
let fields ← expandFields stx modifiers declName
Term.withLevelNames allUserLevelNames <| Term.withAutoBoundImplicit <|
Term.elabBinders params fun params => do
Term.synthesizeSyntheticMVarsNoPostponing
let params ← Term.addAutoBoundImplicits params
let allUserLevelNames ← Term.getLevelNames
elabStructureView {
ref := stx
modifiers
scopeLevelNames
allUserLevelNames
declName
isClass
scopeVars
params
parents
type
ctor
fields
}
unless isClass do
mkSizeOfInstances declName
mkInjectiveTheorems declName
return declName
derivingClassViews.forM fun view => view.applyHandlers #[declName]
runTermElabM fun _ => Term.withDeclName declName do
Term.applyAttributesAt declName modifiers.attrs .afterCompilation
builtin_initialize registerTraceClass `Elab.structure
end Lean.Elab.Command
|
9aa1a765a3815b0638ba442c2338827b72c9aba0
|
74addaa0e41490cbaf2abd313a764c96df57b05d
|
/Mathlib/tactic/omega/misc.lean
|
40d4ff009901a0e04f7f7739c06d34d047bb440d
|
[] |
no_license
|
AurelienSaue/Mathlib4_auto
|
f538cfd0980f65a6361eadea39e6fc639e9dae14
|
590df64109b08190abe22358fabc3eae000943f2
|
refs/heads/master
| 1,683,906,849,776
| 1,622,564,669,000
| 1,622,564,669,000
| 371,723,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 1,977
|
lean
|
/-
Copyright (c) 2019 Seul Baek. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Seul Baek
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.tactic.localized
import Mathlib.PostPort
namespace Mathlib
/-
Miscellaneous.
-/
namespace omega
theorem fun_mono_2 {α : Type} {β : Type} {γ : Type} {p : α → β → γ} {a1 : α} {a2 : α} {b1 : β} {b2 : β} : a1 = a2 → b1 = b2 → p a1 b1 = p a2 b2 :=
fun (h1 : a1 = a2) (h2 : b1 = b2) =>
eq.mpr (id (Eq._oldrec (Eq.refl (p a1 b1 = p a2 b2)) h1))
(eq.mpr (id (Eq._oldrec (Eq.refl (p a2 b1 = p a2 b2)) h2)) (Eq.refl (p a2 b2)))
theorem pred_mono_2 {α : Type} {β : Type} {p : α → β → Prop} {a1 : α} {a2 : α} {b1 : β} {b2 : β} : a1 = a2 → b1 = b2 → (p a1 b1 ↔ p a2 b2) :=
fun (h1 : a1 = a2) (h2 : b1 = b2) =>
eq.mpr (id (Eq._oldrec (Eq.refl (p a1 b1 ↔ p a2 b2)) h1))
(eq.mpr (id (Eq._oldrec (Eq.refl (p a2 b1 ↔ p a2 b2)) h2)) (iff.refl (p a2 b2)))
theorem pred_mono_2' {c : Prop → Prop → Prop} {a1 : Prop} {a2 : Prop} {b1 : Prop} {b2 : Prop} : (a1 ↔ a2) → (b1 ↔ b2) → (c a1 b1 ↔ c a2 b2) :=
fun (h1 : a1 ↔ a2) (h2 : b1 ↔ b2) =>
eq.mpr (id (Eq._oldrec (Eq.refl (c a1 b1 ↔ c a2 b2)) (propext h1)))
(eq.mpr (id (Eq._oldrec (Eq.refl (c a2 b1 ↔ c a2 b2)) (propext h2))) (iff.refl (c a2 b2)))
/-- Update variable assignment for a specific variable
and leave everything else unchanged -/
def update {α : Type} (m : ℕ) (a : α) (v : ℕ → α) : ℕ → α :=
sorry
theorem update_eq {α : Type} (m : ℕ) (a : α) (v : ℕ → α) : update m a v m = a := sorry
theorem update_eq_of_ne {α : Type} {m : ℕ} {a : α} {v : ℕ → α} (k : ℕ) : k ≠ m → update m a v k = v k := sorry
/-- Assign a new value to the zeroth variable, and push all
other assignments up by 1 -/
def update_zero {α : Type} (a : α) (v : ℕ → α) : ℕ → α :=
sorry
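/- A plausible reading of the intended definitions (assumed here, since the auto-port replaces
   the bodies with `sorry`):
     update m a v n        = if n = m then a else v n
     update_zero a v 0     = a
     update_zero a v (k+1) = v k
   which is consistent with `update_eq` and `update_eq_of_ne` above. -/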
|
75707d895fe6072f89bb0257279c81a3fefe12c0
|
5412d79aa1dc0b521605c38bef9f0d4557b5a29d
|
/stage0/src/Lean/Parser/Command.lean
|
ef5778ba5c88c3a90141112ea46ccd1ea134b331
|
[
"Apache-2.0"
] |
permissive
|
smunix/lean4
|
a450ec0927dc1c74816a1bf2818bf8600c9fc9bf
|
3407202436c141e3243eafbecb4b8720599b970a
|
refs/heads/master
| 1,676,334,875,188
| 1,610,128,510,000
| 1,610,128,521,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 8,614
|
lean
|
/-
Copyright (c) 2019 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura, Sebastian Ullrich
-/
import Lean.Parser.Term
import Lean.Parser.Do
namespace Lean
namespace Parser
/--
Syntax quotation for terms and (lists of) commands. We prefer terms, so ambiguous quotations like
`($x $y) will be parsed as an application, not two commands. Use `($x:command $y:command) instead.
Multiple command will be put in a `null node, but a single command will not (so that you can directly
match against a quotation in a command kind's elaborator). -/
-- TODO: use two separate quotation parsers with parser priorities instead
@[builtinTermParser] def Term.quot := parser! "`(" >> toggleInsideQuot (termParser <|> many1Unbox commandParser) >> ")"
namespace Command
def namedPrio := parser! (atomic ("(" >> nonReservedSymbol "priority") >> " := " >> priorityParser >> ")")
def optNamedPrio := optional namedPrio
def commentBody : Parser :=
{ fn := rawFn (finishCommentBlock 1) true }
@[combinatorParenthesizer Lean.Parser.Command.commentBody] def commentBody.parenthesizer := PrettyPrinter.Parenthesizer.visitToken
@[combinatorFormatter Lean.Parser.Command.commentBody] def commentBody.formatter := PrettyPrinter.Formatter.visitAtom Name.anonymous
def docComment := parser! ppDedent $ "/--" >> commentBody >> ppLine
def «private» := parser! "private "
def «protected» := parser! "protected "
def visibility := «private» <|> «protected»
def «noncomputable» := parser! "noncomputable "
def «unsafe» := parser! "unsafe "
def «partial» := parser! "partial "
def declModifiers (inline : Bool) := parser! optional docComment >> optional (Term.«attributes» >> if inline then skip else ppDedent ppLine) >> optional visibility >> optional «noncomputable» >> optional «unsafe» >> optional «partial»
def declId := parser! ident >> optional (".{" >> sepBy1 ident ", " >> "}")
def declSig := parser! many (ppSpace >> (Term.simpleBinderWithoutType <|> Term.bracketedBinder)) >> Term.typeSpec
def optDeclSig := parser! many (ppSpace >> (Term.simpleBinderWithoutType <|> Term.bracketedBinder)) >> Term.optType
def declValSimple := parser! " :=\n" >> termParser >> optional Term.whereDecls
def declValEqns := parser! Term.matchAltsWhereDecls
def declVal := declValSimple <|> declValEqns <|> Term.whereDecls
def «abbrev» := parser! "abbrev " >> declId >> optDeclSig >> declVal
def «def» := parser! "def " >> declId >> optDeclSig >> declVal
def «theorem» := parser! "theorem " >> declId >> declSig >> declVal
def «constant» := parser! "constant " >> declId >> declSig >> optional declValSimple
def «instance» := parser! Term.attrKind >> "instance " >> optNamedPrio >> optional declId >> declSig >> declVal
def «axiom» := parser! "axiom " >> declId >> declSig
def «example» := parser! "example " >> declSig >> declVal
def inferMod := parser! atomic (symbol "{" >> "}")
def ctor := parser! "\n| " >> declModifiers true >> ident >> optional inferMod >> optDeclSig
def optDeriving := parser! optional (atomic ("deriving " >> notSymbol "instance") >> sepBy1 ident ", ")
def «inductive» := parser! "inductive " >> declId >> optDeclSig >> optional (symbol ":=" <|> "where") >> many ctor >> optDeriving
def classInductive := parser! atomic (group (symbol "class " >> "inductive ")) >> declId >> optDeclSig >> optional (symbol ":=" <|> "where") >> many ctor >> optDeriving
def structExplicitBinder := parser! atomic (declModifiers true >> "(") >> many1 ident >> optional inferMod >> optDeclSig >> optional Term.binderDefault >> ")"
def structImplicitBinder := parser! atomic (declModifiers true >> "{") >> many1 ident >> optional inferMod >> declSig >> "}"
def structInstBinder := parser! atomic (declModifiers true >> "[") >> many1 ident >> optional inferMod >> declSig >> "]"
def structSimpleBinder := parser! atomic (declModifiers true >> ident) >> optional inferMod >> optDeclSig >> optional Term.binderDefault
def structFields := parser! manyIndent (ppLine >> checkColGe >>(structExplicitBinder <|> structImplicitBinder <|> structInstBinder <|> structSimpleBinder))
def structCtor := parser! atomic (declModifiers true >> ident >> optional inferMod >> " :: ")
def structureTk := parser! "structure "
def classTk := parser! "class "
def «extends» := parser! " extends " >> sepBy1 termParser ", "
def «structure» := parser!
(structureTk <|> classTk) >> declId >> many Term.bracketedBinder >> optional «extends» >> Term.optType
>> optional ((symbol " := " <|> " where ") >> optional structCtor >> structFields)
>> optDeriving
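-- An illustrative declaration accepted by this parser (not part of this file):
--   structure S extends T where
--     (x : Nat := 0)
--     (y : Nat)
--   deriving Repr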
@[builtinCommandParser] def declaration := parser!
declModifiers false >> («abbrev» <|> «def» <|> «theorem» <|> «constant» <|> «instance» <|> «axiom» <|> «example» <|> «inductive» <|> classInductive <|> «structure»)
@[builtinCommandParser] def «deriving» := parser! "deriving " >> "instance " >> sepBy1 ident ", " >> " for " >> sepBy1 ident ", "
@[builtinCommandParser] def «section» := parser! "section " >> optional ident
@[builtinCommandParser] def «namespace» := parser! "namespace " >> ident
@[builtinCommandParser] def «end» := parser! "end " >> optional ident
@[builtinCommandParser] def «variable» := parser! "variable" >> Term.bracketedBinder
@[builtinCommandParser] def «variables» := parser! "variables" >> many1 Term.bracketedBinder
@[builtinCommandParser] def «universe» := parser! "universe " >> ident
@[builtinCommandParser] def «universes» := parser! "universes " >> many1 ident
@[builtinCommandParser] def check := parser! "#check " >> termParser
@[builtinCommandParser] def check_failure := parser! "#check_failure " >> termParser -- Like `#check`, but succeeds only if term does not type check
@[builtinCommandParser] def reduce := parser! "#reduce " >> termParser
@[builtinCommandParser] def eval := parser! "#eval " >> termParser
@[builtinCommandParser] def synth := parser! "#synth " >> termParser
@[builtinCommandParser] def exit := parser! "#exit"
@[builtinCommandParser] def print := parser! "#print " >> (ident <|> strLit)
@[builtinCommandParser] def printAxioms := parser! "#print " >> nonReservedSymbol "axioms " >> ident
@[builtinCommandParser] def «resolve_name» := parser! "#resolve_name " >> ident
@[builtinCommandParser] def «init_quot» := parser! "init_quot"
@[builtinCommandParser] def «set_option» := parser! "set_option " >> ident >> ppSpace >> (nonReservedSymbol "true" <|> nonReservedSymbol "false" <|> strLit <|> numLit)
@[builtinCommandParser] def «attribute» := parser! "attribute " >> "[" >> sepBy1 Term.attrInstance ", " >> "] " >> many1 ident
@[builtinCommandParser] def «export» := parser! "export " >> ident >> "(" >> many1 ident >> ")"
def openHiding := parser! atomic (ident >> "hiding") >> many1 ident
def openRenamingItem := parser! ident >> unicodeSymbol "→" "->" >> ident
def openRenaming := parser! atomic (ident >> "renaming") >> sepBy1 openRenamingItem ", "
def openOnly := parser! atomic (ident >> "(") >> many1 ident >> ")"
def openSimple := parser! many1 ident
@[builtinCommandParser] def «open» := parser! "open " >> (openHiding <|> openRenaming <|> openOnly <|> openSimple)
@[builtinCommandParser] def «mutual» := parser! "mutual " >> many1 (ppLine >> notSymbol "end" >> commandParser) >> ppDedent (ppLine >> "end")
@[builtinCommandParser] def «initialize» := parser! "initialize " >> optional (atomic (ident >> Term.typeSpec >> Term.leftArrow)) >> Term.doSeq
@[builtinCommandParser] def «builtin_initialize» := parser! "builtin_initialize " >> optional (atomic (ident >> Term.typeSpec >> Term.leftArrow)) >> Term.doSeq
@[builtinCommandParser] def «in» := tparser! " in " >> commandParser
@[runBuiltinParserAttributeHooks] abbrev declModifiersF := declModifiers false
@[runBuiltinParserAttributeHooks] abbrev declModifiersT := declModifiers true
builtin_initialize
registerParserAlias! "declModifiers" declModifiersF
registerParserAlias! "nestedDeclModifiers" declModifiersT
registerParserAlias! "declId" declId
registerParserAlias! "declSig" declSig
registerParserAlias! "optDeclSig" optDeclSig
end Command
end Parser
end Lean
|
37dc4ec8a84fead884cd7984ddc30915054aa651
|
5756a081670ba9c1d1d3fca7bd47cb4e31beae66
|
/Oneshot/lean4-in/Extra.lean
|
650b23fa9c188b1c7c19f966cdc79e2894ac58eb
|
[
"Apache-2.0"
] |
permissive
|
leanprover-community/mathport
|
2c9bdc8292168febf59799efdc5451dbf0450d4a
|
13051f68064f7638970d39a8fecaede68ffbf9e1
|
refs/heads/master
| 1,693,841,364,079
| 1,693,813,111,000
| 1,693,813,111,000
| 379,357,010
| 27
| 10
|
Apache-2.0
| 1,691,309,132,000
| 1,624,384,521,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 162
|
lean
|
-- Insert lean 4 alignment code here.
import Mathlib.Mathport.Rename
set_option align.precheck false
#align foo.foo Foo.bar
#align foo.foo_eq_one Foo.bar_eq_one
|
c266fcf0b32abd1168abc189c10b2cc0e569c5f5
|
74addaa0e41490cbaf2abd313a764c96df57b05d
|
/Mathlib/data/int/gcd.lean
|
1a7b452549bbd840454edc9745194dd8837e4fa5
|
[] |
no_license
|
AurelienSaue/Mathlib4_auto
|
f538cfd0980f65a6361eadea39e6fc639e9dae14
|
590df64109b08190abe22358fabc3eae000943f2
|
refs/heads/master
| 1,683,906,849,776
| 1,622,564,669,000
| 1,622,564,669,000
| 371,723,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 10,787
|
lean
|
/-
Copyright (c) 2018 Guy Leroy. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sangwoo Jo (aka Jason), Guy Leroy, Johannes Hölzl, Mario Carneiro
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.data.nat.prime
import Mathlib.PostPort
universes u_1
namespace Mathlib
/-!
# Extended GCD and divisibility over ℤ
## Main definitions
* Given `x y : ℕ`, `xgcd x y` computes the pair of integers `(a, b)` such that
`gcd x y = x * a + y * b`. `gcd_a x y` and `gcd_b x y` are defined to be `a` and `b`,
respectively.
## Main statements
* `gcd_eq_gcd_ab`: Bézout's lemma, given `x y : ℕ`, `gcd x y = x * gcd_a x y + y * gcd_b x y`.
-/
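/-
Worked example (illustrative, not part of the original file): for `x = 240` and `y = 46` we have
`gcd 240 46 = 2`, and one valid coefficient pair is `a = -9`, `b = 47`, since
`240 * (-9) + 46 * 47 = -2160 + 2162 = 2`.
-/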
/-! ### Extended Euclidean algorithm -/
namespace nat
/-- Helper function for the extended GCD algorithm (`nat.xgcd`). -/
def xgcd_aux : ℕ → ℤ → ℤ → ℕ → ℤ → ℤ → ℕ × ℤ × ℤ :=
sorry
@[simp] theorem xgcd_zero_left {s : ℤ} {t : ℤ} {r' : ℕ} {s' : ℤ} {t' : ℤ} : xgcd_aux 0 s t r' s' t' = (r', s', t') := sorry
theorem xgcd_aux_rec {r : ℕ} {s : ℤ} {t : ℤ} {r' : ℕ} {s' : ℤ} {t' : ℤ} (h : 0 < r) : xgcd_aux r s t r' s' t' = xgcd_aux (r' % r) (s' - ↑r' / ↑r * s) (t' - ↑r' / ↑r * t) r s t := sorry
/-- Use the extended GCD algorithm to generate the `a` and `b` values
satisfying `gcd x y = x * a + y * b`. -/
def xgcd (x : ℕ) (y : ℕ) : ℤ × ℤ :=
prod.snd (xgcd_aux x 1 0 y 0 1)
/-- The extended GCD `a` value in the equation `gcd x y = x * a + y * b`. -/
def gcd_a (x : ℕ) (y : ℕ) : ℤ :=
prod.fst (xgcd x y)
/-- The extended GCD `b` value in the equation `gcd x y = x * a + y * b`. -/
def gcd_b (x : ℕ) (y : ℕ) : ℤ :=
prod.snd (xgcd x y)
@[simp] theorem gcd_a_zero_left {s : ℕ} : gcd_a 0 s = 0 := sorry
@[simp] theorem gcd_b_zero_left {s : ℕ} : gcd_b 0 s = 1 := sorry
@[simp] theorem gcd_a_zero_right {s : ℕ} (h : s ≠ 0) : gcd_a s 0 = 1 := sorry
@[simp] theorem gcd_b_zero_right {s : ℕ} (h : s ≠ 0) : gcd_b s 0 = 0 := sorry
@[simp] theorem xgcd_aux_fst (x : ℕ) (y : ℕ) (s : ℤ) (t : ℤ) (s' : ℤ) (t' : ℤ) : prod.fst (xgcd_aux x s t y s' t') = gcd x y := sorry
theorem xgcd_aux_val (x : ℕ) (y : ℕ) : xgcd_aux x 1 0 y 0 1 = (gcd x y, xgcd x y) := sorry
theorem xgcd_val (x : ℕ) (y : ℕ) : xgcd x y = (gcd_a x y, gcd_b x y) := sorry
theorem xgcd_aux_P (x : ℕ) (y : ℕ) {r : ℕ} {r' : ℕ} {s : ℤ} {t : ℤ} {s' : ℤ} {t' : ℤ} : P x y (r, s, t) → P x y (r', s', t') → P x y (xgcd_aux r s t r' s' t') := sorry
/-- Bézout's lemma: given `x y : ℕ`, `gcd x y = x * a + y * b`, where `a = gcd_a x y` and
`b = gcd_b x y` are computed by the extended Euclidean algorithm.
-/
theorem gcd_eq_gcd_ab (x : ℕ) (y : ℕ) : ↑(gcd x y) = ↑x * gcd_a x y + ↑y * gcd_b x y := sorry
end nat
/-! ### Divisibility over ℤ -/
namespace int
theorem nat_abs_div (a : ℤ) (b : ℤ) (H : b ∣ a) : nat_abs (a / b) = nat_abs a / nat_abs b := sorry
theorem nat_abs_dvd_abs_iff {i : ℤ} {j : ℤ} : nat_abs i ∣ nat_abs j ↔ i ∣ j :=
{ mp := fun (H : nat_abs i ∣ nat_abs j) => iff.mp dvd_nat_abs (iff.mp nat_abs_dvd (iff.mpr coe_nat_dvd H)),
mpr := fun (H : i ∣ j) => iff.mp coe_nat_dvd (iff.mpr dvd_nat_abs (iff.mpr nat_abs_dvd H)) }
theorem succ_dvd_or_succ_dvd_of_succ_sum_dvd_mul {p : ℕ} (p_prime : nat.prime p) {m : ℤ} {n : ℤ} {k : ℕ} {l : ℕ} (hpm : ↑(p ^ k) ∣ m) (hpn : ↑(p ^ l) ∣ n) (hpmn : ↑(p ^ (k + l + 1)) ∣ m * n) : ↑(p ^ (k + 1)) ∣ m ∨ ↑(p ^ (l + 1)) ∣ n := sorry
theorem dvd_of_mul_dvd_mul_left {i : ℤ} {j : ℤ} {k : ℤ} (k_non_zero : k ≠ 0) (H : k * i ∣ k * j) : i ∣ j :=
dvd.elim H
fun (l : ℤ) (H1 : k * j = k * i * l) =>
Exists.intro l (mul_left_cancel' k_non_zero (eq.mp (Eq._oldrec (Eq.refl (k * j = k * i * l)) (mul_assoc k i l)) H1))
theorem dvd_of_mul_dvd_mul_right {i : ℤ} {j : ℤ} {k : ℤ} (k_non_zero : k ≠ 0) (H : i * k ∣ j * k) : i ∣ j :=
dvd_of_mul_dvd_mul_left k_non_zero
(eq.mp (Eq._oldrec (Eq.refl (k * i ∣ j * k)) (mul_comm j k))
(eq.mp (Eq._oldrec (Eq.refl (i * k ∣ j * k)) (mul_comm i k)) H))
theorem prime.dvd_nat_abs_of_coe_dvd_pow_two {p : ℕ} (hp : nat.prime p) (k : ℤ) (h : ↑p ∣ k ^ bit0 1) : p ∣ nat_abs k := sorry
/-- ℤ specific version of least common multiple. -/
def lcm (i : ℤ) (j : ℤ) : ℕ :=
nat.lcm (nat_abs i) (nat_abs j)
theorem lcm_def (i : ℤ) (j : ℤ) : lcm i j = nat.lcm (nat_abs i) (nat_abs j) :=
rfl
theorem gcd_dvd_left (i : ℤ) (j : ℤ) : ↑(gcd i j) ∣ i :=
iff.mp dvd_nat_abs (iff.mpr coe_nat_dvd (nat.gcd_dvd_left (nat_abs i) (nat_abs j)))
theorem gcd_dvd_right (i : ℤ) (j : ℤ) : ↑(gcd i j) ∣ j :=
iff.mp dvd_nat_abs (iff.mpr coe_nat_dvd (nat.gcd_dvd_right (nat_abs i) (nat_abs j)))
theorem dvd_gcd {i : ℤ} {j : ℤ} {k : ℤ} (h1 : k ∣ i) (h2 : k ∣ j) : k ∣ ↑(gcd i j) :=
iff.mp nat_abs_dvd (iff.mpr coe_nat_dvd (nat.dvd_gcd (iff.mpr nat_abs_dvd_abs_iff h1) (iff.mpr nat_abs_dvd_abs_iff h2)))
theorem gcd_mul_lcm (i : ℤ) (j : ℤ) : gcd i j * lcm i j = nat_abs (i * j) := sorry
theorem gcd_comm (i : ℤ) (j : ℤ) : gcd i j = gcd j i :=
nat.gcd_comm (nat_abs i) (nat_abs j)
theorem gcd_assoc (i : ℤ) (j : ℤ) (k : ℤ) : gcd (↑(gcd i j)) k = gcd i ↑(gcd j k) :=
nat.gcd_assoc (nat_abs i) (nat_abs j) (nat_abs k)
@[simp] theorem gcd_self (i : ℤ) : gcd i i = nat_abs i := sorry
@[simp] theorem gcd_zero_left (i : ℤ) : gcd 0 i = nat_abs i := sorry
@[simp] theorem gcd_zero_right (i : ℤ) : gcd i 0 = nat_abs i := sorry
@[simp] theorem gcd_one_left (i : ℤ) : gcd 1 i = 1 :=
nat.gcd_one_left (nat_abs i)
@[simp] theorem gcd_one_right (i : ℤ) : gcd i 1 = 1 :=
nat.gcd_one_right (nat_abs i)
theorem gcd_mul_left (i : ℤ) (j : ℤ) (k : ℤ) : gcd (i * j) (i * k) = nat_abs i * gcd j k := sorry
theorem gcd_mul_right (i : ℤ) (j : ℤ) (k : ℤ) : gcd (i * j) (k * j) = gcd i k * nat_abs j := sorry
theorem gcd_pos_of_non_zero_left {i : ℤ} (j : ℤ) (i_non_zero : i ≠ 0) : 0 < gcd i j :=
nat.gcd_pos_of_pos_left (nat_abs j) (nat_abs_pos_of_ne_zero i_non_zero)
theorem gcd_pos_of_non_zero_right (i : ℤ) {j : ℤ} (j_non_zero : j ≠ 0) : 0 < gcd i j :=
nat.gcd_pos_of_pos_right (nat_abs i) (nat_abs_pos_of_ne_zero j_non_zero)
theorem gcd_eq_zero_iff {i : ℤ} {j : ℤ} : gcd i j = 0 ↔ i = 0 ∧ j = 0 := sorry
theorem gcd_div {i : ℤ} {j : ℤ} {k : ℤ} (H1 : k ∣ i) (H2 : k ∣ j) : gcd (i / k) (j / k) = gcd i j / nat_abs k := sorry
theorem gcd_div_gcd_div_gcd {i : ℤ} {j : ℤ} (H : 0 < gcd i j) : gcd (i / ↑(gcd i j)) (j / ↑(gcd i j)) = 1 := sorry
theorem gcd_dvd_gcd_of_dvd_left {i : ℤ} {k : ℤ} (j : ℤ) (H : i ∣ k) : gcd i j ∣ gcd k j :=
iff.mp coe_nat_dvd (dvd_gcd (dvd.trans (gcd_dvd_left i j) H) (gcd_dvd_right i j))
theorem gcd_dvd_gcd_of_dvd_right {i : ℤ} {k : ℤ} (j : ℤ) (H : i ∣ k) : gcd j i ∣ gcd j k :=
iff.mp coe_nat_dvd (dvd_gcd (gcd_dvd_left j i) (dvd.trans (gcd_dvd_right j i) H))
theorem gcd_dvd_gcd_mul_left (i : ℤ) (j : ℤ) (k : ℤ) : gcd i j ∣ gcd (k * i) j :=
gcd_dvd_gcd_of_dvd_left j (dvd_mul_left i k)
theorem gcd_dvd_gcd_mul_right (i : ℤ) (j : ℤ) (k : ℤ) : gcd i j ∣ gcd (i * k) j :=
gcd_dvd_gcd_of_dvd_left j (dvd_mul_right i k)
theorem gcd_dvd_gcd_mul_left_right (i : ℤ) (j : ℤ) (k : ℤ) : gcd i j ∣ gcd i (k * j) :=
gcd_dvd_gcd_of_dvd_right i (dvd_mul_left j k)
theorem gcd_dvd_gcd_mul_right_right (i : ℤ) (j : ℤ) (k : ℤ) : gcd i j ∣ gcd i (j * k) :=
gcd_dvd_gcd_of_dvd_right i (dvd_mul_right j k)
theorem gcd_eq_left {i : ℤ} {j : ℤ} (H : i ∣ j) : gcd i j = nat_abs i := sorry
theorem gcd_eq_right {i : ℤ} {j : ℤ} (H : j ∣ i) : gcd i j = nat_abs j :=
eq.mpr (id (Eq._oldrec (Eq.refl (gcd i j = nat_abs j)) (gcd_comm i j)))
(eq.mpr (id (Eq._oldrec (Eq.refl (gcd j i = nat_abs j)) (gcd_eq_left H))) (Eq.refl (nat_abs j)))
theorem ne_zero_of_gcd {x : ℤ} {y : ℤ} (hc : gcd x y ≠ 0) : x ≠ 0 ∨ y ≠ 0 := sorry
theorem exists_gcd_one {m : ℤ} {n : ℤ} (H : 0 < gcd m n) : ∃ (m' : ℤ), ∃ (n' : ℤ), gcd m' n' = 1 ∧ m = m' * ↑(gcd m n) ∧ n = n' * ↑(gcd m n) := sorry
theorem exists_gcd_one' {m : ℤ} {n : ℤ} (H : 0 < gcd m n) : ∃ (g : ℕ), ∃ (m' : ℤ), ∃ (n' : ℤ), 0 < g ∧ gcd m' n' = 1 ∧ m = m' * ↑g ∧ n = n' * ↑g := sorry
theorem pow_dvd_pow_iff {m : ℤ} {n : ℤ} {k : ℕ} (k0 : 0 < k) : m ^ k ∣ n ^ k ↔ m ∣ n := sorry
/-! ### lcm -/
theorem lcm_comm (i : ℤ) (j : ℤ) : lcm i j = lcm j i :=
eq.mpr (id (Eq._oldrec (Eq.refl (lcm i j = lcm j i)) (lcm.equations._eqn_1 i j)))
(eq.mpr (id (Eq._oldrec (Eq.refl (nat.lcm (nat_abs i) (nat_abs j) = lcm j i)) (lcm.equations._eqn_1 j i)))
(nat.lcm_comm (nat_abs i) (nat_abs j)))
theorem lcm_assoc (i : ℤ) (j : ℤ) (k : ℤ) : lcm (↑(lcm i j)) k = lcm i ↑(lcm j k) := sorry
@[simp] theorem lcm_zero_left (i : ℤ) : lcm 0 i = 0 :=
eq.mpr (id (Eq._oldrec (Eq.refl (lcm 0 i = 0)) (lcm.equations._eqn_1 0 i))) (nat.lcm_zero_left (nat_abs i))
@[simp] theorem lcm_zero_right (i : ℤ) : lcm i 0 = 0 :=
eq.mpr (id (Eq._oldrec (Eq.refl (lcm i 0 = 0)) (lcm.equations._eqn_1 i 0))) (nat.lcm_zero_right (nat_abs i))
@[simp] theorem lcm_one_left (i : ℤ) : lcm 1 i = nat_abs i :=
eq.mpr (id (Eq._oldrec (Eq.refl (lcm 1 i = nat_abs i)) (lcm.equations._eqn_1 1 i))) (nat.lcm_one_left (nat_abs i))
@[simp] theorem lcm_one_right (i : ℤ) : lcm i 1 = nat_abs i :=
eq.mpr (id (Eq._oldrec (Eq.refl (lcm i 1 = nat_abs i)) (lcm.equations._eqn_1 i 1))) (nat.lcm_one_right (nat_abs i))
@[simp] theorem lcm_self (i : ℤ) : lcm i i = nat_abs i :=
eq.mpr (id (Eq._oldrec (Eq.refl (lcm i i = nat_abs i)) (lcm.equations._eqn_1 i i))) (nat.lcm_self (nat_abs i))
theorem dvd_lcm_left (i : ℤ) (j : ℤ) : i ∣ ↑(lcm i j) :=
eq.mpr (id (Eq._oldrec (Eq.refl (i ∣ ↑(lcm i j))) (lcm.equations._eqn_1 i j)))
(iff.mpr coe_nat_dvd_right (nat.dvd_lcm_left (nat_abs i) (nat_abs j)))
theorem dvd_lcm_right (i : ℤ) (j : ℤ) : j ∣ ↑(lcm i j) :=
eq.mpr (id (Eq._oldrec (Eq.refl (j ∣ ↑(lcm i j))) (lcm.equations._eqn_1 i j)))
(iff.mpr coe_nat_dvd_right (nat.dvd_lcm_right (nat_abs i) (nat_abs j)))
theorem lcm_dvd {i : ℤ} {j : ℤ} {k : ℤ} : i ∣ k → j ∣ k → ↑(lcm i j) ∣ k :=
eq.mpr (id (Eq._oldrec (Eq.refl (i ∣ k → j ∣ k → ↑(lcm i j) ∣ k)) (lcm.equations._eqn_1 i j)))
fun (hi : i ∣ k) (hj : j ∣ k) =>
iff.mpr coe_nat_dvd_left (nat.lcm_dvd (iff.mpr nat_abs_dvd_abs_iff hi) (iff.mpr nat_abs_dvd_abs_iff hj))
end int
theorem pow_gcd_eq_one {M : Type u_1} [monoid M] (x : M) {m : ℕ} {n : ℕ} (hm : x ^ m = 1) (hn : x ^ n = 1) : x ^ nat.gcd m n = 1 := sorry
|
ead6c3245379419bbcc710e366a3fac14230bf23
|
a6b711a4e8db20755026231f7ed529a9014b2b6d
|
/ZZ_IGNORE/S17/class/FinalExam/FinalExam-key.lean
|
ee2efb1e1815dc062ce29d38b694374d30a7a9d4
|
[] |
no_license
|
chaseboettner/cs-dm-1
|
b67d4a7e86f56bce59d2af115503769749d423b2
|
80b35f2957ffaa45b8b7a4479a3570a2d6eb4db0
|
refs/heads/master
| 1,585,367,603,488
| 1,536,235,675,000
| 1,536,235,675,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 6,568
|
lean
|
/-
PART I: LOGIC AND PROOF USING THE LEAN PROVER
This section is worth 20% of the exam grade.
There are five problems and one extra credit
problem.
-/
/- PROBLEM #1
Complete the proof by replacing the "sorry" stub
with code to construct a proof of the proposition.
You may want to start by replacing sorry with a
hole (underscore), then hover over the underscore
to see what assumptions you've got and what you
have left to prove. You will see that you've assumed
that P and Q are arbitrary propositions and that
you have a proof of P ∧ Q. Apply the appropriate
elimination and introduction rules, first to obtain
the necessary "ingredients" for the final proof (by
eliminations), and then combine them together into
a proof of the concluding proposition, Q ∧ P. Note
that the conclusion is a conjunction. That tells you
a lot about what kind of introduction rule you need
to use to complete the proof. It's not a bad idea
to work "top down" by incrementally filling in the
hole with expressions that can include new holes,
continuing this "refinement" process until your
proof is complete.
-/
theorem and_commutes: ∀ (P Q: Prop), P ∧ Q → Q ∧ P :=
λ P Q pf,
and.intro
(and.elim_right pf)
(and.elim_left pf)
/-
You can read and_commutes as a function that takes
three parameters, two propositions and a proof of
P ∧ Q, and that returns a proof of Q ∧ P. Another
way to read it is as taking two propositions, P and
Q, and returning a proof of P ∧ Q → Q ∧ P. This is
a reading that helps when it comes to solving the
next problem. Here's a slightly different but wholly
equivalent way to write the same thing.
-/
theorem and_commutes': forall P Q: Prop, P ∧ Q → Q ∧ P :=
λ P Q,
(λ pf,
and.intro
(and.elim_right pf)
(and.elim_left pf))
/-
The inner lambda, a function, *is* a proof that if you
are given a proof, pf, of P ∧ Q, you can always return a
proof of Q ∧ P, i.e., P ∧ Q → Q ∧ P. The parentheses
around the inner lambda are not necessary but are there
to highlight the fact that when the outer function is
given propositions, P and Q, it returns a proof of an
implication, that P ∧ Q → Q ∧ P. This proof is given by
the function that the inner lambda defines.
-/
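/-
A third, equivalent phrasing (an illustrative sketch, not part of the original exam key): the
same proof written in tactic mode, using the anonymous projections pf.right and pf.left for
the two eliminations.
-/
theorem and_commutes'' : ∀ (P Q : Prop), P ∧ Q → Q ∧ P :=
begin
  intros P Q pf,
  exact and.intro pf.right pf.left,
end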
/- PROBLEM #2
As you can see from the definition of and_commutes
it is basically a function (see the lambda) that takes
two propositions, P and Q, as arguments, and that then
returns a proof of P ∧ Q → Q ∧ P. No other proofs are
needed as arguments in this case, as the proposition
to be proved is logically valid.
In this problem you use the fact that and_commutes can
be "called" to build the two proofs that do have to be
provided as arguments to an introduction rule to build
a proof of the stated equivalence.
Read and understand the statement of the theorem first,
then read the partial proof. Complete it by replacing
"sorry" with a proof of the required type. You might
again want to start by replacing sorry with a hole,
then check to see the context and goal for that hole.
And finally fill it with an expression that produces
the required proof.
-/
theorem and_commutes_2: forall X Y: Prop, X ∧ Y ↔ Y ∧ X :=
λ (X Y: Prop),
iff.intro
(and_commutes X Y)
(and_commutes Y X)
/-
To introduce a conclusion of the form A ↔ B (A iff B), you
have to have proofs of the implication in each direction:
a proof that A → B and a proof that B → A. Here we need
proofs of both X ∧ Y → Y ∧ X and Y ∧ X → X ∧ Y.
Luckily, the and_commutes function returns proofs of just
this sort. If you give it propositions, A and B, it gives
you back a proof of A ∧ B → B ∧ A. If you give it B and
A as arguments, in that reversed order, it gives you back
a proof of B ∧ A → A ∧ B. We call and_commutes twice, with
the propositions X and Y in the right order, to build the
two proofs needed for the iff.intro rule to construct a
proof of the given ↔ proposition.
-/
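/-
Usage sketch (illustrative, not part of the original exam key): either direction of the
biconditional can be recovered with iff.elim_left or iff.elim_right.
-/
example (X Y : Prop) : X ∧ Y → Y ∧ X := iff.elim_left (and_commutes_2 X Y)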
/- PROBLEM #3
Replace sorry to complete the proof that if p, q, and r
are arbitrary natural numbers, and p < q and q < r then
p < r. Very strong hint: Lean already has a theorem (a
rule), called lt.trans, that when given a proof of p < q
and a proof of q < r generates a proof of p < r.
-/
theorem my_lt_trans: ∀ p q r: nat, p < q → q < r → p < r :=
λ p q r pq qr, lt.trans pq qr
/- PROBLEM #4
In Lean, write and prove the proposition that, for all
natural numbers, p, q, and r, if p is equal to q then
if q is equal to r, then r is equal to p. Don't reverse
the order of variables in this expression when writing
it in Lean. If we were just restating transitivity of
equality, the third proposition would have been p = r,
but here it's r = p. Hint: compose inference rules for
two different properties of equality. Give your theorem
the name eq_rev_trans.
-/
-- your answer here
theorem eq_rev_trans: ∀ p q r: nat, p=q → q=r → r=p :=
λ p q r pq qr,
eq.symm
(eq.trans pq qr)
/- PROBLEM #5
In Lean, write and prove a theorem, called pqp, that
states that, for any propositions, P and Q, P → Q → P.
-/
theorem pqp: forall P Q: Prop, P → Q → P :=
λ P Q pfP pfQ, pfP
/-
The first trick to solving this problem is to
consider the solution to be a function that takes
four arguments and that then returns a proof of P.
The arguments are the propositions (types), P and
Q, a proof (value) of (type) P, and a proof of Q.
Given that context, the function has to return a
proof of P. But there's already one right there
in the context, so we just return *it*.
Now think about P -> Q -> P in natural language.
It says, "If P is true, then if Q is true then P
is also true". If you start by assuming that P
is true, then you can directly conclude that P
is true. You don't really have to consider Q at
all.
-/
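/-
Usage sketch (illustrative, not part of the original exam key): pqp specialized to two
concrete propositions.
-/
example : 1 = 1 → 2 = 3 → 1 = 1 := pqp (1 = 1) (2 = 3)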
/- EXTRA CREDIT.
One of the inference rules that we did validate in
the propositional logic unit, and that was described
in intro.lean, but that we did not have time to talk
about in class, states that false implies anything at
all, even absurdities. Your task here is, in Lean,
first to write and prove that false implies 0=1 (call
the theorem f01) and then to write and prove a second
theorem asserting that from false you can prove any
proposition, Q, whatsoever (call it fQ).
-/
theorem f01: false → 0=1 :=
λ f, false.elim f
theorem fq: ∀ Q: Prop, false → Q :=
λ Q pf, false.elim pf
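/-
Illustrative remark (not part of the original exam key): f01 is just fq specialized to the
proposition 0 = 1.
-/
example : false → 0 = 1 := fq (0 = 1)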
|
c29da9353ae48df8efd0f896a5597774bd817b3e
|
1a61aba1b67cddccce19532a9596efe44be4285f
|
/tests/lean/t14.lean
|
448448b2c1c55379a095a3ae60875782dd9a9bea
|
[
"Apache-2.0"
] |
permissive
|
eigengrau/lean
|
07986a0f2548688c13ba36231f6cdbee82abf4c6
|
f8a773be1112015e2d232661ce616d23f12874d0
|
refs/heads/master
| 1,610,939,198,566
| 1,441,352,386,000
| 1,441,352,494,000
| 41,903,576
| 0
| 0
| null | 1,441,352,210,000
| 1,441,352,210,000
| null |
UTF-8
|
Lean
| false
| false
| 685
|
lean
|
prelude namespace foo
constant A : Type.{1}
constant a : A
constant x : A
constant c : A
end foo
section
open foo (renaming a->b x->y) (hiding c)
check b
check y
check c -- Error
end
section
open foo (a x)
check a
check x
check c -- Error
end
section
open foo (a x) (hiding c) -- Error
end
section
open foo
check a
check c
check A
end
namespace foo
constant f : A → A → A
infix `*`:75 := f
end foo
section
open foo
check a * c
end
section
open [notations] foo -- use only the notation
check foo.a * foo.c
check a * c -- Error
end
section
open [decls] foo -- use only the declarations
check f a c
check a*c -- Error
end
|
61e1f11b6427474bba59bab0d891ec4db5063cef
|
69d4931b605e11ca61881fc4f66db50a0a875e39
|
/src/group_theory/order_of_element.lean
|
f745afb24ecda030f94dca7329a9e2c99d127f57
|
[
"Apache-2.0"
] |
permissive
|
abentkamp/mathlib
|
d9a75d291ec09f4637b0f30cc3880ffb07549ee5
|
5360e476391508e092b5a1e5210bd0ed22dc0755
|
refs/heads/master
| 1,682,382,954,948
| 1,622,106,077,000
| 1,622,106,077,000
| 149,285,665
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 33,416
|
lean
|
/-
Copyright (c) 2018 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Julian Kuelshammer
-/
import algebra.big_operators.order
import group_theory.coset
import data.nat.totient
import data.int.gcd
import data.set.finite
import dynamics.periodic_pts
import algebra.iterate_hom
/-!
# Order of an element
This file defines the order of an element of a finite group. For a finite group `G` the order of
`x ∈ G` is the minimal `n ≥ 1` such that `x ^ n = 1`.
## Main definitions
* `is_of_fin_order` is a predicate on an element `x` of a monoid `G` saying that `x` is of finite
order.
* `is_of_fin_add_order` is the additive analogue of `is_of_fin_order`.
* `order_of x` defines the order of an element `x` of a monoid `G`, by convention its value is `0`
if `x` has infinite order.
* `add_order_of` is the additive analogue of `order_of`.
## Tags
order of an element
-/
open function nat
universes u v
variables {G : Type u} {A : Type v}
variables {x y : G} {a b : A} {n m : ℕ}
section monoid_add_monoid
variables [monoid G] [add_monoid A]
section is_of_fin_order
lemma is_periodic_pt_add_iff_nsmul_eq_zero (a : A) :
is_periodic_pt ((+) a) n 0 ↔ n • a = 0 :=
by rw [is_periodic_pt, is_fixed_pt, add_left_iterate, add_zero]
@[to_additive is_periodic_pt_add_iff_nsmul_eq_zero]
lemma is_periodic_pt_mul_iff_pow_eq_one (x : G) : is_periodic_pt ((*) x) n 1 ↔ x ^ n = 1 :=
by rw [is_periodic_pt, is_fixed_pt, mul_left_iterate, mul_one]
/-- `is_of_fin_add_order` is a predicate on an element `a` of an additive monoid to be of finite
order, i.e. there exists `n ≥ 1` such that `n • a = 0`.-/
def is_of_fin_add_order (a : A) : Prop :=
(0 : A) ∈ periodic_pts ((+) a)
/-- `is_of_fin_order` is a predicate on an element `x` of a monoid to be of finite order, i.e. there
exists `n ≥ 1` such that `x ^ n = 1`.-/
@[to_additive is_of_fin_add_order]
def is_of_fin_order (x : G) : Prop :=
(1 : G) ∈ periodic_pts ((*) x)
lemma is_of_fin_add_order_of_mul_iff :
is_of_fin_add_order (additive.of_mul x) ↔ is_of_fin_order x := iff.rfl
lemma is_of_fin_order_of_add_iff :
is_of_fin_order (multiplicative.of_add a) ↔ is_of_fin_add_order a := iff.rfl
lemma is_of_fin_add_order_iff_nsmul_eq_zero (a : A) :
is_of_fin_add_order a ↔ ∃ n, 0 < n ∧ n • a = 0 :=
by { convert iff.rfl, simp only [exists_prop, is_periodic_pt_add_iff_nsmul_eq_zero] }
@[to_additive is_of_fin_add_order_iff_nsmul_eq_zero]
lemma is_of_fin_order_iff_pow_eq_one (x : G) :
is_of_fin_order x ↔ ∃ n, 0 < n ∧ x ^ n = 1 :=
by { convert iff.rfl, simp [is_periodic_pt_mul_iff_pow_eq_one] }
end is_of_fin_order
/-- `add_order_of a` is the order of the element `a`, i.e. the `n ≥ 1`, s.t. `n • a = 0` if it
exists. Otherwise, i.e. if `a` is of infinite order, then `add_order_of a` is `0` by convention.-/
noncomputable def add_order_of (a : A) : ℕ :=
minimal_period ((+) a) 0
/-- `order_of x` is the order of the element `x`, i.e. the `n ≥ 1`, s.t. `x ^ n = 1` if it exists.
Otherwise, i.e. if `x` is of infinite order, then `order_of x` is `0` by convention.-/
@[to_additive add_order_of]
noncomputable def order_of (x : G) : ℕ :=
minimal_period ((*) x) 1
attribute [to_additive add_order_of] order_of
@[to_additive]
lemma commute.order_of_mul_dvd_lcm (h : commute x y) :
order_of (x * y) ∣ nat.lcm (order_of x) (order_of y) :=
begin
convert function.commute.minimal_period_of_comp_dvd_lcm h.function_commute_mul_left,
rw [order_of, comp_mul_left],
end
@[simp] lemma add_order_of_of_mul_eq_order_of (x : G) :
add_order_of (additive.of_mul x) = order_of x := rfl
@[simp] lemma order_of_of_add_eq_add_order_of (a : A) :
order_of (multiplicative.of_add a) = add_order_of a := rfl
@[to_additive add_order_of_pos']
lemma order_of_pos' (h : is_of_fin_order x) : 0 < order_of x :=
minimal_period_pos_of_mem_periodic_pts h
lemma pow_order_of_eq_one (x : G) : x ^ order_of x = 1 :=
begin
convert is_periodic_pt_minimal_period ((*) x) _,
rw [order_of, mul_left_iterate, mul_one],
end
lemma add_order_of_nsmul_eq_zero (a : A) : add_order_of a • a = 0 :=
begin
convert is_periodic_pt_minimal_period ((+) a) _,
rw [add_order_of, add_left_iterate, add_zero],
end
attribute [to_additive add_order_of_nsmul_eq_zero] pow_order_of_eq_one
@[to_additive add_order_of_eq_zero]
lemma order_of_eq_zero (h : ¬ is_of_fin_order x) : order_of x = 0 :=
by rwa [order_of, minimal_period, dif_neg]
lemma nsmul_ne_zero_of_lt_add_order_of' (n0 : n ≠ 0) (h : n < add_order_of a) :
n • a ≠ 0 :=
λ j, not_is_periodic_pt_of_pos_of_lt_minimal_period n0 h
((is_periodic_pt_add_iff_nsmul_eq_zero a).mpr j)
@[to_additive nsmul_ne_zero_of_lt_add_order_of']
lemma pow_eq_one_of_lt_order_of' (n0 : n ≠ 0) (h : n < order_of x) : x ^ n ≠ 1 :=
λ j, not_is_periodic_pt_of_pos_of_lt_minimal_period n0 h
((is_periodic_pt_mul_iff_pow_eq_one x).mpr j)
lemma add_order_of_le_of_nsmul_eq_zero (hn : 0 < n) (h : n • a = 0) : add_order_of a ≤ n :=
is_periodic_pt.minimal_period_le hn (by rwa is_periodic_pt_add_iff_nsmul_eq_zero)
@[to_additive add_order_of_le_of_nsmul_eq_zero]
lemma order_of_le_of_pow_eq_one (hn : 0 < n) (h : x ^ n = 1) : order_of x ≤ n :=
is_periodic_pt.minimal_period_le hn (by rwa is_periodic_pt_mul_iff_pow_eq_one)
@[simp] lemma order_of_one : order_of (1 : G) = 1 :=
by rw [order_of, one_mul_eq_id, minimal_period_id]
@[simp] lemma add_order_of_zero : add_order_of (0 : A) = 1 :=
by simp only [←order_of_of_add_eq_add_order_of, order_of_one, of_add_zero]
attribute [to_additive add_order_of_zero] order_of_one
@[simp] lemma order_of_eq_one_iff : order_of x = 1 ↔ x = 1 :=
by rw [order_of, is_fixed_point_iff_minimal_period_eq_one, is_fixed_pt, mul_one]
@[simp] lemma add_order_of_eq_one_iff : add_order_of a = 1 ↔ a = 0 :=
by simp [← order_of_of_add_eq_add_order_of]
attribute [to_additive add_order_of_eq_one_iff] order_of_eq_one_iff
lemma pow_eq_mod_order_of {n : ℕ} : x ^ n = x ^ (n % order_of x) :=
calc x ^ n = x ^ (n % order_of x + order_of x * (n / order_of x)) : by rw [nat.mod_add_div]
... = x ^ (n % order_of x) : by simp [pow_add, pow_mul, pow_order_of_eq_one]
lemma nsmul_eq_mod_add_order_of {n : ℕ} : n • a = (n % add_order_of a) • a :=
begin
apply multiplicative.of_add.injective,
rw [← order_of_of_add_eq_add_order_of, of_add_nsmul, of_add_nsmul, pow_eq_mod_order_of],
end
attribute [to_additive nsmul_eq_mod_add_order_of] pow_eq_mod_order_of
lemma order_of_dvd_of_pow_eq_one (h : x ^ n = 1) : order_of x ∣ n :=
is_periodic_pt.minimal_period_dvd ((is_periodic_pt_mul_iff_pow_eq_one _).mpr h)
lemma add_order_of_dvd_of_nsmul_eq_zero (h : n • a = 0) : add_order_of a ∣ n :=
is_periodic_pt.minimal_period_dvd ((is_periodic_pt_add_iff_nsmul_eq_zero _).mpr h)
attribute [to_additive add_order_of_dvd_of_nsmul_eq_zero] order_of_dvd_of_pow_eq_one
lemma add_order_of_dvd_iff_nsmul_eq_zero {n : ℕ} : add_order_of a ∣ n ↔ n • a = 0 :=
⟨λ h, by rw [nsmul_eq_mod_add_order_of, nat.mod_eq_zero_of_dvd h, zero_nsmul],
add_order_of_dvd_of_nsmul_eq_zero⟩
@[to_additive add_order_of_dvd_iff_nsmul_eq_zero]
lemma order_of_dvd_iff_pow_eq_one {n : ℕ} : order_of x ∣ n ↔ x ^ n = 1 :=
⟨λ h, by rw [pow_eq_mod_order_of, nat.mod_eq_zero_of_dvd h, pow_zero], order_of_dvd_of_pow_eq_one⟩
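/- Usage sketch (illustrative, not part of the original file): any multiple of `order_of x`
kills `x`, via the divisibility characterisation above. -/
example (k : ℕ) : x ^ (order_of x * k) = 1 :=
order_of_dvd_iff_pow_eq_one.mp (dvd_mul_right _ _)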
lemma exists_pow_eq_self_of_coprime (h : n.coprime (order_of x)) :
∃ m : ℕ, (x ^ n) ^ m = x :=
begin
by_cases h0 : order_of x = 0,
{ rw [h0, coprime_zero_right] at h,
exact ⟨1, by rw [h, pow_one, pow_one]⟩ },
by_cases h1 : order_of x = 1,
{ exact ⟨0, by rw [order_of_eq_one_iff.mp h1, one_pow, one_pow]⟩ },
obtain ⟨m, hm⟩ :=
exists_mul_mod_eq_one_of_coprime h (one_lt_iff_ne_zero_and_ne_one.mpr ⟨h0, h1⟩),
exact ⟨m, by rw [←pow_mul, pow_eq_mod_order_of, hm, pow_one]⟩,
end
lemma exists_nsmul_eq_self_of_coprime (a : A)
(h : coprime n (add_order_of a)) : ∃ m : ℕ, m • (n • a) = a :=
begin
change n.coprime (order_of (multiplicative.of_add a)) at h,
exact exists_pow_eq_self_of_coprime h,
end
attribute [to_additive exists_nsmul_eq_self_of_coprime] exists_pow_eq_self_of_coprime
lemma add_order_of_eq_add_order_of_iff {B : Type*} [add_monoid B] {b : B} :
add_order_of a = add_order_of b ↔ ∀ n : ℕ, n • a = 0 ↔ n • b = 0 :=
begin
simp_rw ← add_order_of_dvd_iff_nsmul_eq_zero,
exact ⟨λ h n, by rw h, λ h, nat.dvd_antisymm ((h _).mpr (dvd_refl _)) ((h _).mp (dvd_refl _))⟩,
end
@[to_additive add_order_of_eq_add_order_of_iff]
lemma order_of_eq_order_of_iff {H : Type*} [monoid H] {y : H} :
order_of x = order_of y ↔ ∀ n : ℕ, x ^ n = 1 ↔ y ^ n = 1 :=
by simp_rw [← is_periodic_pt_mul_iff_pow_eq_one, ← minimal_period_eq_minimal_period_iff, order_of]
lemma add_order_of_injective {B : Type*} [add_monoid B] (f : A →+ B)
(hf : function.injective f) (a : A) : add_order_of (f a) = add_order_of a :=
by simp_rw [add_order_of_eq_add_order_of_iff, ←f.map_nsmul, ←f.map_zero, hf.eq_iff, iff_self,
forall_const]
@[to_additive add_order_of_injective]
lemma order_of_injective {H : Type*} [monoid H] (f : G →* H)
(hf : function.injective f) (x : G) : order_of (f x) = order_of x :=
by simp_rw [order_of_eq_order_of_iff, ←f.map_pow, ←f.map_one, hf.eq_iff, iff_self, forall_const]
@[simp, norm_cast, to_additive] lemma order_of_submonoid {H : submonoid G}
(y : H) : order_of (y : G) = order_of y :=
order_of_injective H.subtype subtype.coe_injective y
variables (x)
lemma order_of_pow' (h : n ≠ 0) :
order_of (x ^ n) = order_of x / gcd (order_of x) n :=
begin
convert minimal_period_iterate_eq_div_gcd h,
simp only [order_of, mul_left_iterate],
end
variables (a)
lemma add_order_of_nsmul' (h : n ≠ 0) :
add_order_of (n • a) = add_order_of a / gcd (add_order_of a) n :=
by simpa [← order_of_of_add_eq_add_order_of, of_add_nsmul] using order_of_pow' _ h
attribute [to_additive add_order_of_nsmul'] order_of_pow'
variable (n)
lemma order_of_pow'' (h : is_of_fin_order x) :
order_of (x ^ n) = order_of x / gcd (order_of x) n :=
begin
convert minimal_period_iterate_eq_div_gcd' h,
simp only [order_of, mul_left_iterate],
end
lemma add_order_of_nsmul'' (h : is_of_fin_add_order a) :
add_order_of (n • a) = add_order_of a / gcd (add_order_of a) n :=
by simp [← order_of_of_add_eq_add_order_of, of_add_nsmul,
order_of_pow'' _ n (is_of_fin_order_of_add_iff.mpr h)]
attribute [to_additive add_order_of_nsmul''] order_of_pow''
section p_prime
variables {a x n} {p : ℕ} [hp : fact p.prime]
include hp
lemma add_order_of_eq_prime (hg : p • a = 0) (hg1 : a ≠ 0) : add_order_of a = p :=
minimal_period_eq_prime ((is_periodic_pt_add_iff_nsmul_eq_zero _).mpr hg)
(by rwa [is_fixed_pt, add_zero])
@[to_additive add_order_of_eq_prime]
lemma order_of_eq_prime (hg : x ^ p = 1) (hg1 : x ≠ 1) : order_of x = p :=
minimal_period_eq_prime ((is_periodic_pt_mul_iff_pow_eq_one _).mpr hg)
(by rwa [is_fixed_pt, mul_one])
lemma add_order_of_eq_prime_pow (hnot : ¬ (p ^ n) • a = 0) (hfin : (p ^ (n + 1)) • a = 0) :
add_order_of a = p ^ (n + 1) :=
begin
apply minimal_period_eq_prime_pow;
rwa is_periodic_pt_add_iff_nsmul_eq_zero,
end
@[to_additive add_order_of_eq_prime_pow]
lemma order_of_eq_prime_pow (hnot : ¬ x ^ p ^ n = 1) (hfin : x ^ p ^ (n + 1) = 1) :
order_of x = p ^ (n + 1) :=
begin
apply minimal_period_eq_prime_pow;
rwa is_periodic_pt_mul_iff_pow_eq_one,
end
omit hp
-- An example of how to determine the order of an element of a finite group.
example : order_of (-1 : units ℤ) = 2 :=
begin
haveI : fact (prime 2) := ⟨prime_two⟩,
exact order_of_eq_prime (int.units_mul_self _) dec_trivial,
end
end p_prime
end monoid_add_monoid
section cancel_monoid
variables [left_cancel_monoid G] (x)
variables [add_left_cancel_monoid A] (a)
lemma pow_injective_aux (h : n ≤ m)
(hm : m < order_of x) (eq : x ^ n = x ^ m) : n = m :=
by_contradiction $ assume ne : n ≠ m,
have h₁ : m - n > 0, from nat.pos_of_ne_zero (by simp [nat.sub_eq_iff_eq_add h, ne.symm]),
have h₂ : m = n + (m - n) := (nat.add_sub_of_le h).symm,
have h₃ : x ^ (m - n) = 1,
by { rw [h₂, pow_add] at eq, apply mul_left_cancel, convert eq.symm, exact mul_one (x ^ n) },
have le : order_of x ≤ m - n, from order_of_le_of_pow_eq_one h₁ h₃,
have lt : m - n < order_of x,
from (nat.sub_lt_left_iff_lt_add h).mpr $ nat.lt_add_left _ _ _ hm,
lt_irrefl _ (le.trans_lt lt)
-- TODO: This lemma was originally private, but this doesn't seem to work with `to_additive`,
-- therefore the private got removed.
lemma nsmul_injective_aux {n m : ℕ} (h : n ≤ m)
(hm : m < add_order_of a) (eq : n • a = m • a) : n = m :=
begin
apply_fun multiplicative.of_add at eq,
rw [of_add_nsmul, of_add_nsmul] at eq,
rw ← order_of_of_add_eq_add_order_of at hm,
exact pow_injective_aux (multiplicative.of_add a) h hm eq,
end
attribute [to_additive nsmul_injective_aux] pow_injective_aux
lemma nsmul_injective_of_lt_add_order_of {n m : ℕ}
(hn : n < add_order_of a) (hm : m < add_order_of a) (eq : n • a = m • a) : n = m :=
(le_total n m).elim
(assume h, nsmul_injective_aux a h hm eq)
(assume h, (nsmul_injective_aux a h hn eq.symm).symm)
@[to_additive nsmul_injective_of_lt_add_order_of]
lemma pow_injective_of_lt_order_of
(hn : n < order_of x) (hm : m < order_of x) (eq : x ^ n = x ^ m) : n = m :=
(le_total n m).elim
(assume h, pow_injective_aux x h hm eq)
(assume h, (pow_injective_aux x h hn eq.symm).symm)
end cancel_monoid
section group
variables [group G] [add_group A] {x a} {i : ℤ}
@[simp, norm_cast, to_additive] lemma order_of_subgroup {H : subgroup G}
(y : H) : order_of (y : G) = order_of y :=
order_of_injective H.subtype subtype.coe_injective y
lemma gpow_eq_mod_order_of : x ^ i = x ^ (i % order_of x) :=
calc x ^ i = x ^ (i % order_of x + order_of x * (i / order_of x)) :
by rw [int.mod_add_div]
... = x ^ (i % order_of x) :
by simp [gpow_add, gpow_mul, pow_order_of_eq_one]
lemma gsmul_eq_mod_add_order_of : i • a = (i % add_order_of a) • a :=
begin
apply multiplicative.of_add.injective,
simp [of_add_gsmul, gpow_eq_mod_order_of],
end
attribute [to_additive gsmul_eq_mod_add_order_of] gpow_eq_mod_order_of
end group
section fintype
variables [fintype G] [fintype A]
section finite_monoid
variables [monoid G] [add_monoid A]
open_locale big_operators
lemma sum_card_add_order_of_eq_card_nsmul_eq_zero [decidable_eq A] (hn : 0 < n) :
∑ m in (finset.range n.succ).filter (∣ n), (finset.univ.filter (λ a : A, add_order_of a = m)).card
= (finset.univ.filter (λ a : A, n • a = 0)).card :=
calc ∑ m in (finset.range n.succ).filter (∣ n),
(finset.univ.filter (λ a : A, add_order_of a = m)).card
= _ : (finset.card_bUnion (by { intros, apply finset.disjoint_filter.2, cc })).symm
... = _ : congr_arg finset.card (finset.ext (begin
assume a,
suffices : add_order_of a ≤ n ∧ add_order_of a ∣ n ↔ n • a = 0,
{ simpa [nat.lt_succ_iff], },
exact ⟨λ h, let ⟨m, hm⟩ := h.2 in
by rw [hm, mul_comm, mul_nsmul, add_order_of_nsmul_eq_zero, nsmul_zero],
λ h, ⟨add_order_of_le_of_nsmul_eq_zero hn h, add_order_of_dvd_of_nsmul_eq_zero h⟩⟩
end))
@[to_additive sum_card_add_order_of_eq_card_nsmul_eq_zero]
lemma sum_card_order_of_eq_card_pow_eq_one [decidable_eq G] (hn : 0 < n) :
∑ m in (finset.range n.succ).filter (∣ n), (finset.univ.filter (λ x : G, order_of x = m)).card
= (finset.univ.filter (λ x : G, x ^ n = 1)).card :=
calc ∑ m in (finset.range n.succ).filter (∣ n), (finset.univ.filter (λ x : G, order_of x = m)).card
= _ : (finset.card_bUnion (by { intros, apply finset.disjoint_filter.2, cc })).symm
... = _ : congr_arg finset.card (finset.ext (begin
assume x,
suffices : order_of x ≤ n ∧ order_of x ∣ n ↔ x ^ n = 1,
{ simpa [nat.lt_succ_iff], },
exact ⟨λ h, let ⟨m, hm⟩ := h.2 in by rw [hm, pow_mul, pow_order_of_eq_one, one_pow],
λ h, ⟨order_of_le_of_pow_eq_one hn h, order_of_dvd_of_pow_eq_one h⟩⟩
end))
end finite_monoid
section finite_cancel_monoid
-- TODO: Of course everything also works for right_cancel_monoids.
variables [left_cancel_monoid G] [add_left_cancel_monoid A]
-- TODO: Use this to show that a finite left cancellative monoid is a group.
lemma exists_pow_eq_one (x : G) : is_of_fin_order x :=
begin
refine (is_of_fin_order_iff_pow_eq_one _).mpr _,
obtain ⟨i, j, a_eq, ne⟩ : ∃(i j : ℕ), x ^ i = x ^ j ∧ i ≠ j :=
by simpa only [not_forall, exists_prop] using (not_injective_infinite_fintype (λi:ℕ, x^i)),
wlog h'' : j ≤ i,
refine ⟨i - j, nat.sub_pos_of_lt (lt_of_le_of_ne h'' ne.symm), mul_right_injective (x^j) _⟩,
rw [mul_one, ← pow_add, ← a_eq, nat.add_sub_cancel' h''],
end
lemma exists_nsmul_eq_zero (a : A) : is_of_fin_add_order a :=
begin
rcases exists_pow_eq_one (multiplicative.of_add a) with ⟨i, hi1, hi2⟩,
refine ⟨i, hi1, multiplicative.of_add.injective _⟩,
rw [add_left_iterate, of_add_zero, of_add_eq_one, add_zero],
exact (is_periodic_pt_mul_iff_pow_eq_one (multiplicative.of_add a)).mp hi2,
end
attribute [to_additive exists_nsmul_eq_zero] exists_pow_eq_one
lemma add_order_of_le_card_univ : add_order_of a ≤ fintype.card A :=
finset.le_card_of_inj_on_range (• a)
(assume n _, finset.mem_univ _)
(assume i hi j hj, nsmul_injective_of_lt_add_order_of a hi hj)
@[to_additive add_order_of_le_card_univ]
lemma order_of_le_card_univ : order_of x ≤ fintype.card G :=
finset.le_card_of_inj_on_range ((^) x)
(assume n _, finset.mem_univ _)
(assume i hi j hj, pow_injective_of_lt_order_of x hi hj)
/-- This is the same as `add_order_of_pos'` but with one fewer explicit assumption since this is
automatic in case of a finite cancellative additive monoid.-/
lemma add_order_of_pos (a : A) : 0 < add_order_of a := add_order_of_pos' (exists_nsmul_eq_zero _)
/-- This is the same as `order_of_pos'` but with one fewer explicit assumption since this is
automatic in case of a finite cancellative monoid.-/
@[to_additive add_order_of_pos]
lemma order_of_pos (x : G) : 0 < order_of x := order_of_pos' (exists_pow_eq_one x)
open nat
/-- This is the same as `add_order_of_nsmul'` and `add_order_of_nsmul''` but with one assumption less
which is automatic in the case of a finite cancellative additive monoid. -/
lemma add_order_of_nsmul (a : A) :
add_order_of (n • a) = add_order_of a / gcd (add_order_of a) n :=
add_order_of_nsmul'' _ _ (exists_nsmul_eq_zero _)
/-- This is the same as `order_of_pow'` and `order_of_pow''` but with one assumption less which is
automatic in the case of a finite cancellative monoid.-/
@[to_additive add_order_of_nsmul]
lemma order_of_pow (x : G) :
order_of (x ^ n) = order_of x / gcd (order_of x) n := order_of_pow'' _ _ (exists_pow_eq_one _)
lemma mem_multiples_iff_mem_range_add_order_of [decidable_eq A] :
b ∈ add_submonoid.multiples a ↔
b ∈ (finset.range (add_order_of a)).image ((• a) : ℕ → A) :=
finset.mem_range_iff_mem_finset_range_of_mod_eq' (add_order_of_pos a)
(assume i, nsmul_eq_mod_add_order_of.symm)
@[to_additive mem_multiples_iff_mem_range_add_order_of]
lemma mem_powers_iff_mem_range_order_of [decidable_eq G] :
y ∈ submonoid.powers x ↔ y ∈ (finset.range (order_of x)).image ((^) x : ℕ → G) :=
finset.mem_range_iff_mem_finset_range_of_mod_eq' (order_of_pos x)
(assume i, pow_eq_mod_order_of.symm)
noncomputable instance decidable_multiples [decidable_eq A] :
decidable_pred (add_submonoid.multiples a : set A) :=
begin
assume b,
apply decidable_of_iff' (b ∈ (finset.range (add_order_of a)).image (• a)),
exact mem_multiples_iff_mem_range_add_order_of,
end
@[to_additive decidable_multiples]
noncomputable instance decidable_powers [decidable_eq G] :
decidable_pred (submonoid.powers x : set G) :=
begin
assume y,
apply decidable_of_iff'
(y ∈ (finset.range (order_of x)).image ((^) x)),
exact mem_powers_iff_mem_range_order_of
end
/-- The equivalence between `fin (order_of x)` and `submonoid.powers x`, sending `i` to `x ^ i`. -/
noncomputable def fin_equiv_powers (x : G) :
fin (order_of x) ≃ (submonoid.powers x : set G) :=
equiv.of_bijective (λ n, ⟨x ^ ↑n, ⟨n, rfl⟩⟩) ⟨λ ⟨i, hi⟩ ⟨j, hj⟩ ij,
subtype.mk_eq_mk.2 (pow_injective_of_lt_order_of x hi hj (subtype.mk_eq_mk.1 ij)),
λ ⟨_, i, rfl⟩, ⟨⟨i % order_of x, mod_lt i (order_of_pos x)⟩, subtype.eq pow_eq_mod_order_of.symm⟩⟩
/-- The equivalence between `fin (add_order_of a)` and `add_submonoid.multiples a`,
sending `i` to `i • a`. -/
noncomputable def fin_equiv_multiples (a : A) :
fin (add_order_of a) ≃ (add_submonoid.multiples a : set A) :=
fin_equiv_powers (multiplicative.of_add a)
attribute [to_additive fin_equiv_multiples] fin_equiv_powers
@[simp] lemma fin_equiv_powers_apply {x : G} {n : fin (order_of x)} :
fin_equiv_powers x n = ⟨x ^ ↑n, n, rfl⟩ := rfl
@[simp] lemma fin_equiv_multiples_apply {a : A} {n : fin (add_order_of a)} :
fin_equiv_multiples a n = ⟨nsmul ↑n a, n, rfl⟩ := rfl
attribute [to_additive fin_equiv_multiples_apply] fin_equiv_powers_apply
@[simp] lemma fin_equiv_powers_symm_apply (x : G) (n : ℕ)
{hn : ∃ (m : ℕ), x ^ m = x ^ n} :
((fin_equiv_powers x).symm ⟨x ^ n, hn⟩) = ⟨n % order_of x, nat.mod_lt _ (order_of_pos x)⟩ :=
by rw [equiv.symm_apply_eq, fin_equiv_powers_apply, subtype.mk_eq_mk,
pow_eq_mod_order_of, fin.coe_mk]
@[simp] lemma fin_equiv_multiples_symm_apply (a : A) (n : ℕ)
{hn : ∃ (m : ℕ), m • a = n • a} :
((fin_equiv_multiples a).symm ⟨n • a, hn⟩) =
⟨n % add_order_of a, nat.mod_lt _ (add_order_of_pos a)⟩ :=
fin_equiv_powers_symm_apply (multiplicative.of_add a) n
attribute [to_additive fin_equiv_multiples_symm_apply] fin_equiv_powers_symm_apply
/-- The equivalence between `submonoid.powers` of two elements `x, y` of the same order, mapping
`x ^ i` to `y ^ i`. -/
noncomputable def powers_equiv_powers (h : order_of x = order_of y) :
(submonoid.powers x : set G) ≃ (submonoid.powers y : set G) :=
(fin_equiv_powers x).symm.trans ((fin.cast h).to_equiv.trans (fin_equiv_powers y))
/-- The equivalence between `add_submonoid.multiples` of two elements `a, b` of the same additive order,
mapping `i • a` to `i • b`. -/
noncomputable def multiples_equiv_multiples (h : add_order_of a = add_order_of b) :
(add_submonoid.multiples a : set A) ≃ (add_submonoid.multiples b : set A) :=
(fin_equiv_multiples a).symm.trans ((fin.cast h).to_equiv.trans (fin_equiv_multiples b))
attribute [to_additive multiples_equiv_multiples] powers_equiv_powers
@[simp]
lemma powers_equiv_powers_apply (h : order_of x = order_of y)
(n : ℕ) : powers_equiv_powers h ⟨x ^ n, n, rfl⟩ = ⟨y ^ n, n, rfl⟩ :=
begin
rw [powers_equiv_powers, equiv.trans_apply, equiv.trans_apply,
fin_equiv_powers_symm_apply, ← equiv.eq_symm_apply, fin_equiv_powers_symm_apply],
simp [h]
end
@[simp]
lemma multiples_equiv_multiples_apply (h : add_order_of a = add_order_of b)
(n : ℕ) : multiples_equiv_multiples h ⟨n • a, n, rfl⟩ = ⟨n • b, n, rfl⟩ :=
powers_equiv_powers_apply h n
attribute [to_additive multiples_equiv_multiples_apply] powers_equiv_powers_apply
lemma order_eq_card_powers [decidable_eq G] :
order_of x = fintype.card (submonoid.powers x : set G) :=
(fintype.card_fin (order_of x)).symm.trans (fintype.card_eq.2 ⟨fin_equiv_powers x⟩)
lemma add_order_of_eq_card_multiples [decidable_eq A] :
add_order_of a = fintype.card (add_submonoid.multiples a : set A) :=
(fintype.card_fin (add_order_of a)).symm.trans (fintype.card_eq.2 ⟨fin_equiv_multiples a⟩)
attribute [to_additive add_order_of_eq_card_multiples] order_eq_card_powers
end finite_cancel_monoid
section finite_group
variables [group G] [add_group A]
lemma exists_gpow_eq_one (x : G) : ∃ (i : ℤ) (H : i ≠ 0), x ^ (i : ℤ) = 1 :=
begin
rcases exists_pow_eq_one x with ⟨w, hw1, hw2⟩,
refine ⟨w, int.coe_nat_ne_zero.mpr (ne_of_gt hw1), _⟩,
rw gpow_coe_nat,
exact (is_periodic_pt_mul_iff_pow_eq_one _).mp hw2,
end
lemma exists_gsmul_eq_zero (a : A) : ∃ (i : ℤ) (H : i ≠ 0), i • a = 0 :=
@exists_gpow_eq_one (multiplicative A) _ _ a
attribute [to_additive] exists_gpow_eq_one
lemma mem_multiples_iff_mem_gmultiples :
b ∈ add_submonoid.multiples a ↔ b ∈ add_subgroup.gmultiples a :=
⟨λ ⟨n, hn⟩, ⟨n, by simp * at *⟩, λ ⟨i, hi⟩, ⟨(i % add_order_of a).nat_abs,
by { simp only [nsmul_eq_smul] at hi ⊢,
rwa [← gsmul_coe_nat,
int.nat_abs_of_nonneg (int.mod_nonneg _ (int.coe_nat_ne_zero_iff_pos.2
(add_order_of_pos a))), ← gsmul_eq_mod_add_order_of] } ⟩⟩
open subgroup
@[to_additive mem_multiples_iff_mem_gmultiples]
lemma mem_powers_iff_mem_gpowers : y ∈ submonoid.powers x ↔ y ∈ gpowers x :=
⟨λ ⟨n, hn⟩, ⟨n, by simp * at *⟩,
λ ⟨i, hi⟩, ⟨(i % order_of x).nat_abs,
by rwa [← gpow_coe_nat, int.nat_abs_of_nonneg (int.mod_nonneg _
(int.coe_nat_ne_zero_iff_pos.2 (order_of_pos x))),
← gpow_eq_mod_order_of]⟩⟩
lemma multiples_eq_gmultiples (a : A) :
(add_submonoid.multiples a : set A) = add_subgroup.gmultiples a :=
set.ext $ λ y, mem_multiples_iff_mem_gmultiples
@[to_additive multiples_eq_gmultiples]
lemma powers_eq_gpowers (x : G) : (submonoid.powers x : set G) = gpowers x :=
set.ext $ λ x, mem_powers_iff_mem_gpowers
lemma mem_gmultiples_iff_mem_range_add_order_of [decidable_eq A] :
b ∈ add_subgroup.gmultiples a ↔ b ∈ (finset.range (add_order_of a)).image (• a) :=
by rw [← mem_multiples_iff_mem_gmultiples, mem_multiples_iff_mem_range_add_order_of]
@[to_additive mem_gmultiples_iff_mem_range_add_order_of]
lemma mem_gpowers_iff_mem_range_order_of [decidable_eq G] :
y ∈ subgroup.gpowers x ↔ y ∈ (finset.range (order_of x)).image ((^) x : ℕ → G) :=
by rw [← mem_powers_iff_mem_gpowers, mem_powers_iff_mem_range_order_of]
noncomputable instance decidable_gmultiples [decidable_eq A] :
decidable_pred (add_subgroup.gmultiples a : set A) :=
begin
rw ← multiples_eq_gmultiples,
exact decidable_multiples,
end
@[to_additive decidable_gmultiples]
noncomputable instance decidable_gpowers [decidable_eq G] :
decidable_pred (subgroup.gpowers x : set G) :=
begin
rw ← powers_eq_gpowers,
exact decidable_powers,
end
/-- The equivalence between `fin (order_of x)` and `subgroup.gpowers x`, sending `i` to `x ^ i`. -/
noncomputable def fin_equiv_gpowers (x : G) :
fin (order_of x) ≃ (subgroup.gpowers x : set G) :=
(fin_equiv_powers x).trans (equiv.set.of_eq (powers_eq_gpowers x))
/-- The equivalence between `fin (add_order_of a)` and `add_subgroup.gmultiples a`,
sending `i` to `i • a`. -/
noncomputable def fin_equiv_gmultiples (a : A) :
fin (add_order_of a) ≃ (add_subgroup.gmultiples a : set A) :=
fin_equiv_gpowers (multiplicative.of_add a)
attribute [to_additive fin_equiv_gmultiples] fin_equiv_gpowers
@[simp] lemma fin_equiv_gpowers_apply {n : fin (order_of x)} :
fin_equiv_gpowers x n = ⟨x ^ (n : ℕ), n, gpow_coe_nat x n⟩ := rfl
@[simp] lemma fin_equiv_gmultiples_apply {n : fin (add_order_of a)} :
fin_equiv_gmultiples a n = ⟨(n : ℕ) • a, n, gsmul_coe_nat a n⟩ :=
fin_equiv_gpowers_apply
attribute [to_additive fin_equiv_gmultiples_apply] fin_equiv_gpowers_apply
@[simp] lemma fin_equiv_gpowers_symm_apply (x : G) (n : ℕ)
{hn : ∃ (m : ℤ), x ^ m = x ^ n} :
((fin_equiv_gpowers x).symm ⟨x ^ n, hn⟩) = ⟨n % order_of x, nat.mod_lt _ (order_of_pos x)⟩ :=
by { rw [fin_equiv_gpowers, equiv.symm_trans_apply, equiv.set.of_eq_symm_apply],
exact fin_equiv_powers_symm_apply x n }
@[simp] lemma fin_equiv_gmultiples_symm_apply (a : A) (n : ℕ)
{hn : ∃ (m : ℤ), m • a = n • a} :
((fin_equiv_gmultiples a).symm ⟨n • a, hn⟩) =
⟨n % add_order_of a, nat.mod_lt _ (add_order_of_pos a)⟩ :=
fin_equiv_gpowers_symm_apply (multiplicative.of_add a) n
attribute [to_additive fin_equiv_gmultiples_symm_apply] fin_equiv_gpowers_symm_apply
/-- The equivalence between `subgroup.gpowers` of two elements `x, y` of the same order, mapping
`x ^ i` to `y ^ i`. -/
noncomputable def gpowers_equiv_gpowers (h : order_of x = order_of y) :
(subgroup.gpowers x : set G) ≃ (subgroup.gpowers y : set G) :=
(fin_equiv_gpowers x).symm.trans ((fin.cast h).to_equiv.trans (fin_equiv_gpowers y))
/-- The equivalence between `add_subgroup.gmultiples` of two elements `a, b` of the same additive order,
mapping `i • a` to `i • b`. -/
noncomputable def gmultiples_equiv_gmultiples (h : add_order_of a = add_order_of b) :
(add_subgroup.gmultiples a : set A) ≃ (add_subgroup.gmultiples b : set A) :=
(fin_equiv_gmultiples a).symm.trans ((fin.cast h).to_equiv.trans (fin_equiv_gmultiples b))
attribute [to_additive gmultiples_equiv_gmultiples] gpowers_equiv_gpowers
@[simp]
lemma gpowers_equiv_gpowers_apply (h : order_of x = order_of y)
(n : ℕ) : gpowers_equiv_gpowers h ⟨x ^ n, n, gpow_coe_nat x n⟩ = ⟨y ^ n, n, gpow_coe_nat y n⟩ :=
begin
rw [gpowers_equiv_gpowers, equiv.trans_apply, equiv.trans_apply,
fin_equiv_gpowers_symm_apply, ← equiv.eq_symm_apply, fin_equiv_gpowers_symm_apply],
simp [h]
end
@[simp]
lemma gmultiples_equiv_gmultiples_apply (h : add_order_of a = add_order_of b) (n : ℕ) :
gmultiples_equiv_gmultiples h ⟨n • a, n, gsmul_coe_nat a n⟩ = ⟨n • b, n, gsmul_coe_nat b n⟩ :=
gpowers_equiv_gpowers_apply h n
attribute [to_additive gmultiples_equiv_gmultiples_apply] gpowers_equiv_gpowers_apply
lemma order_eq_card_gpowers [decidable_eq G] :
order_of x = fintype.card (subgroup.gpowers x : set G) :=
(fintype.card_fin (order_of x)).symm.trans (fintype.card_eq.2 ⟨fin_equiv_gpowers x⟩)
lemma add_order_eq_card_gmultiples [decidable_eq A] :
add_order_of a = fintype.card (add_subgroup.gmultiples a : set A) :=
(fintype.card_fin (add_order_of a)).symm.trans (fintype.card_eq.2 ⟨fin_equiv_gmultiples a⟩)
attribute [to_additive add_order_eq_card_gmultiples] order_eq_card_gpowers
open quotient_group
/- TODO: use cardinal theory, introduce `card : set G → ℕ`, or setup decidability for cosets -/
lemma order_of_dvd_card_univ : order_of x ∣ fintype.card G :=
begin
classical,
have ft_prod : fintype (quotient (gpowers x) × (gpowers x)),
from fintype.of_equiv G group_equiv_quotient_times_subgroup,
have ft_s : fintype (gpowers x),
from @fintype.fintype_prod_right _ _ _ ft_prod _,
have ft_cosets : fintype (quotient (gpowers x)),
from @fintype.fintype_prod_left _ _ _ ft_prod ⟨⟨1, (gpowers x).one_mem⟩⟩,
have eq₁ : fintype.card G = @fintype.card _ ft_cosets * @fintype.card _ ft_s,
from calc fintype.card G = @fintype.card _ ft_prod :
@fintype.card_congr _ _ _ ft_prod group_equiv_quotient_times_subgroup
... = @fintype.card _ (@prod.fintype _ _ ft_cosets ft_s) :
congr_arg (@fintype.card _) $ subsingleton.elim _ _
... = @fintype.card _ ft_cosets * @fintype.card _ ft_s :
@fintype.card_prod _ _ ft_cosets ft_s,
have eq₂ : order_of x = @fintype.card _ ft_s,
from calc order_of x = _ : order_eq_card_gpowers
... = _ : congr_arg (@fintype.card _) $ subsingleton.elim _ _,
exact dvd.intro (@fintype.card (quotient (subgroup.gpowers x)) ft_cosets)
(by rw [eq₁, eq₂, mul_comm])
end
lemma add_order_of_dvd_card_univ : add_order_of a ∣ fintype.card A :=
begin
rw ← order_of_of_add_eq_add_order_of,
exact order_of_dvd_card_univ,
end
attribute [to_additive add_order_of_dvd_card_univ] order_of_dvd_card_univ
@[simp] lemma pow_card_eq_one : x ^ fintype.card G = 1 :=
let ⟨m, hm⟩ := @order_of_dvd_card_univ _ x _ _ in
by simp [hm, pow_mul, pow_order_of_eq_one]
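/- Usage sketch (illustrative, not part of the original file): consequently any multiple of the
group's cardinality also kills `x`. -/
example (k : ℕ) : x ^ (fintype.card G * k) = 1 :=
by rw [pow_mul, pow_card_eq_one, one_pow]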
@[simp] lemma card_nsmul_eq_zero {a : A} : fintype.card A • a = 0 :=
begin
apply multiplicative.of_add.injective,
rw [of_add_nsmul, of_add_zero],
exact pow_card_eq_one,
end
attribute [to_additive card_nsmul_eq_zero] pow_card_eq_one
variable (a)
lemma image_range_add_order_of [decidable_eq A] :
finset.image (λ i, i • a) (finset.range (add_order_of a)) =
(add_subgroup.gmultiples a : set A).to_finset :=
by {ext x, rw [set.mem_to_finset, set_like.mem_coe, mem_gmultiples_iff_mem_range_add_order_of] }
/-- TODO: Generalise to `submonoid.powers`.-/
@[to_additive image_range_add_order_of]
lemma image_range_order_of [decidable_eq G] :
finset.image (λ i, x ^ i) (finset.range (order_of x)) = (gpowers x : set G).to_finset :=
by { ext x, rw [set.mem_to_finset, set_like.mem_coe, mem_gpowers_iff_mem_range_order_of] }
lemma gcd_nsmul_card_eq_zero_iff : n • a = 0 ↔ (gcd n (fintype.card A)) • a = 0 :=
⟨λ h, gcd_nsmul_eq_zero _ h $ card_nsmul_eq_zero,
λ h, let ⟨m, hm⟩ := gcd_dvd_left n (fintype.card A) in
by rw [hm, mul_comm, mul_nsmul, h, nsmul_zero]⟩
/-- TODO: Generalise to `finite_cancel_monoid`. -/
@[to_additive gcd_nsmul_card_eq_zero_iff]
lemma pow_gcd_card_eq_one_iff : x ^ n = 1 ↔ x ^ (gcd n (fintype.card G)) = 1 :=
⟨λ h, pow_gcd_eq_one _ h $ pow_card_eq_one,
λ h, let ⟨m, hm⟩ := gcd_dvd_left n (fintype.card G) in
by rw [hm, pow_mul, h, one_pow]⟩
end finite_group
end fintype
|
589093de653e99248834b4345434c625c6bc2d62
|
94637389e03c919023691dcd05bd4411b1034aa5
|
/src/zzz_junk/has_one/has_one_nat.lean
|
40d93262272810cf18fcdabd8f849976aacb0364
|
[] |
no_license
|
kevinsullivan/complogic-s21
|
7c4eef2105abad899e46502270d9829d913e8afc
|
99039501b770248c8ceb39890be5dfe129dc1082
|
refs/heads/master
| 1,682,985,669,944
| 1,621,126,241,000
| 1,621,126,241,000
| 335,706,272
| 0
| 38
| null | 1,618,325,669,000
| 1,612,374,118,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 558
|
lean
|
import .has_one
namespace hidden
-- Overload has_one (the multiplicative identity typeclass) for nat
instance has_one_nat : has_one nat := ⟨ 1, sorry, sorry ⟩
/-
Again this code would typically go in the module
that defines the nat type, since it is here that we
define the has_one instance for this type.
-/
/-
Now anywhere else in a code base that we need a nat,
we can use typeclass inference to find the instance,
n, of the has_one typeclass for the nat type and
use it to return a satisfactory answer.
-/
def getMeANat [n : has_one nat] : nat := n.one
#eval getMeANat
end hidden
|
161cd480c89b695bd79f83ee8e50d3304f588e2c
|
4727251e0cd73359b15b664c3170e5d754078599
|
/src/data/buffer/parser/basic.lean
|
4c9b11621f64bf8bebf4b411e626e7ee1c22d9aa
|
[
"Apache-2.0"
] |
permissive
|
Vierkantor/mathlib
|
0ea59ac32a3a43c93c44d70f441c4ee810ccceca
|
83bc3b9ce9b13910b57bda6b56222495ebd31c2f
|
refs/heads/master
| 1,658,323,012,449
| 1,652,256,003,000
| 1,652,256,003,000
| 209,296,341
| 0
| 1
|
Apache-2.0
| 1,568,807,655,000
| 1,568,807,655,000
| null |
UTF-8
|
Lean
| false
| false
| 110,006
|
lean
|
/-
Copyright (c) 2020 Yakov Pechersky. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yakov Pechersky
-/
import data.string.basic
import data.buffer.basic
import data.nat.digits
import data.buffer.parser
/-!
# Parsers
`parser α` is the type that describes a computation that can ingest a `char_buffer`
and output, if successful, a term of type `α`.
This file expands on the definitions in the core library, proving that all the core library
parsers are `mono`. There are also lemmas on the composability of parsers.
## Main definitions
* `parse_result.pos` : The position of a `char_buffer` at which a `parser α` has finished.
* `parser.mono` : The property that a parser only moves forward within a buffer,
in both cases of success or failure.
## Implementation details
Lemmas about how parsers are mono are in the `mono` namespace. That allows using projection
notation for shorter term proofs that are parallel to the definitions of the parsers in structure.
-/
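/-
Usage sketch (illustrative, not part of the original file), assuming the core definitions
`parser.nat` and `parser.run_string` from `data.buffer.parser`:

  #eval parser.nat.run_string "123"   -- expected: something like `(sum.inr 123)`

The lemmas below reason about such parsers at the level of the underlying `parse_result`.
-/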
open parser parse_result
/--
For some `parse_result α`, give the position at which the result was provided, in either the
`done` or the `fail` case.
-/
@[simp] def parse_result.pos {α} : parse_result α → ℕ
| (done n _) := n
| (fail n _) := n
namespace parser
section defn_lemmas
variables {α β : Type} (msgs : thunk (list string)) (msg : thunk string)
variables (p q : parser α) (cb : char_buffer) (n n' : ℕ) {err : dlist string}
variables {a : α} {b : β}
/--
A `p : parser α` is defined to be `mono` if the result `p cb n` it gives,
for some `cb : char_buffer` and `n : ℕ`, (whether `done` or `fail`),
is always at a `parse_result.pos` that is at least `n`.
The `mono` property is used mainly for proper `orelse` behavior.
-/
class mono : Prop :=
(le' : ∀ (cb : char_buffer) (n : ℕ), n ≤ (p cb n).pos)
lemma mono.le [p.mono] : n ≤ (p cb n).pos := mono.le' cb n
/--
A `parser α` is defined to be `static` if it does not move on success.
-/
class static : Prop :=
(of_done : ∀ {cb : char_buffer} {n n' : ℕ} {a : α}, p cb n = done n' a → n = n')
/--
A `parser α` is defined to be `err_static` if it does not move on error.
-/
class err_static : Prop :=
(of_fail : ∀ {cb : char_buffer} {n n' : ℕ} {err : dlist string}, p cb n = fail n' err → n = n')
/--
A `parser α` is defined to be `step` if it always moves exactly one char forward on success.
-/
class step : Prop :=
(of_done : ∀ {cb : char_buffer} {n n' : ℕ} {a : α}, p cb n = done n' a → n' = n + 1)
/--
A `parser α` is defined to be `prog` if it always moves forward on success.
-/
class prog : Prop :=
(of_done : ∀ {cb : char_buffer} {n n' : ℕ} {a : α}, p cb n = done n' a → n < n')
/--
A `parser α` is defined to be `bounded` if it produces a
`fail` `parse_result` when it is parsing outside the provided `char_buffer`.
-/
class bounded : Prop :=
(ex' : ∀ {cb : char_buffer} {n : ℕ}, cb.size ≤ n → ∃ (n' : ℕ) (err : dlist string),
p cb n = fail n' err)
lemma bounded.exists (p : parser α) [p.bounded] {cb : char_buffer} {n : ℕ} (h : cb.size ≤ n) :
∃ (n' : ℕ) (err : dlist string), p cb n = fail n' err :=
bounded.ex' h
/--
A `parser α` is defined to be `unfailing` if it always produces a `done` `parse_result`.
-/
class unfailing : Prop :=
(ex' : ∀ (cb : char_buffer) (n : ℕ), ∃ (n' : ℕ) (a : α), p cb n = done n' a)
/--
A `parser α` is defined to be `conditionally_unfailing` if it produces a
`done` `parse_result` as long as it is parsing within the provided `char_buffer`.
-/
class conditionally_unfailing : Prop :=
(ex' : ∀ {cb : char_buffer} {n : ℕ}, n < cb.size → ∃ (n' : ℕ) (a : α), p cb n = done n' a)
lemma fail_iff :
(∀ pos' result, p cb n ≠ done pos' result) ↔
∃ (pos' : ℕ) (err : dlist string), p cb n = fail pos' err :=
by cases p cb n; simp
lemma success_iff :
(∀ pos' err, p cb n ≠ fail pos' err) ↔ ∃ (pos' : ℕ) (result : α), p cb n = done pos' result :=
by cases p cb n; simp
variables {p q cb n n' msgs msg}
lemma mono.of_done [p.mono] (h : p cb n = done n' a) : n ≤ n' :=
by simpa [h] using mono.le p cb n
lemma mono.of_fail [p.mono] (h : p cb n = fail n' err) : n ≤ n' :=
by simpa [h] using mono.le p cb n
lemma bounded.of_done [p.bounded] (h : p cb n = done n' a) : n < cb.size :=
begin
contrapose! h,
obtain ⟨np, err, hp⟩ := bounded.exists p h,
simp [hp]
end
lemma static.iff :
static p ↔ (∀ (cb : char_buffer) (n n' : ℕ) (a : α), p cb n = done n' a → n = n') :=
⟨λ h _ _ _ _ hp, by { haveI := h, exact static.of_done hp}, λ h, ⟨h⟩⟩
lemma exists_done (p : parser α) [p.unfailing] (cb : char_buffer) (n : ℕ) :
∃ (n' : ℕ) (a : α), p cb n = done n' a :=
unfailing.ex' cb n
lemma unfailing.of_fail [p.unfailing] (h : p cb n = fail n' err) : false :=
begin
obtain ⟨np, a, hp⟩ := p.exists_done cb n,
simpa [hp] using h
end
@[priority 100] -- see Note [lower instance priority]
instance conditionally_unfailing_of_unfailing [p.unfailing] : conditionally_unfailing p :=
⟨λ _ _ _, p.exists_done _ _⟩
lemma exists_done_in_bounds (p : parser α) [p.conditionally_unfailing] {cb : char_buffer} {n : ℕ}
(h : n < cb.size) : ∃ (n' : ℕ) (a : α), p cb n = done n' a :=
conditionally_unfailing.ex' h
lemma conditionally_unfailing.of_fail [p.conditionally_unfailing] (h : p cb n = fail n' err)
(hn : n < cb.size) : false :=
begin
obtain ⟨np, a, hp⟩ := p.exists_done_in_bounds hn,
simpa [hp] using h
end
lemma decorate_errors_fail (h : p cb n = fail n' err) :
@decorate_errors α msgs p cb n = fail n ((dlist.lazy_of_list (msgs ()))) :=
by simp [decorate_errors, h]
lemma decorate_errors_success (h : p cb n = done n' a) :
@decorate_errors α msgs p cb n = done n' a :=
by simp [decorate_errors, h]
lemma decorate_error_fail (h : p cb n = fail n' err) :
@decorate_error α msg p cb n = fail n ((dlist.lazy_of_list ([msg ()]))) :=
decorate_errors_fail h
lemma decorate_error_success (h : p cb n = done n' a) :
@decorate_error α msg p cb n = done n' a :=
decorate_errors_success h
@[simp] lemma decorate_errors_eq_done :
@decorate_errors α msgs p cb n = done n' a ↔ p cb n = done n' a :=
by cases h : p cb n; simp [decorate_errors, h]
@[simp] lemma decorate_error_eq_done :
@decorate_error α msg p cb n = done n' a ↔ p cb n = done n' a :=
decorate_errors_eq_done
@[simp] lemma decorate_errors_eq_fail :
@decorate_errors α msgs p cb n = fail n' err ↔
n = n' ∧ err = dlist.lazy_of_list (msgs ()) ∧ ∃ np err', p cb n = fail np err' :=
by cases h : p cb n; simp [decorate_errors, h, eq_comm]
@[simp] lemma decorate_error_eq_fail :
@decorate_error α msg p cb n = fail n' err ↔
n = n' ∧ err = dlist.lazy_of_list ([msg ()]) ∧ ∃ np err', p cb n = fail np err' :=
decorate_errors_eq_fail
@[simp] lemma return_eq_pure : (@return parser _ _ a) = pure a := rfl
lemma pure_eq_done : (@pure parser _ _ a) = λ _ n, done n a := rfl
@[simp] lemma pure_ne_fail : (pure a : parser α) cb n ≠ fail n' err := by simp [pure_eq_done]
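/- Concrete sanity check (added illustration): `pure` consumes no input and returns its
argument unchanged. -/
example : (pure 0 : parser ℕ) buffer.nil 0 = done 0 0 := by simp [pure_eq_done]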
section bind
variable (f : α → parser β)
@[simp] lemma bind_eq_bind : p.bind f = p >>= f := rfl
variable {f}
@[simp] lemma bind_eq_done :
(p >>= f) cb n = done n' b ↔
∃ (np : ℕ) (a : α), p cb n = done np a ∧ f a cb np = done n' b :=
by cases hp : p cb n; simp [hp, ←bind_eq_bind, parser.bind, and_assoc]
@[simp] lemma bind_eq_fail :
(p >>= f) cb n = fail n' err ↔
(p cb n = fail n' err) ∨ (∃ (np : ℕ) (a : α), p cb n = done np a ∧ f a cb np = fail n' err) :=
by cases hp : p cb n; simp [hp, ←bind_eq_bind, parser.bind, and_assoc]
@[simp] lemma and_then_eq_bind {α β : Type} {m : Type → Type} [monad m] (a : m α) (b : m β) :
a >> b = a >>= (λ _, b) := rfl
lemma and_then_fail :
(p >> return ()) cb n = parse_result.fail n' err ↔ p cb n = fail n' err :=
by simp [pure_eq_done]
lemma and_then_success :
  (p >> return ()) cb n = parse_result.done n' () ↔ ∃ a, p cb n = done n' a :=
by simp [pure_eq_done]
end bind
section map
variable {f : α → β}
@[simp] lemma map_eq_done : (f <$> p) cb n = done n' b ↔
∃ (a : α), p cb n = done n' a ∧ f a = b :=
by cases hp : p cb n; simp [←is_lawful_monad.bind_pure_comp_eq_map, hp, and_assoc, pure_eq_done]
@[simp] lemma map_eq_fail : (f <$> p) cb n = fail n' err ↔ p cb n = fail n' err :=
by simp [←bind_pure_comp_eq_map, pure_eq_done]
@[simp] lemma map_const_eq_done {b'} : (b <$ p) cb n = done n' b' ↔
∃ (a : α), p cb n = done n' a ∧ b = b' :=
by simp [map_const_eq]
@[simp] lemma map_const_eq_fail : (b <$ p) cb n = fail n' err ↔ p cb n = fail n' err :=
by simp only [map_const_eq, map_eq_fail]
lemma map_const_rev_eq_done {b'} : (p $> b) cb n = done n' b' ↔
∃ (a : α), p cb n = done n' a ∧ b = b' :=
map_const_eq_done
lemma map_rev_const_eq_fail : (p $> b) cb n = fail n' err ↔ p cb n = fail n' err :=
map_const_eq_fail
end map
@[simp] lemma orelse_eq_orelse : p.orelse q = (p <|> q) := rfl
@[simp] lemma orelse_eq_done : (p <|> q) cb n = done n' a ↔
(p cb n = done n' a ∨ (q cb n = done n' a ∧ ∃ err, p cb n = fail n err)) :=
begin
cases hp : p cb n with np resp np errp,
{ simp [hp, ←orelse_eq_orelse, parser.orelse] },
{ by_cases hn : np = n,
{ cases hq : q cb n with nq resq nq errq,
{ simp [hp, hn, hq, ←orelse_eq_orelse, parser.orelse] },
{ rcases lt_trichotomy nq n with H|rfl|H;
simp [hp, hn, hq, H, not_lt_of_lt H, lt_irrefl, ←orelse_eq_orelse, parser.orelse] <|>
simp [hp, hn, hq, lt_irrefl, ←orelse_eq_orelse, parser.orelse] } },
{ simp [hp, hn, ←orelse_eq_orelse, parser.orelse] } }
end
@[simp] lemma orelse_eq_fail_eq : (p <|> q) cb n = fail n err ↔
(p cb n = fail n err ∧ ∃ (nq errq), n < nq ∧ q cb n = fail nq errq) ∨
  (∃ (errp errq), p cb n = fail n errp ∧ q cb n = fail n errq ∧ errp ++ errq = err) :=
begin
cases hp : p cb n with np resp np errp,
{ simp [hp, ←orelse_eq_orelse, parser.orelse] },
{ by_cases hn : np = n,
{ cases hq : q cb n with nq resq nq errq,
{ simp [hp, hn, hq, ←orelse_eq_orelse, parser.orelse] },
{ rcases lt_trichotomy nq n with H|rfl|H;
simp [hp, hq, hn, ←orelse_eq_orelse, parser.orelse, H,
ne_of_gt H, ne_of_lt H, not_lt_of_lt H] <|>
simp [hp, hq, hn, ←orelse_eq_orelse, parser.orelse, lt_irrefl] } },
{ simp [hp, hn, ←orelse_eq_orelse, parser.orelse] } }
end
lemma orelse_eq_fail_not_mono_lt (hn : n' < n) : (p <|> q) cb n = fail n' err ↔
(p cb n = fail n' err) ∨
(q cb n = fail n' err ∧ (∃ (errp), p cb n = fail n errp)) :=
begin
cases hp : p cb n with np resp np errp,
{ simp [hp, ←orelse_eq_orelse, parser.orelse] },
{ by_cases h : np = n,
{ cases hq : q cb n with nq resq nq errq,
{ simp [hp, h, hn, hq, ne_of_gt hn, ←orelse_eq_orelse, parser.orelse] },
{ rcases lt_trichotomy nq n with H|H|H,
{ simp [hp, hq, h, H, ne_of_gt hn, not_lt_of_lt H, ←orelse_eq_orelse, parser.orelse] },
{ simp [hp, hq, h, H, ne_of_gt hn, lt_irrefl, ←orelse_eq_orelse, parser.orelse] },
{ simp [hp, hq, h, H, ne_of_gt (hn.trans H), ←orelse_eq_orelse, parser.orelse] } } },
{ simp [hp, h, ←orelse_eq_orelse, parser.orelse] } }
end
lemma orelse_eq_fail_of_mono_ne [q.mono] (hn : n ≠ n') :
(p <|> q) cb n = fail n' err ↔ p cb n = fail n' err :=
begin
cases hp : p cb n with np resp np errp,
{ simp [hp, ←orelse_eq_orelse, parser.orelse] },
{ by_cases h : np = n,
{ cases hq : q cb n with nq resq nq errq,
{ simp [hp, h, hn, hq, hn, ←orelse_eq_orelse, parser.orelse] },
{ have : n ≤ nq := mono.of_fail hq,
rcases eq_or_lt_of_le this with rfl|H,
{ simp [hp, hq, h, hn, lt_irrefl, ←orelse_eq_orelse, parser.orelse] },
{ simp [hp, hq, h, hn, H, ←orelse_eq_orelse, parser.orelse] } } },
{ simp [hp, h, ←orelse_eq_orelse, parser.orelse] } },
end
@[simp] lemma failure_eq_failure : @parser.failure α = failure := rfl
@[simp] lemma failure_def : (failure : parser α) cb n = fail n dlist.empty := rfl
lemma not_failure_eq_done : ¬ (failure : parser α) cb n = done n' a :=
by simp
lemma failure_eq_fail : (failure : parser α) cb n = fail n' err ↔ n = n' ∧ err = dlist.empty :=
by simp [eq_comm]
lemma seq_eq_done {f : parser (α → β)} {p : parser α} : (f <*> p) cb n = done n' b ↔
∃ (nf : ℕ) (f' : α → β) (a : α), f cb n = done nf f' ∧ p cb nf = done n' a ∧ f' a = b :=
by simp [seq_eq_bind_map]
lemma seq_eq_fail {f : parser (α → β)} {p : parser α} : (f <*> p) cb n = fail n' err ↔
(f cb n = fail n' err) ∨ (∃ (nf : ℕ) (f' : α → β), f cb n = done nf f' ∧ p cb nf = fail n' err) :=
by simp [seq_eq_bind_map]
lemma seq_left_eq_done {p : parser α} {q : parser β} : (p <* q) cb n = done n' a ↔
∃ (np : ℕ) (b : β), p cb n = done np a ∧ q cb np = done n' b :=
begin
have : ∀ (p q : ℕ → α → Prop),
(∃ (np : ℕ) (x : α), p np x ∧ q np x ∧ x = a) ↔ ∃ (np : ℕ), p np a ∧ q np a :=
λ _ _, ⟨λ ⟨np, x, hp, hq, rfl⟩, ⟨np, hp, hq⟩, λ ⟨np, hp, hq⟩, ⟨np, a, hp, hq, rfl⟩⟩,
simp [seq_left_eq, seq_eq_done, map_eq_done, this]
end
lemma seq_left_eq_fail {p : parser α} {q : parser β} : (p <* q) cb n = fail n' err ↔
(p cb n = fail n' err) ∨ (∃ (np : ℕ) (a : α), p cb n = done np a ∧ q cb np = fail n' err) :=
by simp [seq_left_eq, seq_eq_fail]
lemma seq_right_eq_done {p : parser α} {q : parser β} : (p *> q) cb n = done n' b ↔
∃ (np : ℕ) (a : α), p cb n = done np a ∧ q cb np = done n' b :=
by simp [seq_right_eq, seq_eq_done, map_eq_done, and.comm, and.assoc]
lemma seq_right_eq_fail {p : parser α} {q : parser β} : (p *> q) cb n = fail n' err ↔
(p cb n = fail n' err) ∨ (∃ (np : ℕ) (a : α), p cb n = done np a ∧ q cb np = fail n' err) :=
by simp [seq_right_eq, seq_eq_fail]
lemma mmap_eq_done {f : α → parser β} {a : α} {l : list α} {b : β} {l' : list β} :
(a :: l).mmap f cb n = done n' (b :: l') ↔
∃ (np : ℕ), f a cb n = done np b ∧ l.mmap f cb np = done n' l' :=
by simp [mmap, and.comm, and.assoc, and.left_comm, pure_eq_done]
lemma mmap'_eq_done {f : α → parser β} {a : α} {l : list α} :
(a :: l).mmap' f cb n = done n' () ↔
∃ (np : ℕ) (b : β), f a cb n = done np b ∧ l.mmap' f cb np = done n' () :=
by simp [mmap']
lemma guard_eq_done {p : Prop} [decidable p] {u : unit} :
@guard parser _ p _ cb n = done n' u ↔ p ∧ n = n' :=
by { by_cases hp : p; simp [guard, hp, pure_eq_done] }
lemma guard_eq_fail {p : Prop} [decidable p] :
@guard parser _ p _ cb n = fail n' err ↔ (¬ p) ∧ n = n' ∧ err = dlist.empty :=
by { by_cases hp : p; simp [guard, hp, eq_comm, pure_eq_done] }
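/- Concrete sketch (added illustration): `guard` of a (decidably) true proposition succeeds
without moving the position. -/
example : (guard true : parser unit) cb n = done n () := guard_eq_done.mpr ⟨trivial, rfl⟩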
namespace mono
variables {sep : parser unit}
instance pure : mono (pure a) :=
⟨λ _ _, by simp [pure_eq_done]⟩
instance bind {f : α → parser β} [p.mono] [∀ a, (f a).mono] :
(p >>= f).mono :=
begin
constructor,
intros cb n,
cases hx : (p >>= f) cb n,
{ obtain ⟨n', a, h, h'⟩ := bind_eq_done.mp hx,
refine le_trans (of_done h) _,
simpa [h'] using of_done h' },
{ obtain h | ⟨n', a, h, h'⟩ := bind_eq_fail.mp hx,
{ simpa [h] using of_fail h },
{ refine le_trans (of_done h) _,
simpa [h'] using of_fail h' } }
end
instance and_then {q : parser β} [p.mono] [q.mono] : (p >> q).mono := mono.bind
instance map [p.mono] {f : α → β} : (f <$> p).mono := mono.bind
instance seq {f : parser (α → β)} [f.mono] [p.mono] : (f <*> p).mono := mono.bind
instance mmap : Π {l : list α} {f : α → parser β} [∀ a ∈ l, (f a).mono],
(l.mmap f).mono
| [] _ _ := mono.pure
| (a :: l) f h := begin
convert mono.bind,
{ exact h _ (list.mem_cons_self _ _) },
{ intro,
convert mono.map,
convert mmap,
exact (λ _ ha, h _ (list.mem_cons_of_mem _ ha)) }
end
instance mmap' : Π {l : list α} {f : α → parser β} [∀ a ∈ l, (f a).mono],
(l.mmap' f).mono
| [] _ _ := mono.pure
| (a :: l) f h := begin
convert mono.and_then,
{ exact h _ (list.mem_cons_self _ _) },
{ convert mmap',
exact (λ _ ha, h _ (list.mem_cons_of_mem _ ha)) }
end
instance failure : (failure : parser α).mono :=
⟨by simp [le_refl]⟩
instance guard {p : Prop} [decidable p] : mono (guard p) :=
⟨by { by_cases h : p; simp [h, pure_eq_done, le_refl] }⟩
instance orelse [p.mono] [q.mono] : (p <|> q).mono :=
begin
constructor,
intros cb n,
cases hx : (p <|> q) cb n with posx resx posx errx,
{ obtain h | ⟨h, -, -⟩ := orelse_eq_done.mp hx;
simpa [h] using of_done h },
{ by_cases h : n = posx,
{ simp [hx, h] },
{ simp only [orelse_eq_fail_of_mono_ne h] at hx,
exact of_fail hx } }
end
instance decorate_errors [p.mono] :
(@decorate_errors α msgs p).mono :=
begin
constructor,
intros cb n,
cases h : p cb n,
{ simpa [decorate_errors, h] using of_done h },
{ simp [decorate_errors, h] }
end
instance decorate_error [p.mono] : (@decorate_error α msg p).mono :=
mono.decorate_errors
instance any_char : mono any_char :=
begin
constructor,
intros cb n,
by_cases h : n < cb.size;
simp [any_char, h],
end
instance sat {p : char → Prop} [decidable_pred p] : mono (sat p) :=
begin
constructor,
intros cb n,
simp only [sat],
split_ifs;
simp
end
instance eps : mono eps := mono.pure
instance ch {c : char} : mono (ch c) := mono.decorate_error
instance char_buf {s : char_buffer} : mono (char_buf s) :=
mono.decorate_error
instance one_of {cs : list char} : (one_of cs).mono :=
mono.decorate_errors
instance one_of' {cs : list char} : (one_of' cs).mono :=
mono.and_then
instance str {s : string} : (str s).mono :=
mono.decorate_error
instance remaining : remaining.mono :=
⟨λ _ _, le_rfl⟩
instance eof : eof.mono :=
mono.decorate_error
instance foldr_core {f : α → β → β} {b : β} [p.mono] :
∀ {reps : ℕ}, (foldr_core f p b reps).mono
| 0 := mono.failure
| (reps + 1) := begin
convert mono.orelse,
{ convert mono.bind,
{ apply_instance },
{ exact λ _, @mono.bind _ _ _ _ foldr_core _ } },
{ exact mono.pure }
end
instance foldr {f : α → β → β} [p.mono] : mono (foldr f p b) :=
⟨λ _ _, by { convert mono.le (foldr_core f p b _) _ _, exact mono.foldr_core }⟩
instance foldl_core {f : α → β → α} {p : parser β} [p.mono] :
∀ {a : α} {reps : ℕ}, (foldl_core f a p reps).mono
| _ 0 := mono.failure
| _ (reps + 1) := begin
convert mono.orelse,
{ convert mono.bind,
{ apply_instance },
{ exact λ _, foldl_core } },
{ exact mono.pure }
end
instance foldl {f : α → β → α} {p : parser β} [p.mono] : mono (foldl f a p) :=
⟨λ _ _, by { convert mono.le (foldl_core f a p _) _ _, exact mono.foldl_core }⟩
instance many [p.mono] : p.many.mono :=
mono.foldr
instance many_char {p : parser char} [p.mono] : p.many_char.mono :=
mono.map
instance many' [p.mono] : p.many'.mono :=
mono.and_then
instance many1 [p.mono] : p.many1.mono :=
mono.seq
instance many_char1 {p : parser char} [p.mono] : p.many_char1.mono :=
mono.map
instance sep_by1 [p.mono] [sep.mono] : mono (sep_by1 sep p) :=
mono.seq
instance sep_by [p.mono] [hs : sep.mono] : mono (sep_by sep p) :=
mono.orelse
lemma fix_core {F : parser α → parser α} (hF : ∀ (p : parser α), p.mono → (F p).mono) :
∀ (max_depth : ℕ), mono (fix_core F max_depth)
| 0 := mono.failure
| (max_depth + 1) := hF _ (fix_core _)
instance digit : digit.mono :=
mono.decorate_error
instance nat : nat.mono :=
mono.decorate_error
lemma fix {F : parser α → parser α} (hF : ∀ (p : parser α), p.mono → (F p).mono) :
mono (fix F) :=
⟨λ _ _, by { convert mono.le (parser.fix_core F _) _ _, exact fix_core hF _ }⟩
end mono
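/- Composite usage sketch (added illustration): the instances above combine, so a compound
parser such as `ch 'a' >> digit` is also `mono`. -/
example : n ≤ ((ch 'a' >> digit) cb n).pos := mono.le (ch 'a' >> digit) cb n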
@[simp] lemma orelse_pure_eq_fail : (p <|> pure a) cb n = fail n' err ↔
p cb n = fail n' err ∧ n ≠ n' :=
begin
by_cases hn : n = n',
{ simp [hn, pure_eq_done] },
{ simp [orelse_eq_fail_of_mono_ne, hn] }
end
end defn_lemmas
section done
variables {α β : Type} {cb : char_buffer} {n n' : ℕ} {a a' : α} {b : β} {c : char} {u : unit}
{err : dlist string}
lemma any_char_eq_done : any_char cb n = done n' c ↔
∃ (hn : n < cb.size), n' = n + 1 ∧ cb.read ⟨n, hn⟩ = c :=
begin
simp_rw [any_char],
split_ifs with h;
simp [h, eq_comm]
end
lemma any_char_eq_fail : any_char cb n = fail n' err ↔ n = n' ∧ err = dlist.empty ∧ cb.size ≤ n :=
begin
simp_rw [any_char],
split_ifs with h;
simp [←not_lt, h, eq_comm]
end
lemma sat_eq_done {p : char → Prop} [decidable_pred p] : sat p cb n = done n' c ↔
∃ (hn : n < cb.size), p c ∧ n' = n + 1 ∧ cb.read ⟨n, hn⟩ = c :=
begin
by_cases hn : n < cb.size,
{ by_cases hp : p (cb.read ⟨n, hn⟩),
{ simp only [sat, hn, hp, dif_pos, if_true, exists_prop_of_true],
split,
{ rintro ⟨rfl, rfl⟩, simp [hp] },
{ rintro ⟨-, rfl, rfl⟩, simp } },
{ simp only [sat, hn, hp, dif_pos, false_iff, not_and, exists_prop_of_true, if_false],
rintro H - rfl,
exact hp H } },
{ simp [sat, hn] }
end
lemma sat_eq_fail {p : char → Prop} [decidable_pred p] : sat p cb n = fail n' err ↔
n = n' ∧ err = dlist.empty ∧ ∀ (h : n < cb.size), ¬ p (cb.read ⟨n, h⟩) :=
begin
dsimp only [sat],
split_ifs;
simp [*, eq_comm]
end
lemma eps_eq_done : eps cb n = done n' u ↔ n = n' := by simp [eps, pure_eq_done]
lemma ch_eq_done : ch c cb n = done n' u ↔ ∃ (hn : n < cb.size), n' = n + 1 ∧ cb.read ⟨n, hn⟩ = c :=
by simp [ch, eps_eq_done, sat_eq_done, and.comm, @eq_comm _ n']
lemma char_buf_eq_done {cb' : char_buffer} : char_buf cb' cb n = done n' u ↔
n + cb'.size = n' ∧ cb'.to_list <+: (cb.to_list.drop n) :=
begin
simp only [char_buf, decorate_error_eq_done, ne.def, ←buffer.length_to_list],
induction cb'.to_list with hd tl hl generalizing cb n n',
{ simp [pure_eq_done, mmap'_eq_done, -buffer.length_to_list, list.nil_prefix] },
{ simp only [ch_eq_done, and.comm, and.assoc, and.left_comm, hl, mmap', and_then_eq_bind,
bind_eq_done, list.length, exists_and_distrib_left, exists_const],
split,
{ rintro ⟨np, h, rfl, rfl, hn, rfl⟩,
simp only [add_comm, add_left_comm, h, true_and, eq_self_iff_true, and_true],
have : n < cb.to_list.length := by simpa using hn,
rwa [←buffer.nth_le_to_list _ this, ←list.cons_nth_le_drop_succ this, list.prefix_cons_inj] },
{ rintro ⟨h, rfl⟩,
by_cases hn : n < cb.size,
{ have : n < cb.to_list.length := by simpa using hn,
rw [←list.cons_nth_le_drop_succ this, list.cons_prefix_iff] at h,
use [n + 1, h.right],
simpa [buffer.nth_le_to_list, add_comm, add_left_comm, add_assoc, hn] using h.left.symm },
{ have : cb.to_list.length ≤ n := by simpa using hn,
rw list.drop_eq_nil_of_le this at h,
simpa using h } } }
end
lemma one_of_eq_done {cs : list char} : one_of cs cb n = done n' c ↔
∃ (hn : n < cb.size), c ∈ cs ∧ n' = n + 1 ∧ cb.read ⟨n, hn⟩ = c :=
by simp [one_of, sat_eq_done]
lemma one_of'_eq_done {cs : list char} : one_of' cs cb n = done n' u ↔
∃ (hn : n < cb.size), cb.read ⟨n, hn⟩ ∈ cs ∧ n' = n + 1 :=
begin
simp only [one_of', one_of_eq_done, eps_eq_done, and.comm, and_then_eq_bind, bind_eq_done,
exists_eq_left, exists_and_distrib_left],
split,
{ rintro ⟨c, hc, rfl, hn, rfl⟩,
exact ⟨rfl, hn, hc⟩ },
{ rintro ⟨rfl, hn, hc⟩,
exact ⟨cb.read ⟨n, hn⟩, hc, rfl, hn, rfl⟩ }
end
lemma str_eq_char_buf (s : string) : str s = char_buf s.to_list.to_buffer :=
begin
ext cb n,
rw [str, char_buf],
congr,
{ simp [buffer.to_string, string.as_string_inv_to_list] },
{ simp }
end
lemma str_eq_done {s : string} : str s cb n = done n' u ↔
n + s.length = n' ∧ s.to_list <+: (cb.to_list.drop n) :=
by simp [str_eq_char_buf, char_buf_eq_done]
lemma remaining_eq_done {r : ℕ} : remaining cb n = done n' r ↔ n = n' ∧ cb.size - n = r :=
by simp [remaining]
lemma remaining_ne_fail : remaining cb n ≠ fail n' err :=
by simp [remaining]
lemma eof_eq_done {u : unit} : eof cb n = done n' u ↔ n = n' ∧ cb.size ≤ n :=
by simp [eof, guard_eq_done, remaining_eq_done, tsub_eq_zero_iff_le, and_comm, and_assoc]
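/- Concrete sanity check (added illustration): on an empty buffer, `eof` succeeds in place. -/
example : eof buffer.nil 0 = done 0 () := by simp [eof_eq_done]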
@[simp] lemma foldr_core_zero_eq_done {f : α → β → β} {p : parser α} {b' : β} :
foldr_core f p b 0 cb n ≠ done n' b' :=
by simp [foldr_core]
lemma foldr_core_eq_done {f : α → β → β} {p : parser α} {reps : ℕ} {b' : β} :
foldr_core f p b (reps + 1) cb n = done n' b' ↔
(∃ (np : ℕ) (a : α) (xs : β), p cb n = done np a ∧ foldr_core f p b reps cb np = done n' xs
∧ f a xs = b') ∨
(n = n' ∧ b = b' ∧ ∃ (err), (p cb n = fail n err) ∨
(∃ (np : ℕ) (a : α), p cb n = done np a ∧ foldr_core f p b reps cb np = fail n err)) :=
by simp [foldr_core, and.comm, and.assoc, pure_eq_done]
@[simp] lemma foldr_core_zero_eq_fail {f : α → β → β} {p : parser α} {err : dlist string} :
foldr_core f p b 0 cb n = fail n' err ↔ n = n' ∧ err = dlist.empty :=
by simp [foldr_core, eq_comm]
lemma foldr_core_succ_eq_fail {f : α → β → β} {p : parser α} {reps : ℕ} {err : dlist string} :
foldr_core f p b (reps + 1) cb n = fail n' err ↔ n ≠ n' ∧
(p cb n = fail n' err ∨
∃ (np : ℕ) (a : α), p cb n = done np a ∧ foldr_core f p b reps cb np = fail n' err) :=
by simp [foldr_core, and_comm]
lemma foldr_eq_done {f : α → β → β} {p : parser α} {b' : β} :
foldr f p b cb n = done n' b' ↔
((∃ (np : ℕ) (a : α) (x : β), p cb n = done np a ∧
foldr_core f p b (cb.size - n) cb np = done n' x ∧ f a x = b') ∨
(n = n' ∧ b = b' ∧ (∃ (err), p cb n = parse_result.fail n err ∨
∃ (np : ℕ) (x : α), p cb n = done np x ∧ foldr_core f p b (cb.size - n) cb np = fail n err))) :=
by simp [foldr, foldr_core_eq_done]
lemma foldr_eq_fail_iff_mono_at_end {f : α → β → β} {p : parser α} {err : dlist string}
[p.mono] (hc : cb.size ≤ n) : foldr f p b cb n = fail n' err ↔
n < n' ∧ (p cb n = fail n' err ∨ ∃ (a : α), p cb n = done n' a ∧ err = dlist.empty) :=
begin
have : cb.size - n = 0 := tsub_eq_zero_iff_le.mpr hc,
simp only [foldr, foldr_core_succ_eq_fail, this, and.left_comm, foldr_core_zero_eq_fail,
ne_iff_lt_iff_le, exists_and_distrib_right, exists_eq_left, and.congr_left_iff,
exists_and_distrib_left],
rintro (h | ⟨⟨a, h⟩, rfl⟩),
{ exact mono.of_fail h },
{ exact mono.of_done h }
end
lemma foldr_eq_fail {f : α → β → β} {p : parser α} {err : dlist string} :
foldr f p b cb n = fail n' err ↔ n ≠ n' ∧ (p cb n = fail n' err ∨
∃ (np : ℕ) (a : α), p cb n = done np a ∧ foldr_core f p b (cb.size - n) cb np = fail n' err) :=
by simp [foldr, foldr_core_succ_eq_fail]
@[simp] lemma foldl_core_zero_eq_done {f : β → α → β} {p : parser α} {b' : β} :
foldl_core f b p 0 cb n = done n' b' ↔ false :=
by simp [foldl_core]
lemma foldl_core_eq_done {f : β → α → β} {p : parser α} {reps : ℕ} {b' : β} :
foldl_core f b p (reps + 1) cb n = done n' b' ↔
(∃ (np : ℕ) (a : α), p cb n = done np a ∧ foldl_core f (f b a) p reps cb np = done n' b') ∨
(n = n' ∧ b = b' ∧ ∃ (err), (p cb n = fail n err) ∨
(∃ (np : ℕ) (a : α), p cb n = done np a ∧ foldl_core f (f b a) p reps cb np = fail n err)) :=
by simp [foldl_core, and.assoc, pure_eq_done]
@[simp] lemma foldl_core_zero_eq_fail {f : β → α → β} {p : parser α} {err : dlist string} :
foldl_core f b p 0 cb n = fail n' err ↔ n = n' ∧ err = dlist.empty :=
by simp [foldl_core, eq_comm]
lemma foldl_core_succ_eq_fail {f : β → α → β} {p : parser α} {reps : ℕ} {err : dlist string} :
foldl_core f b p (reps + 1) cb n = fail n' err ↔ n ≠ n' ∧
(p cb n = fail n' err ∨
∃ (np : ℕ) (a : α), p cb n = done np a ∧ foldl_core f (f b a) p reps cb np = fail n' err) :=
by simp [foldl_core, and_comm]
lemma foldl_eq_done {f : β → α → β} {p : parser α} {b' : β} :
foldl f b p cb n = done n' b' ↔
(∃ (np : ℕ) (a : α), p cb n = done np a ∧
foldl_core f (f b a) p (cb.size - n) cb np = done n' b') ∨
(n = n' ∧ b = b' ∧ ∃ (err), (p cb n = fail n err) ∨
(∃ (np : ℕ) (a : α), p cb n = done np a ∧
foldl_core f (f b a) p (cb.size - n) cb np = fail n err)) :=
by simp [foldl, foldl_core_eq_done]
lemma foldl_eq_fail {f : β → α → β} {p : parser α} {err : dlist string} :
foldl f b p cb n = fail n' err ↔ n ≠ n' ∧ (p cb n = fail n' err ∨
∃ (np : ℕ) (a : α), p cb n = done np a ∧
foldl_core f (f b a) p (cb.size - n) cb np = fail n' err) :=
by simp [foldl, foldl_core_succ_eq_fail]
lemma foldl_eq_fail_iff_mono_at_end {f : β → α → β} {p : parser α} {err : dlist string}
[p.mono] (hc : cb.size ≤ n) : foldl f b p cb n = fail n' err ↔
n < n' ∧ (p cb n = fail n' err ∨ ∃ (a : α), p cb n = done n' a ∧ err = dlist.empty) :=
begin
have : cb.size - n = 0 := tsub_eq_zero_iff_le.mpr hc,
simp only [foldl, foldl_core_succ_eq_fail, this, and.left_comm, ne_iff_lt_iff_le, exists_eq_left,
exists_and_distrib_right, and.congr_left_iff, exists_and_distrib_left,
foldl_core_zero_eq_fail],
rintro (h | ⟨⟨a, h⟩, rfl⟩),
{ exact mono.of_fail h },
{ exact mono.of_done h }
end
lemma many_eq_done_nil {p : parser α} : many p cb n = done n' (@list.nil α) ↔ n = n' ∧
∃ (err), p cb n = fail n err ∨ ∃ (np : ℕ) (a : α), p cb n = done np a ∧
foldr_core list.cons p [] (cb.size - n) cb np = fail n err :=
by simp [many, foldr_eq_done]
lemma many_eq_done {p : parser α} {x : α} {xs : list α} :
many p cb n = done n' (x :: xs) ↔ ∃ (np : ℕ), p cb n = done np x
∧ foldr_core list.cons p [] (cb.size - n) cb np = done n' xs :=
by simp [many, foldr_eq_done, and.comm, and.assoc, and.left_comm]
lemma many_eq_fail {p : parser α} {err : dlist string} :
many p cb n = fail n' err ↔ n ≠ n' ∧ (p cb n = fail n' err ∨
∃ (np : ℕ) (a : α), p cb n = done np a ∧
foldr_core list.cons p [] (cb.size - n) cb np = fail n' err) :=
by simp [many, foldr_eq_fail]
lemma many_char_eq_done_empty {p : parser char} : many_char p cb n = done n' string.empty ↔ n = n' ∧
∃ (err), p cb n = fail n err ∨ ∃ (np : ℕ) (c : char), p cb n = done np c ∧
foldr_core list.cons p [] (cb.size - n) cb np = fail n err :=
by simp [many_char, many_eq_done_nil, map_eq_done, list.as_string_eq]
lemma many_char_eq_done_not_empty {p : parser char} {s : string} (h : s ≠ "") :
many_char p cb n = done n' s ↔ ∃ (np : ℕ), p cb n = done np s.head ∧
foldr_core list.cons p list.nil (buffer.size cb - n) cb np = done n' (s.popn 1).to_list :=
by simp [many_char, list.as_string_eq, string.to_list_nonempty h, many_eq_done]
lemma many_char_eq_many_of_to_list {p : parser char} {s : string} :
many_char p cb n = done n' s ↔ many p cb n = done n' s.to_list :=
by simp [many_char, list.as_string_eq]
lemma many'_eq_done {p : parser α} : many' p cb n = done n' u ↔
many p cb n = done n' [] ∨ ∃ (np : ℕ) (a : α) (l : list α), many p cb n = done n' (a :: l)
∧ p cb n = done np a ∧ foldr_core list.cons p [] (buffer.size cb - n) cb np = done n' l :=
begin
simp only [many', eps_eq_done, many, foldr, and_then_eq_bind, exists_and_distrib_right,
bind_eq_done, exists_eq_right],
split,
{ rintro ⟨_ | ⟨hd, tl⟩, hl⟩,
{ exact or.inl hl },
{ have hl2 := hl,
simp only [foldr_core_eq_done, or_false, exists_and_distrib_left, and_false, false_and,
exists_eq_right_right] at hl,
obtain ⟨np, hp, h⟩ := hl,
refine or.inr ⟨np, _, _, hl2, hp, h⟩ } },
{ rintro (h | ⟨np, a, l, hp, h⟩),
{ exact ⟨[], h⟩ },
{ refine ⟨a :: l, hp⟩ } }
end
@[simp] lemma many1_ne_done_nil {p : parser α} : many1 p cb n ≠ done n' [] :=
by simp [many1, seq_eq_done]
lemma many1_eq_done {p : parser α} {l : list α} : many1 p cb n = done n' (a :: l) ↔
∃ (np : ℕ), p cb n = done np a ∧ many p cb np = done n' l :=
by simp [many1, seq_eq_done, map_eq_done]
lemma many1_eq_fail {p : parser α} {err : dlist string} : many1 p cb n = fail n' err ↔
p cb n = fail n' err ∨ (∃ (np : ℕ) (a : α), p cb n = done np a ∧ many p cb np = fail n' err) :=
by simp [many1, seq_eq_fail]
@[simp] lemma many_char1_ne_empty {p : parser char} : many_char1 p cb n ≠ done n' "" :=
by simp [many_char1, ←string.nil_as_string_eq_empty]
lemma many_char1_eq_done {p : parser char} {s : string} (h : s ≠ "") :
many_char1 p cb n = done n' s ↔
∃ (np : ℕ), p cb n = done np s.head ∧ many_char p cb np = done n' (s.popn 1) :=
by simp [many_char1, list.as_string_eq, string.to_list_nonempty h, many1_eq_done,
many_char_eq_many_of_to_list]
@[simp] lemma sep_by1_ne_done_nil {sep : parser unit} {p : parser α} :
sep_by1 sep p cb n ≠ done n' [] :=
by simp [sep_by1, seq_eq_done]
lemma sep_by1_eq_done {sep : parser unit} {p : parser α} {l : list α} :
sep_by1 sep p cb n = done n' (a :: l) ↔ ∃ (np : ℕ), p cb n = done np a ∧
(sep >> p).many cb np = done n' l :=
by simp [sep_by1, seq_eq_done]
lemma sep_by_eq_done_nil {sep : parser unit} {p : parser α} :
sep_by sep p cb n = done n' [] ↔ n = n' ∧ ∃ (err), sep_by1 sep p cb n = fail n err :=
by simp [sep_by, pure_eq_done]
@[simp] lemma fix_core_ne_done_zero {F : parser α → parser α} :
fix_core F 0 cb n ≠ done n' a :=
by simp [fix_core]
lemma fix_core_eq_done {F : parser α → parser α} {max_depth : ℕ} :
fix_core F (max_depth + 1) cb n = done n' a ↔ F (fix_core F max_depth) cb n = done n' a :=
by simp [fix_core]
lemma digit_eq_done {k : ℕ} : digit cb n = done n' k ↔ ∃ (hn : n < cb.size), n' = n + 1 ∧ k ≤ 9 ∧
(cb.read ⟨n, hn⟩).to_nat - '0'.to_nat = k ∧ '0' ≤ cb.read ⟨n, hn⟩ ∧ cb.read ⟨n, hn⟩ ≤ '9' :=
begin
have c9 : '9'.to_nat - '0'.to_nat = 9 := rfl,
have l09 : '0'.to_nat ≤ '9'.to_nat := dec_trivial,
have le_iff_le : ∀ {c c' : char}, c ≤ c' ↔ c.to_nat ≤ c'.to_nat := λ _ _, iff.rfl,
split,
{ simp only [digit, sat_eq_done, pure_eq_done, decorate_error_eq_done, bind_eq_done, ←c9],
rintro ⟨np, c, ⟨hn, ⟨ge0, le9⟩, rfl, rfl⟩, rfl, rfl⟩,
simpa [hn, ge0, le9, true_and, and_true, eq_self_iff_true, exists_prop_of_true,
tsub_le_tsub_iff_right, l09] using (le_iff_le.mp le9) },
{ simp only [digit, sat_eq_done, pure_eq_done, decorate_error_eq_done, bind_eq_done, ←c9,
le_iff_le],
rintro ⟨hn, rfl, -, rfl, ge0, le9⟩,
use [n + 1, cb.read ⟨n, hn⟩],
simp [hn, ge0, le9] }
end
lemma digit_eq_fail : digit cb n = fail n' err ↔ n = n' ∧ err = dlist.of_list ["<digit>"] ∧
∀ (h : n < cb.size), ¬ ((λ c, '0' ≤ c ∧ c ≤ '9') (cb.read ⟨n, h⟩)) :=
by simp [digit, sat_eq_fail]
end done
namespace static
variables {α β : Type} {p q : parser α} {msgs : thunk (list string)} {msg : thunk string}
{cb : char_buffer} {n' n : ℕ} {err : dlist string} {a : α} {b : β} {sep : parser unit}
lemma not_of_ne (h : p cb n = done n' a) (hne : n ≠ n') : ¬ static p :=
by { introI, exact hne (of_done h) }
instance pure : static (pure a) :=
⟨λ _ _ _ _, by { simp_rw pure_eq_done, rw [and.comm], simp }⟩
instance bind {f : α → parser β} [p.static] [∀ a, (f a).static] :
(p >>= f).static :=
⟨λ _ _ _ _, by { rw bind_eq_done, rintro ⟨_, _, hp, hf⟩, exact trans (of_done hp) (of_done hf) }⟩
instance and_then {q : parser β} [p.static] [q.static] : (p >> q).static := static.bind
instance map [p.static] {f : α → β} : (f <$> p).static :=
⟨λ _ _ _ _, by { simp_rw map_eq_done, rintro ⟨_, hp, _⟩, exact of_done hp }⟩
instance seq {f : parser (α → β)} [f.static] [p.static] : (f <*> p).static := static.bind
instance mmap : Π {l : list α} {f : α → parser β} [∀ a, (f a).static], (l.mmap f).static
| [] _ _ := static.pure
| (a :: l) _ h := begin
convert static.bind,
{ exact h _ },
{ intro,
convert static.bind,
{ convert mmap,
exact h },
{ exact λ _, static.pure } }
end
instance mmap' : Π {l : list α} {f : α → parser β} [∀ a, (f a).static], (l.mmap' f).static
| [] _ _ := static.pure
| (a :: l) _ h := begin
convert static.and_then,
{ exact h _ },
{ convert mmap',
exact h }
end
instance failure : @parser.static α failure :=
⟨λ _ _ _ _, by simp⟩
instance guard {p : Prop} [decidable p] : static (guard p) :=
⟨λ _ _ _ _, by simp [guard_eq_done]⟩
instance orelse [p.static] [q.static] : (p <|> q).static :=
⟨λ _ _ _ _, by { simp_rw orelse_eq_done, rintro (h | ⟨h, -⟩); exact of_done h }⟩
instance decorate_errors [p.static] :
(@decorate_errors α msgs p).static :=
⟨λ _ _ _ _, by { rw decorate_errors_eq_done, exact of_done }⟩
instance decorate_error [p.static] : (@decorate_error α msg p).static :=
static.decorate_errors
lemma any_char : ¬ static any_char :=
begin
have : any_char "s".to_char_buffer 0 = done 1 's',
{ have : 0 < "s".to_char_buffer.size := dec_trivial,
simpa [any_char_eq_done, this] },
exact not_of_ne this zero_ne_one
end
lemma sat_iff {p : char → Prop} [decidable_pred p] : static (sat p) ↔ ∀ c, ¬ p c :=
begin
split,
{ introI,
intros c hc,
have : sat p [c].to_buffer 0 = done 1 c := by simp [sat_eq_done, hc],
exact zero_ne_one (of_done this) },
{ contrapose!,
simp only [iff, sat_eq_done, and_imp, exists_prop, exists_and_distrib_right,
exists_and_distrib_left, exists_imp_distrib, not_forall],
rintros _ _ _ a h hne rfl hp -,
exact ⟨a, hp⟩ }
end
instance sat : static (sat (λ _, false)) :=
by { apply sat_iff.mpr, simp }
instance eps : static eps := static.pure
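/- Usage sketch (added illustration): for a `static` parser such as `eps`, any successful
parse pins the output position to the input position. -/
example (h : eps cb n = done n' ()) : n = n' := of_done h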
lemma ch (c : char) : ¬ static (ch c) :=
begin
have : ch c [c].to_buffer 0 = done 1 (),
{ have : 0 < [c].to_buffer.size := dec_trivial,
simp [ch_eq_done, this] },
exact not_of_ne this zero_ne_one
end
lemma char_buf_iff {cb' : char_buffer} : static (char_buf cb') ↔ cb' = buffer.nil :=
begin
rw ←buffer.size_eq_zero_iff,
have : char_buf cb' cb' 0 = done cb'.size () := by simp [char_buf_eq_done],
cases hc : cb'.size with n,
{ simp only [eq_self_iff_true, iff_true],
exact ⟨λ _ _ _ _ h, by simpa [hc] using (char_buf_eq_done.mp h).left⟩ },
{ rw hc at this,
simpa [nat.succ_ne_zero] using not_of_ne this (nat.succ_ne_zero n).symm }
end
lemma one_of_iff {cs : list char} : static (one_of cs) ↔ cs = [] :=
begin
cases cs with hd tl,
{ simp [one_of, static.decorate_errors] },
{ have : one_of (hd :: tl) (hd :: tl).to_buffer 0 = done 1 hd,
{ simp [one_of_eq_done] },
simpa using not_of_ne this zero_ne_one }
end
instance one_of : static (one_of []) :=
by { apply one_of_iff.mpr, refl }
lemma one_of'_iff {cs : list char} : static (one_of' cs) ↔ cs = [] :=
begin
cases cs with hd tl,
{ simp [one_of', static.bind], },
{ have : one_of' (hd :: tl) (hd :: tl).to_buffer 0 = done 1 (),
{ simp [one_of'_eq_done] },
simpa using not_of_ne this zero_ne_one }
end
instance one_of' : static (one_of' []) :=
by { apply one_of'_iff.mpr, refl }
lemma str_iff {s : string} : static (str s) ↔ s = "" :=
by simp [str_eq_char_buf, char_buf_iff, ←string.to_list_inj, buffer.ext_iff]
instance remaining : remaining.static :=
⟨λ _ _ _ _ h, (remaining_eq_done.mp h).left⟩
instance eof : eof.static :=
static.decorate_error
instance foldr_core {f : α → β → β} [p.static] :
∀ {b : β} {reps : ℕ}, (foldr_core f p b reps).static
| _ 0 := static.failure
| _ (reps + 1) := begin
simp_rw parser.foldr_core,
convert static.orelse,
{ convert static.bind,
{ apply_instance },
{ intro,
convert static.bind,
{ exact foldr_core },
{ apply_instance } } },
{ exact static.pure }
end
instance foldr {f : α → β → β} [p.static] : static (foldr f p b) :=
⟨λ _ _ _ _, by { dsimp [foldr], exact of_done }⟩
instance foldl_core {f : α → β → α} {p : parser β} [p.static] :
∀ {a : α} {reps : ℕ}, (foldl_core f a p reps).static
| _ 0 := static.failure
| _ (reps + 1) := begin
convert static.orelse,
{ convert static.bind,
{ apply_instance },
{ exact λ _, foldl_core } },
{ exact static.pure }
end
instance foldl {f : α → β → α} {p : parser β} [p.static] : static (foldl f a p) :=
⟨λ _ _ _ _, by { dsimp [foldl], exact of_done }⟩
instance many [p.static] : p.many.static :=
static.foldr
instance many_char {p : parser char} [p.static] : p.many_char.static :=
static.map
instance many' [p.static] : p.many'.static :=
static.and_then
instance many1 [p.static] : p.many1.static :=
static.seq
instance many_char1 {p : parser char} [p.static] : p.many_char1.static :=
static.map
instance sep_by1 [p.static] [sep.static] : static (sep_by1 sep p) :=
static.seq
instance sep_by [p.static] [sep.static] : static (sep_by sep p) :=
static.orelse
lemma fix_core {F : parser α → parser α} (hF : ∀ (p : parser α), p.static → (F p).static) :
∀ (max_depth : ℕ), static (fix_core F max_depth)
| 0 := static.failure
| (max_depth + 1) := hF _ (fix_core _)
lemma digit : ¬ digit.static :=
begin
have : digit "1".to_char_buffer 0 = done 1 1,
{ have : 0 < "s".to_char_buffer.size := dec_trivial,
simpa [this] },
exact not_of_ne this zero_ne_one
end
lemma nat : ¬ nat.static :=
begin
have : nat "1".to_char_buffer 0 = done 1 1,
{ have : 0 < "s".to_char_buffer.size := dec_trivial,
simpa [this] },
exact not_of_ne this zero_ne_one
end
lemma fix {F : parser α → parser α} (hF : ∀ (p : parser α), p.static → (F p).static) :
static (fix F) :=
⟨λ cb n _ _ h,
by { haveI := fix_core hF (cb.size - n + 1), dsimp [fix] at h, exact static.of_done h }⟩
end static
namespace bounded
variables {α β : Type} {msgs : thunk (list string)} {msg : thunk string}
variables {p q : parser α} {cb : char_buffer} {n n' : ℕ} {err : dlist string}
variables {a : α} {b : β}
lemma done_of_unbounded (h : ¬p.bounded) : ∃ (cb : char_buffer) (n n' : ℕ) (a : α),
p cb n = done n' a ∧ cb.size ≤ n :=
begin
contrapose! h,
constructor,
intros cb n hn,
cases hp : p cb n,
{ exact absurd hn (h _ _ _ _ hp).not_le },
{ simp [hp] }
end
lemma pure : ¬ bounded (pure a) :=
begin
introI,
have : (pure a : parser α) buffer.nil 0 = done 0 a := by simp [pure_eq_done],
exact absurd (bounded.of_done this) (lt_irrefl _)
end
instance bind {f : α → parser β} [p.bounded] :
(p >>= f).bounded :=
begin
constructor,
intros cb n hn,
obtain ⟨_, _, hp⟩ := bounded.exists p hn,
simp [hp]
end
instance and_then {q : parser β} [p.bounded] : (p >> q).bounded :=
bounded.bind
instance map [p.bounded] {f : α → β} : (f <$> p).bounded :=
bounded.bind
instance seq {f : parser (α → β)} [f.bounded] : (f <*> p).bounded :=
bounded.bind
instance mmap {a : α} {l : list α} {f : α → parser β} [∀ a, (f a).bounded] :
((a :: l).mmap f).bounded :=
bounded.bind
instance mmap' {a : α} {l : list α} {f : α → parser β} [∀ a, (f a).bounded] :
((a :: l).mmap' f).bounded :=
bounded.and_then
instance failure : @parser.bounded α failure :=
⟨by simp⟩
lemma guard_iff {p : Prop} [decidable p] : bounded (guard p) ↔ ¬ p :=
by simpa [guard, apply_ite bounded, pure, failure] using λ _, bounded.failure
instance orelse [p.bounded] [q.bounded] : (p <|> q).bounded :=
begin
constructor,
intros cb n hn,
cases hx : (p <|> q) cb n with posx resx posx errx,
{ obtain h | ⟨h, -, -⟩ := orelse_eq_done.mp hx;
exact absurd hn (of_done h).not_le },
{ simp }
end
instance decorate_errors [p.bounded] :
(@decorate_errors α msgs p).bounded :=
begin
constructor,
intros _ _,
simpa using bounded.exists p
end
lemma decorate_errors_iff : (@parser.decorate_errors α msgs p).bounded ↔ p.bounded :=
begin
split,
{ introI,
constructor,
intros _ _ hn,
obtain ⟨_, _, h⟩ := bounded.exists (@parser.decorate_errors α msgs p) hn,
simp [decorate_errors_eq_fail] at h,
exact h.right.right },
{ introI,
constructor,
intros _ _ hn,
obtain ⟨_, _, h⟩ := bounded.exists p hn,
simp [h] }
end
instance decorate_error [p.bounded] : (@decorate_error α msg p).bounded :=
bounded.decorate_errors
lemma decorate_error_iff : (@parser.decorate_error α msg p).bounded ↔ p.bounded :=
decorate_errors_iff
instance any_char : bounded any_char :=
⟨λ cb n hn, by simp [any_char, hn]⟩
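/- Usage sketch (added illustration): a `bounded` parser such as `any_char` is guaranteed to
fail once the position is past the end of the buffer. -/
example (h : cb.size ≤ n) : ∃ n' err, any_char cb n = fail n' err := bounded.exists any_char h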
instance sat {p : char → Prop} [decidable_pred p] : bounded (sat p) :=
⟨λ cb n hn, by simp [sat, hn]⟩
lemma eps : ¬ bounded eps := pure
instance ch {c : char} : bounded (ch c) :=
bounded.decorate_error
lemma char_buf_iff {cb' : char_buffer} : bounded (char_buf cb') ↔ cb' ≠ buffer.nil :=
begin
have : cb' ≠ buffer.nil ↔ cb'.to_list ≠ [] :=
not_iff_not_of_iff ⟨λ h, by simp [h], λ h, by simpa using congr_arg list.to_buffer h⟩,
rw [char_buf, decorate_error_iff, this],
cases cb'.to_list,
{ simp [pure, ch] },
{ simp only [iff_true, ne.def, not_false_iff],
apply_instance }
end
instance one_of {cs : list char} : (one_of cs).bounded :=
bounded.decorate_errors
instance one_of' {cs : list char} : (one_of' cs).bounded :=
bounded.and_then
lemma str_iff {s : string} : (str s).bounded ↔ s ≠ "" :=
begin
rw [str, decorate_error_iff],
cases hs : s.to_list,
{ have : s = "",
{ cases s, rw [string.to_list] at hs, simpa [hs] },
simp [pure, this] },
{ have : s ≠ "",
{ intro H, simpa [H] using hs },
simp only [this, iff_true, ne.def, not_false_iff],
apply_instance }
end
lemma remaining : ¬ remaining.bounded :=
begin
introI,
have : remaining buffer.nil 0 = done 0 0 := by simp [remaining_eq_done],
exact absurd (bounded.of_done this) (lt_irrefl _)
end
lemma eof : ¬ eof.bounded :=
begin
introI,
have : eof buffer.nil 0 = done 0 () := by simp [eof_eq_done],
exact absurd (bounded.of_done this) (lt_irrefl _)
end
section fold
instance foldr_core_zero {f : α → β → β} : (foldr_core f p b 0).bounded :=
bounded.failure
instance foldl_core_zero {f : β → α → β} {b : β} : (foldl_core f b p 0).bounded :=
bounded.failure
variables {reps : ℕ} [hpb : p.bounded] (he : ∀ cb n n' err, p cb n = fail n' err → n ≠ n')
include hpb he
lemma foldr_core {f : α → β → β} : (foldr_core f p b reps).bounded :=
begin
cases reps,
{ exact bounded.foldr_core_zero },
constructor,
intros cb n hn,
obtain ⟨np, errp, hp⟩ := bounded.exists p hn,
simpa [foldr_core_succ_eq_fail, hp] using he cb n np errp,
end
lemma foldr {f : α → β → β} : bounded (foldr f p b) :=
begin
constructor,
intros cb n hn,
haveI : (parser.foldr_core f p b (cb.size - n + 1)).bounded := foldr_core he,
obtain ⟨np, errp, hp⟩ := bounded.exists (parser.foldr_core f p b (cb.size - n + 1)) hn,
simp [foldr, hp]
end
lemma foldl_core {f : β → α → β} :
(foldl_core f b p reps).bounded :=
begin
cases reps,
{ exact bounded.foldl_core_zero },
constructor,
intros cb n hn,
obtain ⟨np, errp, hp⟩ := bounded.exists p hn,
simpa [foldl_core_succ_eq_fail, hp] using he cb n np errp,
end
lemma foldl {f : β → α → β} : bounded (foldl f b p) :=
begin
constructor,
intros cb n hn,
haveI : (parser.foldl_core f b p (cb.size - n + 1)).bounded := foldl_core he,
obtain ⟨np, errp, hp⟩ := bounded.exists (parser.foldl_core f b p (cb.size - n + 1)) hn,
simp [foldl, hp]
end
lemma many : p.many.bounded :=
foldr he
omit hpb
lemma many_char {pc : parser char} [pc.bounded]
(he : ∀ cb n n' err, pc cb n = fail n' err → n ≠ n'): pc.many_char.bounded :=
by { convert bounded.map, exact many he }
include hpb
lemma many' : p.many'.bounded :=
by { convert bounded.and_then, exact many he }
end fold
instance many1 [p.bounded] : p.many1.bounded :=
bounded.seq
instance many_char1 {p : parser char} [p.bounded] : p.many_char1.bounded :=
bounded.map
instance sep_by1 {sep : parser unit} [p.bounded] : bounded (sep_by1 sep p) :=
bounded.seq
lemma fix_core {F : parser α → parser α} (hF : ∀ (p : parser α), p.bounded → (F p).bounded) :
∀ (max_depth : ℕ), bounded (fix_core F max_depth)
| 0 := bounded.failure
| (max_depth + 1) := hF _ (fix_core _)
instance digit : digit.bounded :=
bounded.decorate_error
instance nat : nat.bounded :=
bounded.decorate_error
lemma fix {F : parser α → parser α} (hF : ∀ (p : parser α), p.bounded → (F p).bounded) :
bounded (fix F) :=
begin
constructor,
intros cb n hn,
haveI : (parser.fix_core F (cb.size - n + 1)).bounded := fix_core hF _,
obtain ⟨np, errp, hp⟩ := bounded.exists (parser.fix_core F (cb.size - n + 1)) hn,
simp [fix, hp]
end
end bounded
namespace unfailing
variables {α β : Type} {p q : parser α} {msgs : thunk (list string)} {msg : thunk string}
{cb : char_buffer} {n' n : ℕ} {err : dlist string} {a : α} {b : β} {sep : parser unit}
lemma of_bounded [p.bounded] : ¬ unfailing p :=
begin
introI,
cases h : p buffer.nil 0,
{ simpa [lt_irrefl] using bounded.of_done h },
{ exact of_fail h }
end
instance pure : unfailing (pure a) :=
⟨λ _ _, by simp [pure_eq_done]⟩
instance bind {f : α → parser β} [p.unfailing] [∀ a, (f a).unfailing] :
(p >>= f).unfailing :=
⟨λ cb n, begin
obtain ⟨np, a, hp⟩ := exists_done p cb n,
simpa [hp, and.comm, and.left_comm, and.assoc] using exists_done (f a) cb np
end⟩
instance and_then {q : parser β} [p.unfailing] [q.unfailing] : (p >> q).unfailing := unfailing.bind
instance map [p.unfailing] {f : α → β} : (f <$> p).unfailing := unfailing.bind
instance seq {f : parser (α → β)} [f.unfailing] [p.unfailing] : (f <*> p).unfailing :=
unfailing.bind
instance mmap {l : list α} {f : α → parser β} [∀ a, (f a).unfailing] : (l.mmap f).unfailing :=
begin
constructor,
induction l with hd tl hl,
{ intros,
simp [pure_eq_done] },
{ intros,
obtain ⟨np, a, hp⟩ := exists_done (f hd) cb n,
obtain ⟨n', b, hf⟩ := hl cb np,
simp [hp, hf, and.comm, and.left_comm, and.assoc, pure_eq_done] }
end
instance mmap' {l : list α} {f : α → parser β} [∀ a, (f a).unfailing] : (l.mmap' f).unfailing :=
begin
constructor,
induction l with hd tl hl,
{ intros,
simp [pure_eq_done] },
{ intros,
obtain ⟨np, a, hp⟩ := exists_done (f hd) cb n,
obtain ⟨n', b, hf⟩ := hl cb np,
simp [hp, hf, and.comm, and.left_comm, and.assoc, pure_eq_done] }
end
lemma failure : ¬ @parser.unfailing α failure :=
begin
introI h,
have : (failure : parser α) buffer.nil 0 = fail 0 dlist.empty := by simp,
exact of_fail this
end
instance guard_true : unfailing (guard true) := unfailing.pure
lemma guard : ¬ unfailing (guard false) :=
unfailing.failure
instance orelse [p.unfailing] : (p <|> q).unfailing :=
⟨λ cb n, by { obtain ⟨_, _, h⟩ := p.exists_done cb n, simp [success_iff, h] }⟩
instance decorate_errors [p.unfailing] :
(@decorate_errors α msgs p).unfailing :=
⟨λ cb n, by { obtain ⟨_, _, h⟩ := p.exists_done cb n, simp [success_iff, h] }⟩
instance decorate_error [p.unfailing] : (@decorate_error α msg p).unfailing :=
unfailing.decorate_errors
instance any_char : conditionally_unfailing any_char :=
⟨λ _ _ hn, by simp [success_iff, any_char_eq_done, hn]⟩
lemma sat : conditionally_unfailing (sat (λ _, true)) :=
⟨λ _ _ hn, by simp [success_iff, sat_eq_done, hn]⟩
instance eps : unfailing eps := unfailing.pure
instance remaining : remaining.unfailing :=
⟨λ _ _, by simp [success_iff, remaining_eq_done]⟩
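/- Usage sketch (added illustration): an `unfailing` parser such as `remaining` always
produces a `done` result. -/
example : ∃ n' r, remaining cb n = done n' r := remaining.exists_done cb n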
lemma foldr_core_zero {f : α → β → β} {b : β} : ¬ (foldr_core f p b 0).unfailing :=
unfailing.failure
instance foldr_core_of_static {f : α → β → β} {b : β} {reps : ℕ} [p.static] [p.unfailing] :
(foldr_core f p b (reps + 1)).unfailing :=
begin
induction reps with reps hr,
{ constructor,
intros cb n,
obtain ⟨np, a, h⟩ := p.exists_done cb n,
simpa [foldr_core_eq_done, h] using (static.of_done h).symm },
{ constructor,
haveI := hr,
intros cb n,
obtain ⟨np, a, h⟩ := p.exists_done cb n,
obtain rfl : n = np := static.of_done h,
obtain ⟨np, b', hf⟩ := exists_done (foldr_core f p b (reps + 1)) cb n,
obtain rfl : n = np := static.of_done hf,
refine ⟨n, f a b', _⟩,
rw foldr_core_eq_done,
simp [h, hf, and.comm, and.left_comm, and.assoc] }
end
instance foldr_core_one_of_err_static {f : α → β → β} {b : β} [p.static] [p.err_static] :
(foldr_core f p b 1).unfailing :=
begin
constructor,
intros cb n,
cases h : p cb n,
{ simpa [foldr_core_eq_done, h] using (static.of_done h).symm },
{ simpa [foldr_core_eq_done, h] using (err_static.of_fail h).symm }
end
-- TODO: add foldr and foldl, many, etc, fix_core
lemma digit : ¬ digit.unfailing :=
of_bounded
lemma nat : ¬ nat.unfailing :=
of_bounded
end unfailing
namespace err_static
variables {α β : Type} {p q : parser α} {msgs : thunk (list string)} {msg : thunk string}
{cb : char_buffer} {n' n : ℕ} {err : dlist string} {a : α} {b : β} {sep : parser unit}
lemma not_of_ne (h : p cb n = fail n' err) (hne : n ≠ n') : ¬ err_static p :=
by { introI, exact hne (of_fail h) }
instance pure : err_static (pure a) :=
⟨λ _ _ _ _, by { simp [pure_eq_done] }⟩
instance bind {f : α → parser β} [p.static] [p.err_static] [∀ a, (f a).err_static] :
(p >>= f).err_static :=
⟨λ cb n n' err, begin
rw bind_eq_fail,
rintro (hp | ⟨_, _, hp, hf⟩),
{ exact of_fail hp },
{ exact trans (static.of_done hp) (of_fail hf) }
end⟩
instance bind_of_unfailing {f : α → parser β} [p.err_static] [∀ a, (f a).unfailing] :
(p >>= f).err_static :=
⟨λ cb n n' err, begin
rw bind_eq_fail,
rintro (hp | ⟨_, _, hp, hf⟩),
{ exact of_fail hp },
{ exact false.elim (unfailing.of_fail hf) }
end⟩
instance and_then {q : parser β} [p.static] [p.err_static] [q.err_static] : (p >> q).err_static :=
err_static.bind
instance and_then_of_unfailing {q : parser β} [p.err_static] [q.unfailing] : (p >> q).err_static :=
err_static.bind_of_unfailing
instance map [p.err_static] {f : α → β} : (f <$> p).err_static :=
⟨λ _ _ _ _, by { rw map_eq_fail, exact of_fail }⟩
instance seq {f : parser (α → β)} [f.static] [f.err_static] [p.err_static] : (f <*> p).err_static :=
err_static.bind
instance seq_of_unfailing {f : parser (α → β)} [f.err_static] [p.unfailing] :
(f <*> p).err_static :=
err_static.bind_of_unfailing
instance mmap : Π {l : list α} {f : α → parser β}
[∀ a, (f a).static] [∀ a, (f a).err_static], (l.mmap f).err_static
| [] _ _ _ := err_static.pure
| (a :: l) _ h h' := begin
convert err_static.bind,
{ exact h _ },
{ exact h' _ },
{ intro,
convert err_static.bind,
{ convert static.mmap,
exact h },
{ apply mmap,
{ exact h },
{ exact h' } },
{ exact λ _, err_static.pure } }
end
instance mmap_of_unfailing : Π {l : list α} {f : α → parser β}
[∀ a, (f a).unfailing] [∀ a, (f a).err_static], (l.mmap f).err_static
| [] _ _ _ := err_static.pure
| (a :: l) _ h h' := begin
convert err_static.bind_of_unfailing,
{ exact h' _ },
{ intro,
convert unfailing.bind,
{ convert unfailing.mmap,
exact h },
{ exact λ _, unfailing.pure } }
end
instance mmap' : Π {l : list α} {f : α → parser β}
[∀ a, (f a).static] [∀ a, (f a).err_static], (l.mmap' f).err_static
| [] _ _ _ := err_static.pure
| (a :: l) _ h h' := begin
convert err_static.and_then,
{ exact h _ },
{ exact h' _ },
{ convert mmap',
{ exact h },
{ exact h' } }
end
instance mmap'_of_unfailing : Π {l : list α} {f : α → parser β}
[∀ a, (f a).unfailing] [∀ a, (f a).err_static], (l.mmap' f).err_static
| [] _ _ _ := err_static.pure
| (a :: l) _ h h' := begin
convert err_static.and_then_of_unfailing,
{ exact h' _ },
{ convert unfailing.mmap',
exact h }
end
instance failure : @parser.err_static α failure :=
⟨λ _ _ _ _ h, (failure_eq_fail.mp h).left⟩
instance guard {p : Prop} [decidable p] : err_static (guard p) :=
⟨λ _ _ _ _ h, (guard_eq_fail.mp h).right.left⟩
instance orelse [p.err_static] [q.mono] : (p <|> q).err_static :=
⟨λ _ n n' _, begin
by_cases hn : n = n',
{ exact λ _, hn },
{ rw orelse_eq_fail_of_mono_ne hn,
{ exact of_fail },
{ apply_instance } }
end⟩
instance decorate_errors :
(@decorate_errors α msgs p).err_static :=
⟨λ _ _ _ _ h, (decorate_errors_eq_fail.mp h).left⟩
instance decorate_error : (@decorate_error α msg p).err_static :=
err_static.decorate_errors
instance any_char : err_static any_char :=
⟨λ _ _ _ _, by { rw [any_char_eq_fail, and.comm], simp }⟩
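/- Usage sketch (added illustration): `any_char` can only fail without moving, so `of_fail`
recovers that the positions agree. -/
example (h : any_char cb n = fail n' err) : n = n' := of_fail h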
instance sat_iff {p : char → Prop} [decidable_pred p] : err_static (sat p) :=
⟨λ _ _ _ _ h, (sat_eq_fail.mp h).left⟩
instance eps : err_static eps := err_static.pure
instance ch (c : char) : err_static (ch c) :=
err_static.decorate_error
instance char_buf {cb' : char_buffer} : err_static (char_buf cb') :=
err_static.decorate_error
instance one_of {cs : list char} : err_static (one_of cs) :=
err_static.decorate_errors
instance one_of' {cs : list char} : err_static (one_of' cs) :=
err_static.and_then_of_unfailing
instance str {s : string} : err_static (str s) :=
err_static.decorate_error
instance remaining : remaining.err_static :=
⟨λ _ _ _ _, by simp [remaining_ne_fail]⟩
instance eof : eof.err_static :=
err_static.decorate_error
-- TODO: add foldr and foldl, many, etc, fix_core
lemma fix_core {F : parser α → parser α} (hF : ∀ (p : parser α), p.err_static → (F p).err_static) :
∀ (max_depth : ℕ), err_static (fix_core F max_depth)
| 0 := err_static.failure
| (max_depth + 1) := hF _ (fix_core _)
instance digit : digit.err_static :=
err_static.decorate_error
instance nat : nat.err_static :=
err_static.decorate_error
lemma fix {F : parser α → parser α} (hF : ∀ (p : parser α), p.err_static → (F p).err_static) :
err_static (fix F) :=
⟨λ cb n _ _ h,
by { haveI := fix_core hF (cb.size - n + 1), dsimp [fix] at h, exact err_static.of_fail h }⟩
end err_static
namespace step
variables {α β : Type} {p q : parser α} {msgs : thunk (list string)} {msg : thunk string}
{cb : char_buffer} {n' n : ℕ} {err : dlist string} {a : α} {b : β} {sep : parser unit}
lemma not_step_of_static_done [static p] (h : ∃ cb n n' a, p cb n = done n' a) : ¬ step p :=
begin
introI,
rcases h with ⟨cb, n, n', a, h⟩,
have hs := static.of_done h,
simpa [←hs] using of_done h
end
lemma pure (a : α) : ¬ step (pure a) :=
begin
apply not_step_of_static_done,
simp [pure_eq_done]
end
instance bind {f : α → parser β} [p.step] [∀ a, (f a).static] :
(p >>= f).step :=
⟨λ _ _ _ _, by { simp_rw bind_eq_done, rintro ⟨_, _, hp, hf⟩,
exact (static.of_done hf) ▸ (of_done hp) }⟩
instance bind' {f : α → parser β} [p.static] [∀ a, (f a).step] :
(p >>= f).step :=
⟨λ _ _ _ _, by { simp_rw bind_eq_done, rintro ⟨_, _, hp, hf⟩,
rw static.of_done hp, exact of_done hf }⟩
instance and_then {q : parser β} [p.step] [q.static] : (p >> q).step := step.bind
instance and_then' {q : parser β} [p.static] [q.step] : (p >> q).step := step.bind'
instance map [p.step] {f : α → β} : (f <$> p).step :=
⟨λ _ _ _ _, by { simp_rw map_eq_done, rintro ⟨_, hp, _⟩, exact of_done hp }⟩
instance seq {f : parser (α → β)} [f.step] [p.static] : (f <*> p).step := step.bind
instance seq' {f : parser (α → β)} [f.static] [p.step] : (f <*> p).step := step.bind'
instance mmap {f : α → parser β} [(f a).step] :
([a].mmap f).step :=
begin
convert step.bind,
{ apply_instance },
{ intro,
convert static.bind,
{ exact static.pure },
{ exact λ _, static.pure } }
end
instance mmap' {f : α → parser β} [(f a).step] :
([a].mmap' f).step :=
begin
convert step.and_then,
{ apply_instance },
{ exact static.pure }
end
instance failure : @parser.step α failure :=
⟨λ _ _ _ _, by simp⟩
lemma guard_true : ¬ step (guard true) := pure _
instance guard : step (guard false) :=
step.failure
instance orelse [p.step] [q.step] : (p <|> q).step :=
⟨λ _ _ _ _, by { simp_rw orelse_eq_done, rintro (h | ⟨h, -⟩); exact of_done h }⟩
lemma decorate_errors_iff : (@parser.decorate_errors α msgs p).step ↔ p.step :=
begin
split,
{ introI,
constructor,
intros cb n n' a h,
have : (@parser.decorate_errors α msgs p) cb n = done n' a := by simpa using h,
exact of_done this },
{ introI,
constructor,
intros _ _ _ _ h,
rw decorate_errors_eq_done at h,
exact of_done h }
end
instance decorate_errors [p.step] :
(@decorate_errors α msgs p).step :=
⟨λ _ _ _ _, by { rw decorate_errors_eq_done, exact of_done }⟩
lemma decorate_error_iff : (@parser.decorate_error α msg p).step ↔ p.step :=
decorate_errors_iff
instance decorate_error [p.step] : (@decorate_error α msg p).step :=
step.decorate_errors
instance any_char : step any_char :=
begin
constructor,
intros cb n,
simp_rw [any_char_eq_done],
rintro _ _ ⟨_, rfl, -⟩,
simp
end
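/- Usage sketch (added illustration): `any_char` is a `step` parser, so success always
advances the position by exactly one. -/
example {c : char} (h : any_char cb n = done n' c) : n' = n + 1 := of_done h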
instance sat {p : char → Prop} [decidable_pred p] : step (sat p) :=
begin
constructor,
intros cb n,
simp_rw [sat_eq_done],
rintro _ _ ⟨_, _, rfl, -⟩,
simp
end
lemma eps : ¬ step eps := step.pure ()
instance ch {c : char} : step (ch c) := step.decorate_error
lemma char_buf_iff {cb' : char_buffer} : (char_buf cb').step ↔ cb'.size = 1 :=
begin
have : char_buf cb' cb' 0 = done cb'.size () := by simp [char_buf_eq_done],
split,
{ introI,
simpa using of_done this },
{ intro h,
constructor,
intros cb n n' _,
rw [char_buf_eq_done, h],
rintro ⟨rfl, -⟩,
refl }
end
instance one_of {cs : list char} : (one_of cs).step :=
step.decorate_errors
instance one_of' {cs : list char} : (one_of' cs).step :=
step.and_then
lemma str_iff {s : string} : (str s).step ↔ s.length = 1 :=
by simp [str_eq_char_buf, char_buf_iff, ←string.to_list_inj, buffer.ext_iff]
lemma remaining : ¬ remaining.step :=
begin
apply not_step_of_static_done,
simp [remaining_eq_done]
end
lemma eof : ¬ eof.step :=
begin
apply not_step_of_static_done,
simp only [eof_eq_done, exists_eq_left', exists_const],
use [buffer.nil, 0],
simp
end
-- TODO: add foldr and foldl, many, etc, fix_core
lemma fix_core {F : parser α → parser α} (hF : ∀ (p : parser α), p.step → (F p).step) :
∀ (max_depth : ℕ), step (fix_core F max_depth)
| 0 := step.failure
| (max_depth + 1) := hF _ (fix_core _)
instance digit : digit.step :=
step.decorate_error
lemma fix {F : parser α → parser α} (hF : ∀ (p : parser α), p.step → (F p).step) :
step (fix F) :=
⟨λ cb n _ _ h,
by { haveI := fix_core hF (cb.size - n + 1), dsimp [fix] at h, exact of_done h }⟩
end step
section step
variables {α β : Type} {p q : parser α} {msgs : thunk (list string)} {msg : thunk string}
{cb : char_buffer} {n' n : ℕ} {err : dlist string} {a : α} {b : β} {sep : parser unit}
lemma many1_eq_done_iff_many_eq_done [p.step] [p.bounded] {x : α} {xs : list α} :
many1 p cb n = done n' (x :: xs) ↔ many p cb n = done n' (x :: xs) :=
begin
induction hx : (x :: xs) with hd tl IH generalizing x xs n n',
{ simpa using hx },
split,
{ simp only [many1_eq_done, and_imp, exists_imp_distrib],
intros np hp hm,
have : np = n + 1 := step.of_done hp,
have hn : n < cb.size := bounded.of_done hp,
subst this,
obtain ⟨k, hk⟩ : ∃ k, cb.size - n = k + 1 :=
nat.exists_eq_succ_of_ne_zero (ne_of_gt (tsub_pos_of_lt hn)),
cases k,
{ cases tl;
simpa [many_eq_done_nil, nat.sub_succ, hk, many_eq_done, hp, foldr_core_eq_done] using hm },
cases tl with hd' tl',
{ simpa [many_eq_done_nil, nat.sub_succ, hk, many_eq_done, hp, foldr_core_eq_done] using hm },
{ rw ←@IH hd' tl' at hm, swap, refl,
simp only [many1_eq_done, many, foldr] at hm,
obtain ⟨np, hp', hf⟩ := hm,
obtain rfl : np = n + 1 + 1 := step.of_done hp',
simpa [nat.sub_succ, many_eq_done, hp, hk, foldr_core_eq_done, hp'] using hf } },
{ simp only [many_eq_done, many1_eq_done, and_imp, exists_imp_distrib],
intros np hp hm,
have : np = n + 1 := step.of_done hp,
have hn : n < cb.size := bounded.of_done hp,
subst this,
obtain ⟨k, hk⟩ : ∃ k, cb.size - n = k + 1 :=
nat.exists_eq_succ_of_ne_zero (ne_of_gt (tsub_pos_of_lt hn)),
cases k,
{ cases tl;
simpa [many_eq_done_nil, nat.sub_succ, hk, many_eq_done, hp, foldr_core_eq_done] using hm },
cases tl with hd' tl',
{ simpa [many_eq_done_nil, nat.sub_succ, hk, many_eq_done, hp, foldr_core_eq_done] using hm },
{ simp [hp],
rw ←@IH hd' tl' (n + 1) n', swap, refl,
rw [hk, foldr_core_eq_done, or.comm] at hm,
obtain (hm | ⟨np, hd', tl', hp', hf, hm⟩) := hm,
{ simpa using hm },
simp only at hm,
obtain ⟨rfl, rfl⟩ := hm,
obtain rfl : np = n + 1 + 1 := step.of_done hp',
simp [nat.sub_succ, many, many1_eq_done, hp, hk, foldr_core_eq_done, hp', ←hf, foldr] } }
end
end step
namespace prog
variables {α β : Type} {p q : parser α} {msgs : thunk (list string)} {msg : thunk string}
{cb : char_buffer} {n' n : ℕ} {err : dlist string} {a : α} {b : β} {sep : parser unit}
@[priority 100] -- see Note [lower instance priority]
instance of_step [step p] : prog p :=
⟨λ _ _ _ _ h, by { rw step.of_done h, exact nat.lt_succ_self _ }⟩
lemma pure (a : α) : ¬ prog (pure a) :=
begin
introI h,
have : (pure a : parser α) buffer.nil 0 = done 0 a := by simp [pure_eq_done],
replace this : 0 < 0 := prog.of_done this,
exact (lt_irrefl _) this
end
instance bind {f : α → parser β} [p.prog] [∀ a, (f a).mono] :
(p >>= f).prog :=
⟨λ _ _ _ _, by { simp_rw bind_eq_done, rintro ⟨_, _, hp, hf⟩,
exact lt_of_lt_of_le (of_done hp) (mono.of_done hf) }⟩
instance and_then {q : parser β} [p.prog] [q.mono] : (p >> q).prog := prog.bind
instance map [p.prog] {f : α → β} : (f <$> p).prog :=
⟨λ _ _ _ _, by { simp_rw map_eq_done, rintro ⟨_, hp, _⟩, exact of_done hp }⟩
instance seq {f : parser (α → β)} [f.prog] [p.mono] : (f <*> p).prog := prog.bind
instance mmap {l : list α} {f : α → parser β} [(f a).prog] [∀ a, (f a).mono] :
((a :: l).mmap f).prog :=
begin
constructor,
simp only [and_imp, bind_eq_done, return_eq_pure, mmap, exists_imp_distrib, pure_eq_done],
rintro _ _ _ _ _ _ h _ _ hp rfl rfl,
exact lt_of_lt_of_le (of_done h) (mono.of_done hp)
end
instance mmap' {l : list α} {f : α → parser β} [(f a).prog] [∀ a, (f a).mono] :
((a :: l).mmap' f).prog :=
begin
constructor,
simp only [and_imp, bind_eq_done, mmap', exists_imp_distrib, and_then_eq_bind],
intros _ _ _ _ _ _ h hm,
exact lt_of_lt_of_le (of_done h) (mono.of_done hm)
end
instance failure : @parser.prog α failure :=
prog.of_step
lemma guard_true : ¬ prog (guard true) := pure _
instance guard : prog (guard false) :=
prog.failure
instance orelse [p.prog] [q.prog] : (p <|> q).prog :=
⟨λ _ _ _ _, by { simp_rw orelse_eq_done, rintro (h | ⟨h, -⟩); exact of_done h }⟩
lemma decorate_errors_iff : (@parser.decorate_errors α msgs p).prog ↔ p.prog :=
begin
split,
{ introI,
constructor,
intros cb n n' a h,
have : (@parser.decorate_errors α msgs p) cb n = done n' a := by simpa using h,
exact of_done this },
{ introI,
constructor,
intros _ _ _ _ h,
rw decorate_errors_eq_done at h,
exact of_done h }
end
instance decorate_errors [p.prog] :
(@decorate_errors α msgs p).prog :=
⟨λ _ _ _ _, by { rw decorate_errors_eq_done, exact of_done }⟩
lemma decorate_error_iff : (@parser.decorate_error α msg p).prog ↔ p.prog :=
decorate_errors_iff
instance decorate_error [p.prog] : (@decorate_error α msg p).prog :=
prog.decorate_errors
instance any_char : prog any_char :=
prog.of_step
instance sat {p : char → Prop} [decidable_pred p] : prog (sat p) :=
prog.of_step
lemma eps : ¬ prog eps := prog.pure ()
instance ch {c : char} : prog (ch c) :=
prog.of_step
lemma char_buf_iff {cb' : char_buffer} : (char_buf cb').prog ↔ cb' ≠ buffer.nil :=
begin
have : cb' ≠ buffer.nil ↔ cb'.to_list ≠ [] :=
not_iff_not_of_iff ⟨λ h, by simp [h], λ h, by simpa using congr_arg list.to_buffer h⟩,
rw [char_buf, this, decorate_error_iff],
cases cb'.to_list,
{ simp [pure] },
{ simp only [iff_true, ne.def, not_false_iff],
apply_instance }
end
instance one_of {cs : list char} : (one_of cs).prog :=
prog.decorate_errors
instance one_of' {cs : list char} : (one_of' cs).prog :=
prog.and_then
lemma str_iff {s : string} : (str s).prog ↔ s ≠ "" :=
by simp [str_eq_char_buf, char_buf_iff, ←string.to_list_inj, buffer.ext_iff]
lemma remaining : ¬ remaining.prog :=
begin
introI h,
have : remaining buffer.nil 0 = done 0 0 := by simp [remaining_eq_done],
replace this : 0 < 0 := prog.of_done this,
exact (lt_irrefl _) this
end
lemma eof : ¬ eof.prog :=
begin
introI h,
have : eof buffer.nil 0 = done 0 () := by simpa [remaining_eq_done],
replace this : 0 < 0 := prog.of_done this,
exact (lt_irrefl _) this
end
-- TODO: add foldr and foldl, many, etc, fix_core
instance many1 [p.mono] [p.prog] : p.many1.prog :=
begin
constructor,
rintro cb n n' (_ | ⟨hd, tl⟩),
{ simp },
{ rw many1_eq_done,
rintro ⟨np, hp, h⟩,
exact (of_done hp).trans_le (mono.of_done h) }
end
lemma fix_core {F : parser α → parser α} (hF : ∀ (p : parser α), p.prog → (F p).prog) :
∀ (max_depth : ℕ), prog (fix_core F max_depth)
| 0 := prog.failure
| (max_depth + 1) := hF _ (fix_core _)
instance digit : digit.prog :=
prog.of_step
instance nat : nat.prog :=
prog.decorate_error
lemma fix {F : parser α → parser α} (hF : ∀ (p : parser α), p.prog → (F p).prog) :
prog (fix F) :=
⟨λ cb n _ _ h,
by { haveI := fix_core hF (cb.size - n + 1), dsimp [fix] at h, exact of_done h }⟩
end prog
variables {α β : Type} {msgs : thunk (list string)} {msg : thunk string}
variables {p q : parser α} {cb : char_buffer} {n n' : ℕ} {err : dlist string}
variables {a : α} {b : β}
section many
-- TODO: generalize to p.prog instead of p.step
lemma many_sublist_of_done [p.step] [p.bounded] {l : list α}
(h : p.many cb n = done n' l) :
∀ k < n' - n, p.many cb (n + k) = done n' (l.drop k) :=
begin
induction l with hd tl hl generalizing n,
{ rw many_eq_done_nil at h,
simp [h.left] },
intros m hm,
cases m,
{ exact h },
rw [list.drop, nat.add_succ, ←nat.succ_add],
apply hl,
{ rw [←many1_eq_done_iff_many_eq_done, many1_eq_done] at h,
obtain ⟨_, hp, h⟩ := h,
convert h,
exact (step.of_done hp).symm },
{ exact nat.lt_pred_iff.mpr hm },
end
lemma many_eq_nil_of_done [p.step] [p.bounded] {l : list α}
(h : p.many cb n = done n' l) :
p.many cb n' = done n' [] :=
begin
induction l with hd tl hl generalizing n,
{ convert h,
rw many_eq_done_nil at h,
exact h.left.symm },
{ rw [←many1_eq_done_iff_many_eq_done, many1_eq_done] at h,
obtain ⟨_, -, h⟩ := h,
exact hl h }
end
lemma many_eq_nil_of_out_of_bound [p.bounded] {l : list α}
(h : p.many cb n = done n' l) (hn : cb.size < n) :
n' = n ∧ l = [] :=
begin
cases l,
{ rw many_eq_done_nil at h,
exact ⟨h.left.symm, rfl⟩ },
{ rw many_eq_done at h,
obtain ⟨np, hp, -⟩ := h,
exact absurd (bounded.of_done hp) hn.not_lt }
end
lemma many1_length_of_done [p.mono] [p.step] [p.bounded] {l : list α}
(h : many1 p cb n = done n' l) :
l.length = n' - n :=
begin
induction l with hd tl hl generalizing n n',
{ simpa using h },
{ obtain ⟨k, hk⟩ : ∃ k, n' = n + k + 1 := nat.exists_eq_add_of_lt (prog.of_done h),
subst hk,
simp only [many1_eq_done] at h,
obtain ⟨_, hp, h⟩ := h,
obtain rfl := step.of_done hp,
cases tl,
{ simp only [many_eq_done_nil, add_left_inj, exists_and_distrib_right, self_eq_add_right] at h,
rcases h with ⟨rfl, -⟩,
simp },
rw ←many1_eq_done_iff_many_eq_done at h,
specialize hl h,
simp [hl, add_comm, add_assoc, nat.sub_succ] }
end
lemma many1_bounded_of_done [p.step] [p.bounded] {l : list α}
(h : many1 p cb n = done n' l) :
n' ≤ cb.size :=
begin
induction l with hd tl hl generalizing n n',
{ simpa using h },
{ simp only [many1_eq_done] at h,
obtain ⟨np, hp, h⟩ := h,
obtain rfl := step.of_done hp,
cases tl,
{ simp only [many_eq_done_nil, exists_and_distrib_right] at h,
simpa [←h.left] using bounded.of_done hp },
{ rw ←many1_eq_done_iff_many_eq_done at h,
exact hl h } }
end
end many
section nat
/--
The `val : ℕ` produced by a successful parse of a `cb : char_buffer` is the numerical value
represented by the string of decimal digits (possibly padded with 0s on the left)
starting from the parsing position `n` and ending at position `n'`. The number
of characters parsed in is necessarily `n' - n`.
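For instance (illustrative only), if `cb` holds the four characters of `"0123"` and the parse
runs from `n = 0` to `n' = 4`, then `val = nat.of_digits 10 [3, 2, 1, 0] = 123`.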
This is one of the directions of `nat_eq_done`.
-/
lemma nat_of_done {val : ℕ} (h : nat cb n = done n' val) :
val = (nat.of_digits 10 ((((cb.to_list.drop n).take (n' - n)).reverse.map
(λ c, c.to_nat - '0'.to_nat)))) :=
begin
/- The parser `parser.nat`, which generates a decimal number from a string of digit characters,
does several things. First it ingests as many digits as it can with `many1 digit`. Then it folds
over the resulting `list ℕ` using a helper function that keeps track of both the running sum
and the magnitude so far, using a `(sum, magnitude) : (ℕ × ℕ)` pair. The final sum is extracted
using a `prod.fst`.
To prove that the value that `parser.nat` produces, after moving precisely `n' - n` steps, is
precisely what `nat.of_digits` would give, if supplied the string that is in the ingested
`char_buffer` (modulo conversion from `char` to `ℕ`), we need to induct over the length `n' - n`
of `cb : char_buffer` ingested, and prove that the parser must have terminated due to hitting
either the end of the `char_buffer` or a non-digit character.
The statement of the lemma is phrased using a combination of `list.drop` and `list.take` because
there is currently no better way to extract an "interval" from a `char_buffer`. Additionally, the
statement uses a `list.reverse` because `nat.of_digits` is little-endian.
We try to stop referring to the `cb : char_buffer` as soon as possible, so that we can instead
work with a `list char`, which lends itself better to proofs via induction.
-/
/- We first prove some helper lemmas about the definition of `parser.nat`. Since it is defined
in core, we have to work with how it is defined instead of changing its definition.
In its definition, the function that folds over the parsed in digits is defined internally,
as a lambda with anonymous destructor syntax, which leads to an unpleasant `nat._match_1` term
when rewriting the definition of `parser.nat` away. Since we know exactly what the function is,
we have a `rfl`-like lemma here to rewrite it back into a readable form.
-/
have natm : nat._match_1 = (λ (d : ℕ) p, ⟨p.1 + d * p.2, p.2 * 10⟩),
{ ext1, ext1 ⟨⟩, refl },
-- We also have to compute the `prod.snd` of the result of folding the function above over a
-- `list ℕ`, namely `10` raised to the length of the list. We use this lemma later when we
-- finish our inductive case.
have hpow : ∀ l, (list.foldr (λ (digit : ℕ) (x : ℕ × ℕ), (x.fst + digit * x.snd, x.snd * 10))
(0, 1) l).snd = 10 ^ l.length,
{ intro l,
induction l with hd tl hl,
{ simp },
{ simp [hl, pow_succ, mul_comm] } },
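-- As a worked example (illustrative only): parsing "123" yields the digit list [1, 2, 3], and
-- folding the function above over it gives (1 * 100 + 2 * 10 + 3, 10 ^ 3) = (123, 1000).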
-- We convert the hypothesis that `parser.nat` has succeeded into an existential that there is
-- some list of digits that it has parsed in, and that those digits, when folded over by the
-- function above, give the value at hand.
simp only [nat, pure_eq_done, natm, decorate_error_eq_done, bind_eq_done] at h,
obtain ⟨n', l, hp, rfl, rfl⟩ := h,
-- We now want to stop working with the `cb : char_buffer` and parse positions `n` and `n'`,
-- and just deal with the parsed digit list `l : list ℕ`. To do so, we have to show that
-- this is precisely the list that could have been parsed in, no smaller and no greater.
induction l with lhd ltl IH generalizing n n' cb,
{ -- Base case: we parsed in no digits whatsoever. But this is impossible because `parser.many1`
-- must produce a list that is not `list.nil`, by `many1_ne_done_nil`.
simpa using hp },
-- Inductive case:
-- We must prove that the first digit parsed in `lhd : ℕ` is precisely the digit that is
-- represented by the character at position `n` in `cb : char_buffer`.
-- We will also prove the correspondence between the subsequent digits `ltl : list ℕ` and the
-- remaining characters past position `n` up to position `n'`.
cases hx : (list.drop n (buffer.to_list cb)) with chd ctl,
{ -- Are there even characters left to parse, at position `n` in the `cb : char_buffer`? In other
-- words, are we already out of bounds, and thus could not have parsed in any value
-- successfully. But this must be a contradiction because `parser.digit` is a `bounded` parser,
-- (due to its being defined via `parser.decorate_error`), which means it only succeeds
-- in-bounds, and the `many1` parser combinator retains that property.
have : cb.size ≤ n := by simpa using list.drop_eq_nil_iff_le.mp hx,
exact absurd (bounded.of_done hp) this.not_lt },
-- We prove that the first digit parsed in is precisely the digit that is represented by the
-- character at position `n`, which we now call `chd : char`.
have chdh : chd.to_nat - '0'.to_nat = lhd,
{ simp only [many1_eq_done] at hp,
-- We know that `parser.digit` succeeded, so it has moved to a possibly different position.
-- In fact, we know that this new position is `n + 1`, by the `step` property of
-- `parser.digit`.
obtain ⟨_, hp, -⟩ := hp,
obtain rfl := step.of_done hp,
-- We now unfold what it means for `parser.digit` to succeed, which means that the character
-- parsed in was "numeric" (for some definition of that property), and, more importantly,
-- that the `n`th character of `cb`, let's say `c`, when converted to a `ℕ` via
-- `char.to_nat c - '0'.to_nat`, must be equal to the resulting value, `lhd` in our case.
simp only [digit_eq_done, buffer.read_eq_nth_le_to_list, hx, buffer.length_to_list, true_and,
add_left_inj, list.length, list.nth_le, eq_self_iff_true, exists_and_distrib_left,
fin.coe_mk] at hp,
rcases hp with ⟨_, hn, rfl, _, _⟩,
-- But we already know the list corresponding to `cb : char_buffer` from position `n` and on
-- is equal to `(chd :: ctl) : list char`, so our `c` above must satisfy `c = chd`.
have hn' : n < cb.to_list.length := by simpa using hn,
rw ←list.cons_nth_le_drop_succ hn' at hx,
-- We can ignore proving any correspondence of `ctl : list char` to the other portions of the
-- `cb : char_buffer`.
simp only at hx,
simp [hx] },
-- We know that we parsed in at least one character because of the `prog` property of
-- `parser.digit`, which the `many1` parser combinator retains. In other words, we know that
-- `n < n'`, and so, the list of digits `ltl` must correspond to the list of digits that
-- `digit.many1 cb (n + 1)` would produce. We know that the shift of `1` in `n ↦ n + 1` holds
-- due to the `step` property of `parser.digit`.
-- We also get here `k : ℕ` which will indicate how many characters we parsed in past position
-- `n`. We will prove later that this must be the number of digits we produced as well in `ltl`.
obtain ⟨k, hk⟩ : ∃ k, n' = n + k + 1 := nat.exists_eq_add_of_lt (prog.of_done hp),
have hdm : ltl = [] ∨ digit.many1 cb (n + 1) = done n' ltl,
{ cases ltl,
{ simp },
{ rw many1_eq_done at hp,
obtain ⟨_, hp, hp'⟩ := hp,
simpa [step.of_done hp, many1_eq_done_iff_many_eq_done] using hp' } },
-- Now we case on the two possibilities, that there was only a single digit parsed in, and
-- `ltl = []`, or, had we started parsing at `n + 1` instead, we'd parse in the value associated
-- with `ltl`.
-- We prove that the LHS, which is a fold over a `list ℕ`, is equal to the RHS, which is the
-- `val : ℕ` that `nat.of_digits` produces when supplied a `list ℕ` that has been produced
-- via mapping a `list char` using `char.to_nat`. Specifically, that `list char` consists of the
-- characters in the `cb : char_buffer`, from position `n` to position `n'` (excluding `n'`),
-- in reverse.
rcases hdm with rfl|hdm,
{ -- Case that `ltl = []`.
simp only [many1_eq_done, many_eq_done_nil, exists_and_distrib_right] at hp,
-- This means we must have failed parsing with `parser.digit` at some other position,
-- which we prove must be `n + 1` via the `step` property.
obtain ⟨_, hp, rfl, hp'⟩ := hp,
obtain rfl := step.of_done hp,
-- Now we rely on the simplifier, which simplifies the LHS, a fold over a singleton
-- list. On the RHS, `list.take (n + 1 - n)` also produces a singleton list, which, when
-- reversed, is the same list. `nat.of_digits` of a singleton list is precisely the value in
-- the list. And we already have that `chd.to_nat - '0'.to_nat = lhd`.
simp [chdh] },
-- We now have to deal with the case where we parsed in more than one digit, and thus
-- `n + 1 < n'`, which means `ctl` has one or more elements. Similarly, `ltl` has one or more
-- elements.
-- We finish ridding ourselves of references to `cb : char_buffer`, by relying on the fact that
-- our `ctl : list char` must be the appropriate portion of `cb` once enough elements have been
-- dropped and taken.
have rearr :
list.take (n + (k + 1) - (n + 1)) (list.drop (n + 1) (buffer.to_list cb)) = ctl.take k,
{ simp [←list.tail_drop, hx, nat.sub_succ, hk] },
-- We have to prove that the number of digits produced (given by `ltl`) is equal to the number
-- of characters parsed in, as given by `ctl.take k`, and that this is precisely `k`. We phrase it
-- in the statement using `min`, because lemmas about `list.length (list.take ...)` simplify to
-- a statement that uses `min`. The `list.length` term appears from the reduction of the folding
-- function, as proven above.
have ltll : min k ctl.length = ltl.length,
{ -- Here is an example of how statements about the `list.length` of `list.take` simplify.
have : (ctl.take k).length = min k ctl.length := by simp,
-- We bring back the underlying definition of `ctl` as the result of a sequence of `list.take`
-- and `list.drop`, so that lemmas about `list.length` of those can fire.
rw [←this, ←rearr, many1_length_of_done hdm],
-- Likewise, we rid ourselves of the `k` we generated earlier.
have : k = n' - n - 1,
{ simp [hk, add_assoc] },
subst this,
simp only [nat.sub_succ, add_comm, ←nat.pred_sub, buffer.length_to_list, nat.pred_one_add,
min_eq_left_iff, list.length_drop, add_tsub_cancel_left, list.length_take,
tsub_zero],
-- We now have a goal of proving an inequality dealing with `nat` subtraction and `nat.pred`,
-- both of which require special care to provide positivity hypotheses.
rw [tsub_le_tsub_iff_right, nat.pred_le_iff],
{ -- We know that `n' ≤ cb.size` because of the `bounded` property, that a parser will not
-- produce a `done` result at a position farther than the size of the underlying
-- `char_buffer`.
convert many1_bounded_of_done hp,
-- What is now left to prove is that `0 < cb.size`, which can be rephrased
-- as proving that it is nonempty.
cases hc : cb.size,
{ -- Proof by contradiction. Let's say that `cb.size = 0`. But we know that we succeeded
-- parsing in at position `n` using a `bounded` parser, so we must have that
-- `n < cb.size`.
have := bounded.of_done hp,
rw hc at this,
-- But then `n < 0`, a contradiction.
exact absurd n.zero_le this.not_le },
{ simp } },
{ -- Here, we use the same result as above, that `n < cb.size`, and relate it to
-- `n ≤ cb.size.pred`.
exact nat.le_pred_of_lt (bounded.of_done hp) } },
-- Finally, we simplify. On the LHS, we have a fold over `lhd :: ltl`, which simplifies to
-- the operation of the summing folding function on `lhd` and the fold over `ltl`. To that we can
-- apply the induction hypothesis, because we know that our parser would have succeeded had we
-- started at position `n + 1`. We replace mentions of `cb : char_buffer` with the appropriate
-- `chd :: ctl`, replace `lhd` with the appropriate statement of how it is calculated from `chd`,
-- and use the lemmas describing the length of `ltl` and how it is associated with `k`. We also
-- remove mentions of `n'` and replace with an expression using solely `n + k + 1`.
-- We use the lemma we proved above about how the folding function produces the
-- `prod.snd` value, which is `10` to the power of the length of the list provided to the fold.
-- Finally, we rely on `nat.of_digits_append` for the related statement of how digits given
-- are used in the `nat.of_digits` calculation, which also involves `10 ^ list.length ...`.
-- The `list.append` operation appears due to the `list.reverse (chd :: ctl)`.
-- We include some addition and multiplication lemmas to help the simplifier rearrange terms.
simp [IH _ hdm, hx, hk, rearr, ←chdh, ←ltll, hpow, add_assoc, nat.of_digits_append, mul_comm]
end
/--
If we know that `parser.nat` was successful, starting at position `n` and ending at position `n'`,
then it must be the case that for all `k : ℕ`, `n ≤ k`, `k < n'`, the character at the `k`th
position in `cb : char_buffer` is "numeric", that is, is between `'0'` and `'9'` inclusive.
This is a necessary part of proving one of the directions of `nat_eq_done`.
-/
lemma nat_of_done_as_digit {val : ℕ} (h : nat cb n = done n' val) :
∀ (hn : n' ≤ cb.size) k (hk : k < n'), n ≤ k →
'0' ≤ cb.read ⟨k, hk.trans_le hn⟩ ∧ cb.read ⟨k, hk.trans_le hn⟩ ≤ '9' :=
begin
-- The properties to be shown for the characters involved rely solely on the success of
-- `parser.digit` at the relevant positions, and not on the actual value `parser.nat` produced.
-- We break down the success of `parser.nat` into the `parser.digit` success and throw away
-- the resulting value given by `parser.nat`, and focus solely on the `list ℕ` generated by
-- `parser.digit.many1`.
simp only [nat, pure_eq_done, and.left_comm, decorate_error_eq_done, bind_eq_done,
exists_eq_left, exists_and_distrib_left] at h,
obtain ⟨xs, h, -⟩ := h,
-- We want to avoid having to make statements about the `cb : char_buffer` itself. Instead, we
-- induct on the `xs : list ℕ` that `parser.digit.many1` produced.
induction xs with hd tl hl generalizing n n',
{ -- Base case: `xs` is empty. But this is a contradiction because `many1` always produces a
-- nonempty list, as proven by `many1_ne_done_nil`.
simpa using h },
-- Inductive case: we prove that the `parser.digit.many1` produced a valid `(hd :: tl) : list ℕ`,
-- by showing that it is the case for the character at position `n`, which gave `hd`, and use the
-- induction hypothesis on the remaining `tl`.
-- We break apart a `many1` success into a success of the underlying `parser.digit` to give `hd`
-- and a `parser.digit.many` which gives `tl`. We first deal with the `hd`.
rw many1_eq_done at h,
-- Right away, we can throw away the information about the "new" position that `parser.digit`
-- ended on because we will soon prove that it must have been `n + 1`.
obtain ⟨_, hp, h⟩ := h,
-- The main lemma here is `digit_eq_done`, which already proves the necessary conditions about
-- the character at hand. What is left to do is properly unpack the information.
simp only [digit_eq_done, and.comm, and.left_comm, digit_eq_fail, true_and, exists_eq_left,
eq_self_iff_true, exists_and_distrib_left, exists_and_distrib_left] at hp,
obtain ⟨rfl, -, hn, ge0, le9, rfl⟩ := hp,
-- Let's now consider a position `k` between `n` and `n'`, excluding `n'`.
intros hn k hk hk',
-- What if we are at `n`? What if we are past `n`? We case on the `n ≤ k`.
rcases hk'.eq_or_lt with rfl|hk',
{ -- The `n = k` case. But this is exactly what we know already, so we provide the
-- relevant hypotheses.
exact ⟨ge0, le9⟩ },
-- The `n < k` case. First, we check if there would have even been digits parsed in. So, we
-- case on `tl : list ℕ`
cases tl,
{ -- Case where `tl = []`. But that means `many` gave us a `[]` so either the character at
-- position `n + 1` was not "numeric" or we are out of bounds. More importantly, when `many`
-- successfully produces a `[]`, it does not progress the parser head, so we have that
-- `n + 1 = n'`. This will lead to a contradiction because now we have `n < k` and `k < n + 1`.
simp only [many_eq_done_nil, exists_and_distrib_right] at h,
-- Extract out just the `n + 1 = n'`.
obtain ⟨rfl, -⟩ := h,
-- Form the contradictory hypothesis, and discharge the goal.
have : k < k := hk.trans_le (nat.succ_le_of_lt hk'),
exact absurd this (lt_irrefl _) },
{ -- Case where `tl ≠ []`. But that means that `many` produced a nonempty list as a result, so
-- `many1` would have successfully parsed at this position too. We use this statement to
-- rewrite our hypothesis into something that works with the induction hypothesis, and apply it.
rw ←many1_eq_done_iff_many_eq_done at h,
apply hl h,
-- All that is left to prove is that our `k` is at least our new "lower bound" `n + 1`, which
-- we have from our original split of the `n ≤ k`, since we are now on the `n < k` case.
exact nat.succ_le_of_lt hk' }
end
/--
If we know that `parser.nat` was successful, starting at position `n` and ending at position `n'`,
then it must be the case that for the ending position `n'`, either it is beyond the end of the
`cb : char_buffer`, or the character at that position is not "numeric", that is, between `'0'` and
`'9'` inclusive.
This is a necessary part of proving one of the directions of `nat_eq_done`.
-/
lemma nat_of_done_bounded {val : ℕ} (h : nat cb n = done n' val) :
∀ (hn : n' < cb.size), '0' ≤ cb.read ⟨n', hn⟩ → '9' < cb.read ⟨n', hn⟩ :=
begin
-- The properties to be shown for the characters involved rely solely on the success of
-- `parser.digit` at the relevant positions, and not on the actual value `parser.nat` produced.
-- We break down the success of `parser.nat` into the `parser.digit` success and throw away
-- the resulting value given by `parser.nat`, and focus solely on the `list ℕ` generated by
-- `parser.digit.many1`.
-- We deal with the case where `n'` is "out-of-bounds" right away by requiring that
-- `∀ (hn : n' < cb.size)`. Thus we only have to prove the lemma for the cases where `n'` is still
-- "in-bounds".
simp only [nat, pure_eq_done, and.left_comm, decorate_error_eq_done, bind_eq_done,
exists_eq_left, exists_and_distrib_left] at h,
obtain ⟨xs, h, -⟩ := h,
-- We want to avoid having to make statements about the `cb : char_buffer` itself. Instead, we
-- induct on the `xs : list ℕ` that `parser.digit.many1` produced.
induction xs with hd tl hl generalizing n n',
{ -- Base case: `xs` is empty. But this is a contradiction because `many1` always produces a
-- nonempty list, as proven by `many1_ne_done_nil`.
simpa using h },
-- Inductive case: at least one character has been parsed in, starting at position `n`.
-- We know that the size of `cb : char_buffer` must be at least `n + 1` because
-- `parser.digit.many1` is `bounded` (`n < cb.size`).
-- We show that either we parsed in just that one character, or we use the inductive hypothesis.
obtain ⟨k, hk⟩ : ∃ k, cb.size = n + k + 1 := nat.exists_eq_add_of_lt (bounded.of_done h),
cases tl,
{ -- Case where `tl = []`, so we parsed in only `hd`. That must mean that `parser.digit` failed
-- at `n + 1`.
simp only [many1_eq_done, many_eq_done_nil, and.left_comm, exists_and_distrib_right,
exists_eq_left] at h,
-- We throw away the success information of what happened at position `n`, and we do not need
-- the "error" value that the failure produced.
obtain ⟨-, _, h⟩ := h,
-- If `parser.digit` failed at `n + 1`, then either we hit a non-numeric character, or
-- we are out of bounds. `digit_eq_fail` provides us with those two cases.
simp only [digit_eq_done, and.comm, and.left_comm, digit_eq_fail, true_and, exists_eq_left,
eq_self_iff_true, exists_and_distrib_left] at h,
obtain (⟨rfl, h⟩ | ⟨h, -⟩) := h,
{ -- First case: we are still in bounds, but the character is not numeric. We must prove
-- that we are still in bounds. But we know that from our initial requirement.
intro hn,
simpa using h hn },
{ -- Second case: we are out of bounds, and somehow the fold that `many1` relied on failed.
-- But we know that `parser.digit` is mono, that is, it never goes backward in position,
-- neither in success nor in failure. We also have that `foldr_core` respects `mono`.
-- But in this case, `foldr_core` is starting at position `n' + 1` but failing at
-- position `n'`, which is a contradiction, because otherwise we would have `n' + 1 ≤ n'`.
simpa using mono.of_fail h } },
{ -- Case where `tl ≠ []`. But that means that `many` produced a nonempty list as a result, so
-- `many1` would have successfully parsed at this position too. We use this statement to
-- rewrite our hypothesis into something that works with the induction hypothesis, and apply it.
rw many1_eq_done at h,
obtain ⟨_, -, h⟩ := h,
rw ←many1_eq_done_iff_many_eq_done at h,
exact hl h }
end
/--
The `val : ℕ` produced by a successful parse of a `cb : char_buffer` is the numerical value
represented by the string of decimal digits (possibly padded with 0s on the left)
starting from the parsing position `n` and ending at position `n'`, where `n < n'`. The number
of characters parsed in is necessarily `n' - n`. Additionally, all of the characters in the `cb`
starting at position `n` (inclusive) up to position `n'` (exclusive) are "numeric", in that they
are between `'0'` and `'9'` inclusive. Such a `char_buffer` would produce the `ℕ` value encoded
by its decimal characters.
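For instance (illustrative only), parsing `"42x"` from position `0` succeeds with value `42` and
stops at position `2`, since `'x'` is not a decimal digit.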
-/
lemma nat_eq_done {val : ℕ} : nat cb n = done n' val ↔ ∃ (hn : n < n'),
val = (nat.of_digits 10 ((((cb.to_list.drop n).take (n' - n)).reverse.map
(λ c, c.to_nat - '0'.to_nat)))) ∧ (∀ (hn' : n' < cb.size),
('0' ≤ cb.read ⟨n', hn'⟩ → '9' < cb.read ⟨n', hn'⟩)) ∧ ∃ (hn'' : n' ≤ cb.size),
(∀ k (hk : k < n'), n ≤ k →
'0' ≤ cb.read ⟨k, hk.trans_le hn''⟩ ∧ cb.read ⟨k, hk.trans_le hn''⟩ ≤ '9') :=
begin
-- To prove this iff, most of the forward direction follows from the lemmas proven
-- above. First, we must use that `parser.nat` is `prog`, which means that on success, it must
-- move forward. We also have to prove the statement that a success means the parsed in
-- characters were properly "numeric". This involves first generating an existential witness
-- that the parse was completely "in-bounds".
-- For the reverse direction, we first discharge the goals that deal with proving that our parser
-- succeeded because it encountered characters with the proper "numeric" properties, was
-- "in-bounds" and hit a nonnumeric character. The more difficult portion is proving that the
-- list of characters from positions `n` to `n'`, when folded over by the function defined inside
-- `parser.nat` gives exactly the same value as `nat.of_digits` when supplied with the same
-- (modulo rearrangement) list. To reach this goal, we try to remove any reliance on the
-- underlying `cb : char_buffer` or parsers as soon as possible, via an induction with case
-- analysis.
refine ⟨λ h, ⟨prog.of_done h, nat_of_done h, nat_of_done_bounded h, _⟩, _⟩,
{ -- To provide the existential witness that `n'` is within the bounds of the `cb : char_buffer`,
-- we rely on the fact that `parser.nat` is primarily a `parser.digit.many1`, and that `many1`,
-- must finish within the bounds of the `cb`, as long as the underlying parser is `step` and
-- `bounded`, which `digit` is. We do not prove this as a separate lemma about `parser.nat`
-- because it would almost always be only relevant in this larger theorem.
-- We clone the success hypothesis `h` so that we can supply it back later.
have H := h,
-- We unwrap the `parser.nat` success down to the `many1` success, throwing away other info.
rw [nat] at h,
simp only [decorate_error_eq_done, bind_eq_done, pure_eq_done, and.left_comm, exists_eq_left,
exists_and_distrib_left] at h,
obtain ⟨_, h, -⟩ := h,
-- Now we get our existential witness that `n' ≤ cb.size`.
replace h := many1_bounded_of_done h,
-- With that, we can use the lemma proved above that our characters are "numeric"
exact ⟨h, nat_of_done_as_digit H h⟩ },
-- We now prove that given the `cb : char_buffer` with characters within the `n ≤ k < n'` interval
-- properly "numeric" and such that their `nat.of_digits` generates the `val : ℕ`, `parser.nat`
-- of that `cb`, when starting at `n`, will finish at `n'` and produce the same `val`.
-- We first introduce the relevant hypotheses, including the fact that we have a valid interval
-- where `n < n'` and that characters at `n'` and beyond are no longer numeric.
rintro ⟨hn, hv, hb, hn', ho⟩,
-- We first unwrap the `parser.nat` definition to the underlying `parser.digit.many1` success
-- and the fold function of the digits.
rw nat,
simp only [and.left_comm, pure_eq_done, hv, decorate_error_eq_done, list.map_reverse,
bind_eq_done, exists_eq_left, exists_and_distrib_left],
-- We won't actually need the `val : ℕ` itself, since it is entirely characterized by the
-- underlying characters. Instead, we will induct over the `list char` of characters from
-- position `n` onwards, showing that if we could have provided a valid list of digits starting
-- at `n + 1`, we can also provide one starting at `n`.
clear hv val,
/- We first prove some helper lemmas about the definition of `parser.nat`. Since it is defined
in core, we have to work with how it is defined instead of changing its definition.
In its definition, the function that folds over the parsed in digits is defined internally,
as a lambda with anonymous destructor syntax, which leads to an unpleasant `nat._match_1` term
when rewriting the definition of `parser.nat` away. Since we know exactly what the function is,
we have a `rfl`-like lemma here to rewrite it back into a readable form.
-/
have natm : nat._match_1 = (λ (d : ℕ) p, ⟨p.1 + d * p.2, p.2 * 10⟩),
{ ext1, ext1 ⟨⟩, refl },
-- We induct over the characters available at position `n` and onwards. Because `cb` is used
-- in other expressions, we utilize the `induction H : ...` tactic to induct separately from
-- destructuring `cb` itself.
induction H : (cb.to_list.drop n) with hd tl IH generalizing n,
{ -- Base case: there are no characters at position `n` or onwards, which means that
-- `cb.size ≤ n`. But this is a contradiction, since we have `n < n' ≤ cb.size`.
rw list.drop_eq_nil_iff_le at H,
refine absurd ((lt_of_le_of_lt H hn).trans_le hn') _,
simp },
{ -- Inductive case: we prove that if we could have parsed from `n + 1`, we could have also parsed
-- from `n`, if there was a valid numerical character at `n`. Most of the body
-- of this inductive case is generating the appropriate conditions for use of the inductive
-- hypothesis.
specialize @IH (n + 1),
-- We have, by the inductive case, that there is at least one character `hd` at position `n`,
-- with the rest at `tl`. We rearrange our inductive case to make `tl` be expressed as
-- `list.drop (n + 1)`, which fits our induction hypothesis conditions better. To use the
-- rearranging lemma, we must prove that we are "dropping" in bounds, which we supply on the fly.
simp only [←list.cons_nth_le_drop_succ
(show n < cb.to_list.length, by simpa using hn.trans_le hn')] at H,
-- We prove that parsing our `n`th character, `hd`, would have resulted in a success from
-- `parser.digit`, with the appropriate `ℕ` success value. We use this later to simplify the
-- unwrapped fold, since `hd` is our head character.
have hdigit : digit cb n = done (n + 1) (hd.to_nat - '0'.to_nat),
{ -- By our necessary condition, we know that `n` is in bounds, and that the `n`th character
-- has the necessary "numeric" properties.
specialize ho n hn le_rfl,
-- We prove an additional result that the conversion of `hd : char` to a `ℕ` would give a
-- value `x ≤ 9`, since that is part of the iff statement in the `digit_eq_done` lemma.
have : (buffer.read cb ⟨n, hn.trans_le hn'⟩).to_nat - '0'.to_nat ≤ 9,
{ -- We rewrite the statement to be a statement about characters instead, and split the
-- inequality into the case that our hypotheses prove, and that `'0' ≤ '9'`, which
-- is true by computation, handled by `dec_trivial`.
rw [show 9 = '9'.to_nat - '0'.to_nat, from dec_trivial, tsub_le_tsub_iff_right],
{ exact ho.right },
{ dec_trivial } },
-- We rely on the simplifier, mostly powered by `digit_eq_done`, and supply all the
-- necessary conditions of bounds and identities about `hd`.
simp [digit_eq_done, this, ←H.left, buffer.nth_le_to_list, hn.trans_le hn', ho] },
-- We now case on whether we've moved to the end of our parse or not. We phrase this as
-- casing on either `n + 1 < n'` or `n' ≤ n + 1`. The more difficult goal comes first.
cases lt_or_ge (n + 1) n' with hn'' hn'',
{ -- Case `n + 1 < n'`. We can directly supply this to our induction hypothesis.
-- We now have to prove, for the induction hypothesis, that the characters at positions `k`,
-- `n + 1 ≤ k < n'` are "numeric". We already had this for `n ≤ k < n'`, so we just rearrange
-- the hypotheses we already have.
specialize IH hn'' _ H.right,
{ intros k hk hk',
apply ho,
exact nat.le_of_succ_le hk' },
-- With the induction hypothesis conditions satisfied, we can extract out a list that
-- `parser.digit.many1` would have generated from position `n + 1`, as well as the associated
-- property of the list, that it folds into what `nat.of_digits` generates from the
-- characters in `cb : char_buffer`, now known as `hd :: tl`.
obtain ⟨l, hdl, hvl⟩ := IH,
-- Of course, the parsed in list from position `n` would be `l` prepended with the result
-- of parsing in `hd`, which is provided explicitly.
use (hd.to_nat - '0'.to_nat) :: l,
-- We case on `l : list ℕ` so that we can make statements about the fold on `l`
cases l with lhd ltl,
{ -- As before, if `l = []` then `many1` produced a `[]` success, which is a contradiction.
simpa using hdl },
-- Case `l = lhd :: ltl`. We can rewrite the fold of the function inside `parser.nat` on
-- `lhd :: ltl`, which will be used to rewrite in the goal.
simp only [natm, list.foldr] at hvl,
-- We also expand the fold in the goal, using the expanded fold from our hypothesis, powered
-- by `many1_eq_done` to proceed in the parsing. We know exactly what the next `many` will
-- produce from `many1_eq_done_iff_many_eq_done.mp` of our `hdl` hypothesis. Finally,
-- we also use `hdigit` to express what the single `parser.digit` result would be at `n`.
simp only [natm, hvl, many1_eq_done, hdigit, many1_eq_done_iff_many_eq_done.mp hdl, true_and,
and_true, eq_self_iff_true, list.foldr, exists_eq_left'],
-- Now our goal is solely about the equality of two different folding functions, one from the
-- function defined inside `parser.nat` and the other as `nat.of_digits`, when applied to
-- similar list inputs.
-- First, we rid ourselves of `n'` by replacing it with `n + m + 1`, which allows us to
-- simplify the `list.take` expression describing how many elements we are keeping.
obtain ⟨m, rfl⟩ : ∃ m, n' = n + m + 1 := nat.exists_eq_add_of_lt hn,
-- The following rearrangement lemma is used to simplify the `list.take (n' - n)` expression we had.
have : n + m + 1 - n = m + 1,
{ rw [add_assoc, tsub_eq_iff_eq_add_of_le, add_comm],
exact nat.le_add_right _ _ },
-- We also have to compute the `prod.snd` of the result of folding the function above over a
-- `list ℕ`, namely `10` raised to the length of the list. We use this lemma to finish our
-- inductive case.
have hpow : ∀ l, (list.foldr (λ (digit : ℕ) (x : ℕ × ℕ),
(x.fst + digit * x.snd, x.snd * 10)) (0, 1) l).snd = 10 ^ l.length,
{ intro l,
induction l with hd tl hl,
{ simp },
{ simp [hl, pow_succ, mul_comm] } },
-- We prove that the parsed list of digits `(lhd :: ltl) : list ℕ` must be of length `m`
-- which is used later when the `parser.nat` fold places `ltl.length` in the exponent.
have hml : ltl.length + 1 = m := by simpa using many1_length_of_done hdl,
-- A simplified `list.length (list.take ...)` expression refers to the minimum of the
-- underlying length and the amount of elements taken. We know that `m ≤ tl.length`, so
-- we provide this auxiliary lemma so that the simplified "take-length" can simplify further.
have ltll : min m tl.length = m,
{ -- On the way to proving this, we have to actually show that `m ≤ tl.length`, by showing
-- that since `tl` was a subsequence in `cb`, and was retrieved from `n + 1` to `n + m + 1`,
-- then since `n + m + 1 ≤ cb.size`, we have that `tl` must be at least `m` in length.
simpa [←H.right, le_tsub_iff_right (hn''.trans_le hn').le, add_comm, add_assoc,
add_left_comm] using hn' },
-- Finally, we rely on the simplifier. We already have expressions of `nat.of_digits` on both
-- the LHS and RHS. All that is left to do is to prove that the summand on the LHS is produced
-- by the fold of `nat.of_digits` on the RHS of `hd :: tl`. The `nat.of_digits_append` is used
-- because of the append that forms from the included `list.reverse`. The lengths of the lists
-- are placed in the exponents with `10` as a base, and are combined using `←pow_succ 10`.
-- Any complicated expression about list lengths is further simplified by the auxiliary
-- lemmas we just proved. Finally, we assist the simplifier by rearranging terms with our
-- `n + m + 1 - n = m + 1` proof and `mul_comm`.
simp [this, hpow, nat.of_digits_append, mul_comm, ←pow_succ 10, hml, ltll] },
{ -- Consider the case that `n' ≤ n + 1`. But then since `n < n' ≤ n + 1`, `n' = n + 1`.
obtain rfl : n' = n + 1 := le_antisymm hn'' (nat.succ_le_of_lt hn),
-- This means we have only parsed in a single character, so the resulting parsed in list
-- is explicitly formed from an expression we can construct from `hd`.
use [[hd.to_nat - '0'.to_nat]],
-- Our list expression simplifies nicely because it is a fold over a singleton, so we
-- do not have to supply any auxiliary lemmas for it, other than what we already know about
-- `hd` and the function defined in `parser.nat`. However, we will have to prove that our
-- parse ended because of a good reason: either we are out of bounds or we hit a nonnumeric
-- character.
simp only [many1_eq_done, many_eq_done_nil, digit_eq_fail, natm, and.comm, and.left_comm,
hdigit, true_and, mul_one, nat.of_digits_singleton, list.take, exists_eq_left,
exists_and_distrib_right, add_tsub_cancel_left, eq_self_iff_true,
list.reverse_singleton, zero_add, list.foldr, list.map],
-- We take the route of proving that we hit a nonnumeric character, since we already have
-- a hypothesis that says that characters at `n'` and past it are nonnumeric. (Note, by now
-- we have substituted `n + 1` for `n'`.)
-- We are also asked to provide the error value that our failed parse would report. But
-- `digit_eq_fail` already knows what it is, so we can discharge that with an inline `rfl`.
refine ⟨_, or.inl ⟨rfl, _⟩⟩,
-- The nonnumeric condition looks almost exactly like the hypothesis we already have, so
-- we let the simplifier align them for us.
simpa using hb } }
end
end nat
end parser
|
9fcaa970230f46914afef2cafc3b540fd04156db
|
0403d75087eccd9fdec22713ec7cff4d40c93610
|
/lean/love01_definitions_and_statements_demo.lean
|
9432804893215c72aa39ca5d27f17a9afe682b17
|
[] |
no_license
|
5l1v3r1/logical_verification_2020
|
9660ae5a83915be2103183490cae279b888be83c
|
000aa1fe212813b8458bf26c16b8a97597b7417e
|
refs/heads/master
| 1,621,861,800,557
| 1,586,181,042,000
| 1,586,181,042,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 10,565
|
lean
|
import .lovelib
/-! # LoVe Demo 1: Definitions and Statements
We introduce the basics of Lean and proof assistants, without trying to carry
out actual proofs yet. We focus on specifying objects and statements of their
intended properties. -/
set_option pp.beta true
namespace LoVe
/-! ## Proof Assistants
Proof assistants (also called interactive theorem provers)
* check and help develop formal proofs;
* can be used to prove big theorems, not only logic puzzles;
* can be tedious to use;
* are highly addictive (think video games).
A selection of proof assistants, classified by logical foundations:
* set theory: Isabelle/ZF, Metamath, Mizar;
* simple type theory: HOL4, HOL Light, Isabelle/HOL;
* **dependent type theory**: Agda, Coq, **Lean**, Matita, PVS.
## Success Stories
Mathematics:
* the four-color theorem (in Coq);
* the odd-order theorem (in Coq);
* the Kepler conjecture (in HOL Light and Isabelle/HOL).
Computer science:
* hardware
* operating systems
* programming language theory
* compilers
* security
## Lean
Lean is a fairly new proof assistant developed primarily by Leonardo de Moura
(Microsoft Research) since 2012.
Its mathematical library, `mathlib`, is developed under the leadership of
Jeremy Avigad (Carnegie Mellon University).
We use community version 3.5.1. We use its basic libraries, `mathlib`, and
`LoVelib`. Lean is a research project, with some rough edges.
Strengths:
* highly expressive logic based on a dependent type theory called the
**calculus of inductive constructions**;
* extended with classical axioms and quotient types;
* metaprogramming framework;
* modern user interface;
* documentation;
* open source;
* endless source of puns (Lean Forward, Lean Together, Boolean, …).
## This Course
### Web Site
https://lean-forward.github.io/logical-verification/2020/index.html
### Installation Instructions
https://github.com/blanchette/logical_verification_2020/blob/master/README.md#logical-verification-2020---installation-instructions
### Repository (Demos, Exercises, Homework)
https://github.com/blanchette/logical_verification_2020
The file you are currently looking at is a demo. There are
* 13 demo files;
* 13 exercise sheets;
* 11 homework sheets (10 points each);
* 1 project (20 points).
You may submit at most 10 homework, or at most 8 homework and the project.
Homework, including the project, must be done individually. The homework builds
on the exercises, which build on the demos.
### The Hitchhiker's Guide to Logical Verification
https://github.com/blanchette/logical_verification_2020/blob/master/hitchhikers_guide.pdf
The lecture notes consist of a preface and 13 chapters. They cover the same
material as the corresponding lectures but with more details. Sometimes there
will not be enough time to cover everything in class, so reading the lecture
notes will be necessary.
### Final Exam
The course aims at teaching concepts, not syntax. Therefore, the final exam is
on paper.
## Our Goal
We want you to
* master fundamental theory and techniques in interactive theorem proving;
* familiarize yourselves with some application areas;
* develop some practical skills which you can apply on a larger project (as a
hobby, for an MSc or PhD, or in industry);
* feel ready to move to another proof assistant and apply what you have learned;
* understand the domain well enough to start reading scientific papers.
This course is neither a pure metatheory course nor a Lean tutorial. Lean is our
vehicle, not an end in itself.
## A View of Lean
In a first approximation:
Lean = typed functional programming + logic
In today's lecture, we cover inductive types, recursive functions, and lemma
statements.
If you are not familiar with typed functional programming (e.g., Haskell, ML,
OCaml, Scala), we recommend that you study a tutorial, such as the first five
and a half chapters of __Learn You a Haskell for Great Good!__:
http://learnyouahaskell.com/chapters
## Types and Terms
Similar to simply typed λ-calculus or typed functional programming languages
(ML, OCaml, Haskell).
Types `σ`, `τ`, `υ`:
* type variables `α`;
* basic types `T`;
* complex types `T σ1 … σN`.
Some type constructors `T` are written infix, e.g., `→` (function type).
The function arrow is right-associative:
`σ₁ → σ₂ → σ₃ → τ` = `σ₁ → (σ₂ → (σ₃ → τ))`.
In Lean, type variables must be bound using `∀`, e.g., `∀α, α → α`.
Terms `t`, `u`:
* constants `c`;
* variables `x`;
* applications `t u`;
* λ-expressions `λx, t`.
__Currying__: functions can be
* fully applied (e.g., `f x y z` if `f` is ternary);
* partially applied (e.g., `f x y`, `f x`);
* left unapplied (e.g., `f`).
Application is left-associative: `f x y z` = `((f x) y) z`. -/
#check ℕ
#check ℤ
#check empty
#check unit
#check bool
#check ℕ → ℤ
#check ℤ → ℕ
#check bool → ℕ → ℤ
#check (bool → ℕ) → ℤ
#check ℕ → (bool → ℕ) → ℤ
#check λx : ℕ, x
#check λf : ℕ → ℕ, λg : ℕ → ℕ, λh : ℕ → ℕ, λx : ℕ, h (g (f x))
#check λ(f g h : ℕ → ℕ) (x : ℕ), h (g (f x))
constants a b : ℤ
constant f : ℤ → ℤ
constant g : ℤ → ℤ → ℤ
#check λx : ℤ, g (f (g a x)) (g x b)
#check λx, g (f (g a x)) (g x b)
#check λx, x
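-- Extra illustrative examples of currying (these `#check`s are additions, not from the
-- original demo): `g` can be partially or fully applied.
#check g a     -- partially applied, of type `ℤ → ℤ`
#check g a b   -- fully applied, of type `ℤ`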
constant trool : Type
constants trool.true trool.false trool.maybe : trool
/-! ### Type Checking and Type Inference
Type checking and type inference are decidable problems, but this property is
quickly lost if features such as overloading or subtyping are added.
Type judgment: `C ⊢ t : σ`, meaning `t` has type `σ` in local context `C`.
Typing rules:
—————————— Cst if c is declared with type σ
C ⊢ c : σ
—————————— Var if x : σ occurs in C
C ⊢ x : σ
C ⊢ t : σ → τ C ⊢ u : σ
——————————————————————————— App
C ⊢ t u : τ
C, x : σ ⊢ t : τ
———————————————————————— Lam
C ⊢ (λx : σ, t) : σ → τ
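For example, if `C` contains `f : ℕ → ℕ` and `x : ℕ`, then Var gives `C ⊢ f : ℕ → ℕ` and
`C ⊢ x : ℕ`, and App combines them into `C ⊢ f x : ℕ`.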
### Type Inhabitation
Given a type `σ`, the __type inhabitation__ problem consists of finding a term
of that type.
Recursive procedure:
1. If `σ` is of the form `τ → υ`, a candidate inhabitant is an anonymous
function of the form `λx, _`.
2. Alternatively, you can use any constant or variable `x : τ₁ → ⋯ → τN → σ` to
build the term `x _ … _`. -/
constants α β γ : Type
def some_fun_of_type : (α → β → γ) → ((β → α) → β) → α → γ :=
λf g a, f a (g (λb, a))
/-! ## Type Definitions
An __inductive type__ (also called __inductive datatype__,
__algebraic datatype__, or just __datatype__) is a type that consists of all the
values that can be built using a finite number of applications of its
__constructors__, and only those.
### Natural Numbers -/
namespace my_nat
/-! Definition of type `nat` (= `ℕ`) of natural numbers, using Peano-style unary
notation: -/
inductive nat : Type
| zero : nat
| succ : nat → nat
#check nat
#check nat.zero
#check nat.succ
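-- Extra illustrative example (an addition, not from the original demo): the number two
-- in Peano-style unary notation.
#check nat.succ (nat.succ nat.zero)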
end my_nat
#print nat
#print ℕ
namespace my_list
/-! ### Arithmetic Expressions -/
inductive aexp : Type
| num : ℤ → aexp
| var : string → aexp
| add : aexp → aexp → aexp
| sub : aexp → aexp → aexp
| mul : aexp → aexp → aexp
| div : aexp → aexp → aexp
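-- Extra illustrative example (an addition, not from the original demo): the arithmetic
-- expression `1 + x` as an `aexp`.
#check aexp.add (aexp.num 1) (aexp.var "x")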
/-! ### Lists -/
inductive list (α : Type) : Type
| nil : list
| cons : α → list → list
#check list.nil
#check list.cons
end my_list
#print list
/-! ## Function Definitions
The syntax for defining a function operating on an inductive type is very
compact: We define a single function and use __pattern matching__ to extract the
arguments to the constructors. -/
def add : ℕ → ℕ → ℕ
| m nat.zero := m
| m (nat.succ n) := nat.succ (add m n)
#eval add 2 7
#reduce add 2 7
def mul : ℕ → ℕ → ℕ
| _ nat.zero := nat.zero
| m (nat.succ n) := add m (mul m n)
#eval mul 2 7
#print mul
#print mul._main
def power : ℕ → ℕ → ℕ
| _ nat.zero := 1
| m (nat.succ n) := m * power m n
#eval power 2 5
def power₂ (m : ℕ) : ℕ → ℕ
| nat.zero := 1
| (nat.succ n) := m * power₂ n
#eval power₂ 2 5
def iter (α : Type) (z : α) (f : α → α) : ℕ → α
| nat.zero := z
| (nat.succ n) := f (iter n)
#check iter
def power₃ (m n : ℕ) : ℕ :=
iter ℕ 1 (λl, m * l) n
#eval power₃ 2 5
def append (α : Type) : list α → list α → list α
| list.nil ys := ys
| (list.cons x xs) ys := list.cons x (append xs ys)
#check append
#eval append _ [3, 1] [4, 1, 5]
/-! Aliases:
`[]` := `nil`
`x :: xs` := `cons x xs`
`[x₁, …, xN]` := `x₁ :: … :: xN` -/
def append₂ {α : Type} : list α → list α → list α
| list.nil ys := ys
| (list.cons x xs) ys := list.cons x (append₂ xs ys)
#check append₂
#eval append₂ [3, 1] [4, 1, 5]
#check @append₂
#eval @append₂ _ [3, 1] [4, 1, 5]
def append₃ {α : Type} : list α → list α → list α
| [] ys := ys
| (x :: xs) ys := x :: append₃ xs ys
def reverse {α : Type} : list α → list α
| [] := []
| (x :: xs) := reverse xs ++ [x]
def eval (env : string → ℤ) : aexp → ℤ
| (aexp.num i) := i
| (aexp.var x) := env x
| (aexp.add e₁ e₂) := eval e₁ + eval e₂
| (aexp.sub e₁ e₂) := eval e₁ - eval e₂
| (aexp.mul e₁ e₂) := eval e₁ * eval e₂
| (aexp.div e₁ e₂) := eval e₁ / eval e₂
/-! Lean only accepts the function definitions for which it can prove
termination. In particular, it accepts __structurally recursive__ functions,
which peel off exactly one constructor at a time.
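Below is a small extra illustration (hypothetical, not part of the original demo): a
structurally recursive `length₀`, which peels off one `::` constructor per recursive call. -/
def length₀ {α : Type} : list α → ℕ
| []        := 0
| (_ :: xs) := length₀ xs + 1
/-!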
## Lemma Statements
Notice the similarity with `def` commands. -/
namespace sorry_lemmas
lemma add_comm (m n : ℕ) :
add m n = add n m :=
sorry
lemma add_assoc (l m n : ℕ) :
add (add l m) n = add l (add m n) :=
sorry
lemma mul_comm (m n : ℕ) :
mul m n = mul n m :=
sorry
lemma mul_assoc (l m n : ℕ) :
mul (mul l m) n = mul l (mul m n) :=
sorry
lemma mul_add (l m n : ℕ) :
mul l (add m n) = add (mul l m) (mul l n) :=
sorry
lemma reverse_reverse {α : Type} (xs : list α) :
reverse (reverse xs) = xs :=
sorry
/-! Axioms are like lemmas but without proofs (`:= …`). Constant declarations
are like definitions but without bodies (`:= …`). -/
constants a b : ℤ
axiom a_less_b :
a < b
end sorry_lemmas
end LoVe
|
3218ddb9adf90254fa2d141d09c8f016091ffc3f
|
6432ea7a083ff6ba21ea17af9ee47b9c371760f7
|
/src/Lean/Elab/PreDefinition/Structural/Main.lean
|
1692deced27337e3af3445d75a5d8e0d2aa2a0c2
|
[
"Apache-2.0",
"LLVM-exception",
"NCSA",
"LGPL-3.0-only",
"LicenseRef-scancode-inner-net-2.0",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"Spencer-94",
"LGPL-2.1-or-later",
"HPND",
"LicenseRef-scancode-pcre",
"ISC",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"SunPro",
"CMU-Mach"
] |
permissive
|
leanprover/lean4
|
4bdf9790294964627eb9be79f5e8f6157780b4cc
|
f1f9dc0f2f531af3312398999d8b8303fa5f096b
|
refs/heads/master
| 1,693,360,665,786
| 1,693,350,868,000
| 1,693,350,868,000
| 129,571,436
| 2,827
| 311
|
Apache-2.0
| 1,694,716,156,000
| 1,523,760,560,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 4,770
|
lean
|
/-
Copyright (c) 2021 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
import Lean.Elab.PreDefinition.Structural.Basic
import Lean.Elab.PreDefinition.Structural.FindRecArg
import Lean.Elab.PreDefinition.Structural.Preprocess
import Lean.Elab.PreDefinition.Structural.BRecOn
import Lean.Elab.PreDefinition.Structural.IndPred
import Lean.Elab.PreDefinition.Structural.Eqns
import Lean.Elab.PreDefinition.Structural.SmartUnfolding
namespace Lean.Elab
namespace Structural
open Meta
private def getFixedPrefix (declName : Name) (xs : Array Expr) (value : Expr) : MetaM Nat := do
let numFixedRef ← IO.mkRef xs.size
forEachExpr' value fun e => do
if e.isAppOf declName then
let args := e.getAppArgs
numFixedRef.modify fun numFixed => if args.size < numFixed then args.size else numFixed
for arg in args, x in xs do
/- We should not use structural equality here. For example, given the definition
```
def V.map {α β} f x x_1 :=
@V.map.match_1.{1} α (fun x x_2 => V β x) x x_1
(fun x x_2 => @V.mk₁ β x (f Bool.true x_2))
(fun e => @V.mk₂ β (V.map (fun b => α b) (fun b => β b) f Bool.false e))
```
The first three arguments at `V.map (fun b => α b) (fun b => β b) f Bool.false e` are "fixed"
modulo definitional equality.
We disable proof irrelevance to be able to use structural recursion on inductive predicates.
For example, consider the example
```
inductive PList (α : Type) : Prop
| nil
| cons : α → PList α → PList α
infixr:67 " ::: " => PList.cons
set_option trace.Elab.definition.structural true in
def pmap {α β} (f : α → β) : PList α → PList β
| PList.nil => PList.nil
| a:::as => f a ::: pmap f as
```
The "Fixed" prefix would be 4 since all elements of type `PList α` are definitionally equal.
-/
if !(← withoutProofIrrelevance <| withReducible <| isDefEq arg x) then
-- We continue searching if e's arguments are not a prefix of `xs`
return true
return false
else
return true
numFixedRef.get
private def elimRecursion (preDef : PreDefinition) : M (Nat × PreDefinition) := do
trace[Elab.definition.structural] "{preDef.declName} := {preDef.value}"
withoutModifyingEnv do lambdaTelescope preDef.value fun xs value => do
addAsAxiom preDef
let value ← preprocess value preDef.declName
trace[Elab.definition.structural] "{preDef.declName} {xs} :=\n{value}"
let numFixed ← getFixedPrefix preDef.declName xs value
trace[Elab.definition.structural] "numFixed: {numFixed}"
findRecArg numFixed xs fun recArgInfo => do
-- when (recArgInfo.indName == `Nat) throwStructuralFailed -- HACK to skip Nat argument
let valueNew ← if recArgInfo.indPred then
mkIndPredBRecOn preDef.declName recArgInfo value
else
mkBRecOn preDef.declName recArgInfo value
let valueNew ← mkLambdaFVars xs valueNew
trace[Elab.definition.structural] "result: {valueNew}"
-- Recursive applications may still occur in expressions that were not visited by replaceRecApps (e.g., in types)
let valueNew ← ensureNoRecFn preDef.declName valueNew
let recArgPos := recArgInfo.fixedParams.size + recArgInfo.pos
return (recArgPos, { preDef with value := valueNew })
def structuralRecursion (preDefs : Array PreDefinition) : TermElabM Unit :=
if preDefs.size != 1 then
throwError "structural recursion does not handle mutually recursive functions"
else do
let ((recArgPos, preDefNonRec), state) ← run <| elimRecursion preDefs[0]!
let preDefNonRec ← eraseRecAppSyntax preDefNonRec
let preDef ← eraseRecAppSyntax preDefs[0]!
state.addMatchers.forM liftM
unless preDef.kind.isTheorem do
unless (← isProp preDef.type) do
/-
Don't save predefinition info for equation generator
for theorems and definitions that are propositions.
See issue #2327
-/
registerEqnsInfo preDef recArgPos
mapError (addNonRec preDefNonRec (applyAttrAfterCompilation := false)) fun msg =>
m!"structural recursion failed, produced type incorrect term{indentD msg}"
addAndCompilePartialRec #[preDef]
addSmartUnfoldingDef preDef recArgPos
applyAttributesOf #[preDefNonRec] AttributeApplicationTime.afterCompilation
builtin_initialize
registerTraceClass `Elab.definition.structural
end Structural
export Structural (structuralRecursion)
end Lean.Elab
|
4eb736af36f506173b10e12b3c8220a1e46c6567
|
fa02ed5a3c9c0adee3c26887a16855e7841c668b
|
/src/tactic/simps.lean
|
270d2762564f5cd909ea4bb323b6c8d770781e64
|
[
"Apache-2.0"
] |
permissive
|
jjgarzella/mathlib
|
96a345378c4e0bf26cf604aed84f90329e4896a2
|
395d8716c3ad03747059d482090e2bb97db612c8
|
refs/heads/master
| 1,686,480,124,379
| 1,625,163,323,000
| 1,625,163,323,000
| 281,190,421
| 2
| 0
|
Apache-2.0
| 1,595,268,170,000
| 1,595,268,169,000
| null |
UTF-8
|
Lean
| false
| false
| 42,220
|
lean
|
/-
Copyright (c) 2019 Floris van Doorn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Floris van Doorn
-/
import tactic.core
import tactic.protected
import data.sum
/-!
# simps attribute
This file defines the `@[simps]` attribute, to automatically generate `simp` lemmas
reducing a definition when projections are applied to it.
## Implementation Notes
There are three attributes being defined here
* `@[simps]` is the attribute for objects of a structure or instances of a class. It will
automatically generate simplification lemmas for each projection of the object/instance that
contains data. See the doc strings for `simps_attr` and `simps_cfg` for more details and
configuration options.
* `@[_simps_str]` is automatically added to structures that have been used in `@[simps]` at least
once. This attribute contains the data of the projections used for this structure by all following
invocations of `@[simps]`.
* `@[notation_class]` should be added to all classes that define notation, like `has_mul` and
`has_zero`. This specifies that the projections that `@[simps]` used are the projections from
these notation classes instead of the projections of the superclasses.
Example: if `has_mul` is tagged with `@[notation_class]` then the projection used for `semigroup`
will be `λ α hα, @has_mul.mul α (@semigroup.to_has_mul α hα)` instead of `@semigroup.mul`.
## Tags
structures, projections, simp, simplifier, generates declarations
-/
open tactic expr option sum
setup_tactic_parser
declare_trace simps.verbose
declare_trace simps.debug
/--
Projection data for a single projection of a structure, consisting of the following fields:
- the name used in the generated `simp` lemmas
- an expression used by simps for the projection. It must be definitionally equal to an original
projection (or a composition of multiple projections).
These expressions can contain the universe parameters specified in the first argument of
`simps_str_attr`.
- a list of natural numbers, which is the projection number(s) that have to be applied to the
expression. For example the list `[0, 1]` corresponds to applying the first projection of the
structure, and then the second projection of the resulting structure (this assumes that the
target of the first projection is a structure with at least two projections).
The composition of these projections is required to be definitionally equal to the provided
expression.
- A boolean specifying whether `simp` lemmas are generated for this projection by default.
- A boolean specifying whether this projection is written as prefix.
-/
@[protect_proj, derive [has_reflect, inhabited]]
meta structure projection_data :=
(name : name)
(expr : expr)
(proj_nrs : list ℕ)
(is_default : bool)
(is_prefix : bool)
/-- Temporary projection data parsed from `initialize_simps_projections` before the expression
matching this projection has been found. Only used internally in `simps_get_raw_projections`. -/
meta structure parsed_projection_data :=
(orig_name : name) -- name for this projection used in the structure definition
(new_name : name) -- name for this projection used in the generated `simp` lemmas
(is_default : bool)
(is_prefix : bool)
section
open format
meta instance : has_to_tactic_format projection_data :=
⟨λ ⟨a, b, c, d, e⟩, (λ x, group $ nest 1 $ to_fmt "⟨" ++ to_fmt a ++ to_fmt "," ++ line ++ x ++
to_fmt "," ++ line ++ to_fmt c ++ to_fmt "," ++ line ++ to_fmt d ++ to_fmt "," ++ line ++
to_fmt e ++ to_fmt "⟩") <$> pp b⟩
meta instance : has_to_format parsed_projection_data :=
⟨λ ⟨a, b, c, d⟩, group $ nest 1 $ to_fmt "⟨" ++ to_fmt a ++ to_fmt "," ++ line ++ to_fmt b ++
to_fmt "," ++ line ++ to_fmt c ++ to_fmt "," ++ line ++ to_fmt d ++ to_fmt "⟩"⟩
end
/-- The type of rules that specify how the metadata for a projection changes.
See `initialize_simps_projections`. -/
abbreviation projection_rule := (name × name ⊕ name) × bool
/--
The `@[_simps_str]` attribute specifies the preferred projections of the given structure,
used by the `@[simps]` attribute.
- This will usually be tagged by the `@[simps]` tactic.
- You can also generate this with the command `initialize_simps_projections`.
- To change the default value, see Note [custom simps projection].
- You are strongly discouraged from adding this attribute manually.
- The first argument is the list of names of the universe variables used in the structure.
- The second argument is a list that consists of the projection data for each projection.
-/
@[user_attribute] meta def simps_str_attr :
user_attribute unit (list name × list projection_data) :=
{ name := `_simps_str,
descr := "An attribute specifying the projection of the given structure.",
parser := do e ← texpr, eval_pexpr _ e }
/--
The `@[notation_class]` attribute specifies that this is a notation class,
and this notation should be used instead of projections by @[simps].
* The first argument is `tt` for notation classes and `ff` for classes applied to the structure,
like `has_coe_to_sort` and `has_coe_to_fun`.
* The second argument is the name of the projection (by default it is the first projection
of the structure)
-/
@[user_attribute] meta def notation_class_attr : user_attribute unit (bool × option name) :=
{ name := `notation_class,
descr := "An attribute specifying that this is a notation class. Used by @[simps].",
parser := prod.mk <$> (option.is_none <$> (tk "*")?) <*> ident? }
attribute [notation_class] has_zero has_one has_add has_mul has_inv has_neg has_sub has_div has_dvd
has_mod has_le has_lt has_append has_andthen has_union has_inter has_sdiff has_equiv has_subset
has_ssubset has_emptyc has_insert has_singleton has_sep has_mem has_pow
attribute [notation_class* coe_sort] has_coe_to_sort
attribute [notation_class* coe_fn] has_coe_to_fun
/-- Returns the projection information of a structure. -/
meta def projections_info (l : list projection_data) (pref : string) (str : name) : tactic format :=
do
⟨defaults, nondefaults⟩ ← return $ l.partition_map $
λ s, if s.is_default then inl s else inr s,
to_print ← defaults.mmap $ λ s, to_string <$>
let prefix_str := if s.is_prefix then "(prefix) " else "" in
pformat!"Projection {prefix_str}{s.name}: {s.expr}",
let print2 :=
string.join $ (nondefaults.map (λ nm : projection_data, to_string nm.1)).intersperse ", ",
let to_print := to_print ++ if nondefaults.length = 0 then [] else
["No lemmas are generated for the projections: " ++ print2 ++ "."],
let to_print := string.join $ to_print.intersperse "\n > ",
return format!"[simps] > {pref} {str}:\n > {to_print}"
/-- Auxiliary function of `get_composite_of_projections`. -/
meta def get_composite_of_projections_aux : Π (str : name) (proj : string) (x : expr)
(pos : list ℕ) (args : list expr), tactic (expr × list ℕ) | str proj x pos args := do
e ← get_env,
projs ← e.structure_fields str,
let proj_info := projs.map_with_index $ λ n p, (λ x, (x, n, p)) <$> proj.get_rest ("_" ++ p.last),
when (proj_info.filter_map id = []) $
fail!"Failed to find constructor {proj.popn 1} in structure {str}.",
(proj_rest, index, proj_nm) ← return (proj_info.filter_map id).ilast,
str_d ← e.get str,
let proj_e : expr := const (str ++ proj_nm) str_d.univ_levels,
proj_d ← e.get (str ++ proj_nm),
type ← infer_type x,
let params := get_app_args type,
let univs := proj_d.univ_params.zip type.get_app_fn.univ_levels,
let new_x := (proj_e.instantiate_univ_params univs).mk_app $ params ++ [x],
let new_pos := pos ++ [index],
if proj_rest.is_empty then return (new_x.lambdas args, new_pos) else do
type ← infer_type new_x,
(type_args, tgt) ← open_pis_whnf type,
let new_str := tgt.get_app_fn.const_name,
get_composite_of_projections_aux new_str proj_rest (new_x.mk_app type_args) new_pos
(args ++ type_args)
/-- Given a structure `str` and a projection `proj`, that could be multiple nested projections
(separated by `_`), returns an expression that is the composition of these projections and a
list of natural numbers, that are the projection numbers of the applied projections. -/
meta def get_composite_of_projections (str : name) (proj : string) : tactic (expr × list ℕ) := do
e ← get_env,
str_d ← e.get str,
let str_e : expr := const str str_d.univ_levels,
type ← infer_type str_e,
(type_args, tgt) ← open_pis_whnf type,
let str_ap := str_e.mk_app type_args,
x ← mk_local' `x binder_info.default str_ap,
get_composite_of_projections_aux str ("_" ++ proj) x [] $ type_args ++ [x]
/--
Get the projections used by `simps` associated to a given structure `str`.
The returned information is also stored in a parameter of the attribute `@[_simps_str]`, which
is given to `str`. If `str` already has this attribute, the information is read from this
attribute instead. See the documentation for this attribute for the data this tactic returns.
The returned universe levels are the universe levels of the structure. For the projections there
are three cases
* If the declaration `{structure_name}.simps.{projection_name}` has been declared, then the value
of this declaration is used (after checking that it is definitionally equal to the actual
projection). If you rename the projection name, the declaration should have the *new* projection
name.
* You can also declare a custom projection that is a composite of multiple projections.
* Otherwise, for every class with the `notation_class` attribute, and the structure has an
instance of that notation class, then the projection of that notation class is used for the
projection that is definitionally equal to it (if there is such a projection).
This means in practice that coercions to function types and sorts will be used instead of
a projection, if this coercion is definitionally equal to a projection. Furthermore, for
notation classes like `has_mul` and `has_zero` those projections are used instead of the
corresponding projection.
Projections for coercions and notation classes are not automatically generated if they are
composites of multiple projections (for example when you use `extend` without the
`old_structure_cmd`).
* Otherwise, the projection of the structure is chosen.
For example: ``simps_get_raw_projections env `prod`` gives the default projections
```
([u, v], [prod.fst.{u v}, prod.snd.{u v}])
```
while ``simps_get_raw_projections env `equiv`` gives
```
([u_1, u_2], [λ α β, coe_fn, λ {α β} (e : α ≃ β), ⇑(e.symm), left_inv, right_inv])
```
after declaring the coercion from `equiv` to function and adding the declaration
```
def equiv.simps.inv_fun {α β} (e : α ≃ β) : β → α := e.symm
```
This command accepts three optional arguments:
* If `trace_if_exists` is set, the command will always generate a trace message when the structure
already has the attribute `@[_simps_str]`.
* The `rules` argument accepts a list of pairs `sum.inl (old_name, new_name)`. This is used to
change the projection name `old_name` to the custom projection name `new_name`. Example:
for the structure `equiv` the projection `to_fun` could be renamed `apply`. This name will be
used for parsing and generating projection names. This argument is ignored if the structure
already has an existing attribute. If an element of `rules` is of the form `sum.inr name`, this
means that the projection `name` will not be applied by default.
* If `trc` is true, this tactic will trace information.
-/
-- if performance becomes a problem, possible heuristic: use the names of the projections to
-- skip all classes that don't have the corresponding field.
meta def simps_get_raw_projections (e : environment) (str : name) (trace_if_exists : bool := ff)
(rules : list projection_rule := []) (trc := ff) :
tactic (list name × list projection_data) := do
let trc := trc || is_trace_enabled_for `simps.verbose,
has_attr ← has_attribute' `_simps_str str,
if has_attr then do
data ← simps_str_attr.get_param str,
-- We always print the projections when they already exists and are called by
-- `initialize_simps_projections`.
when (trace_if_exists || is_trace_enabled_for `simps.verbose) $ projections_info data.2
"Already found projection information for structure" str >>= trace,
return data
else do
when trc trace!"[simps] > generating projection information for structure {str}.",
when_tracing `simps.debug trace!"[simps] > Applying the rules {rules}.",
d_str ← e.get str,
let raw_univs := d_str.univ_params,
let raw_levels := level.param <$> raw_univs,
/- Figure out projections, including renamings. The information for a projection is (before we
figure out the `expr` of the projection):
`(original name, given name, is default, is prefix)`.
The first projections are always the actual projections of the structure, but `rules` could
specify custom projections that are compositions of multiple projections. -/
projs ← e.structure_fields str,
let projs : list parsed_projection_data := projs.map $ λ nm, ⟨nm, nm, tt, ff⟩,
let projs : list parsed_projection_data := rules.foldl (λ projs rule,
match rule with
| (inl (old_nm, new_nm), is_prefix) := if old_nm ∈ projs.map (λ x, x.new_name) then
projs.map $ λ proj,
if proj.new_name = old_nm then
{ new_name := new_nm, is_prefix := is_prefix, ..proj } else
proj else
projs ++ [⟨old_nm, new_nm, tt, is_prefix⟩]
| (inr nm, is_prefix) := if nm ∈ projs.map (λ x, x.new_name) then
projs.map $ λ proj, if proj.new_name = nm then
{ is_default := ff, is_prefix := is_prefix, ..proj } else
proj else
projs ++ [⟨nm, nm, ff, is_prefix⟩]
end) projs,
when_tracing `simps.debug trace!"[simps] > Projection info after applying the rules: {projs}.",
when ¬ (projs.map $ λ x, x.new_name : list name).nodup $
fail $ "Invalid projection names. Two projections have the same name.
This is likely because a custom composition of projections was given the same name as an " ++
"existing projection. Solution: rename the existing projection (before renaming the custom " ++
"projection).",
/- Define the raw expressions for the projections, by default as the projections
(as an expression), but this can be overridden by the user. -/
raw_exprs_and_nrs ← projs.mmap $ λ ⟨orig_nm, new_nm, _, _⟩, do {
(raw_expr, nrs) ← get_composite_of_projections str orig_nm.last,
custom_proj ← do {
decl ← e.get (str ++ `simps ++ new_nm.last),
let custom_proj := decl.value.instantiate_univ_params $ decl.univ_params.zip raw_levels,
when trc trace!
"[simps] > found custom projection for {new_nm}:\n > {custom_proj}",
return custom_proj } <|> return raw_expr,
is_def_eq custom_proj raw_expr <|>
-- if the type of the expression is different, we show a different error message, because
-- that is more likely going to be helpful.
do {
custom_proj_type ← infer_type custom_proj,
raw_expr_type ← infer_type raw_expr,
b ← succeeds (is_def_eq custom_proj_type raw_expr_type),
if b then fail!"Invalid custom projection:\n {custom_proj}
Expression is not definitionally equal to\n {raw_expr}"
else fail!"Invalid custom projection:\n {custom_proj}
Expression has different type than {str ++ orig_nm}. Given type:\n {custom_proj_type}
Expected type:\n {raw_expr_type}" },
return (custom_proj, nrs) },
let raw_exprs := raw_exprs_and_nrs.map prod.fst,
/- Check for other coercions and type-class arguments to use as projections instead. -/
(args, _) ← open_pis d_str.type,
let e_str := (expr.const str raw_levels).mk_app args,
automatic_projs ← attribute.get_instances `notation_class,
raw_exprs ← automatic_projs.mfoldl (λ (raw_exprs : list expr) class_nm, do {
(is_class, proj_nm) ← notation_class_attr.get_param class_nm,
proj_nm ← proj_nm <|> (e.structure_fields_full class_nm).map list.head,
/- For this class, find the projection. `raw_expr` is the projection found applied to `args`,
and `lambda_raw_expr` has the arguments `args` abstracted. -/
(raw_expr, lambda_raw_expr) ← if is_class then (do
guard $ args.length = 1,
let e_inst_type := (const class_nm raw_levels).mk_app args,
(hyp, e_inst) ← try_for 1000 (mk_conditional_instance e_str e_inst_type),
raw_expr ← mk_mapp proj_nm [args.head, e_inst],
clear hyp,
-- Note: `expr.bind_lambda` doesn't give the correct type
raw_expr_lambda ← lambdas [hyp] raw_expr,
return (raw_expr, raw_expr_lambda.lambdas args))
else (do
e_inst_type ← to_expr ((const class_nm []).app (pexpr.of_expr e_str)),
e_inst ← try_for 1000 (mk_instance e_inst_type),
raw_expr ← mk_mapp proj_nm [e_str, e_inst],
return (raw_expr, raw_expr.lambdas args)),
raw_expr_whnf ← whnf raw_expr,
let relevant_proj := raw_expr_whnf.binding_body.get_app_fn.const_name,
/- Use this as projection, if the function reduces to a projection, and this projection has
not been overridden by the user. -/
guard $ projs.any $
λ x, x.1 = relevant_proj.last ∧ ¬ e.contains (str ++ `simps ++ x.new_name.last),
let pos := projs.find_index (λ x, x.1 = relevant_proj.last),
when trc trace!
" > using {proj_nm} instead of the default projection {relevant_proj.last}.",
when_tracing `simps.debug trace!"[simps] > The raw projection is:\n {lambda_raw_expr}",
return $ raw_exprs.update_nth pos lambda_raw_expr } <|> return raw_exprs) raw_exprs,
let positions := raw_exprs_and_nrs.map prod.snd,
let proj_names := projs.map (λ x, x.new_name),
let defaults := projs.map (λ x, x.is_default),
let prefixes := projs.map (λ x, x.is_prefix),
let projs := proj_names.zip_with5 projection_data.mk raw_exprs positions defaults prefixes,
/- make all proof non-default. -/
projs ← projs.mmap $ λ proj,
is_proof proj.expr >>= λ b, return $ if b then { is_default := ff, .. proj } else proj,
when trc $ projections_info projs "generated projections for" str >>= trace,
simps_str_attr.set str (raw_univs, projs) tt,
when_tracing `simps.debug trace!
"[simps] > Generated raw projection data: \n{(raw_univs, projs)}",
return (raw_univs, projs)
/-- Parse a rule for `initialize_simps_projections`. It is either `<name>→<name>` or `-<name>`,
possibly followed by `as_prefix`. -/
meta def simps_parse_rule : parser projection_rule :=
prod.mk <$>
((λ x y, inl (x, y)) <$> ident <*> (tk "->" >> ident) <|> inr <$> (tk "-" >> ident)) <*>
is_some <$> (tk "as_prefix")?
/--
You can specify custom projections for the `@[simps]` attribute.
To do this for the projection `my_structure.original_projection`, add a declaration
`my_structure.simps.my_projection` that is definitionally equal to
`my_structure.original_projection` but has the projection in the desired (simp-normal) form.
Then you can call
```
initialize_simps_projections (original_projection → my_projection, ...)
```
to register this projection.
Running `initialize_simps_projections` without arguments is not necessary: it has the same effect
as just adding `@[simps]` to a declaration.
If you do anything to change the default projections, make sure to call either `@[simps]` or
`initialize_simps_projections` in the same file as the structure declaration. Otherwise, you might
have a file that imports the structure, but not your custom projections.
-/
library_note "custom simps projection"
/-- Specify simps projections, see Note [custom simps projection].
* You can specify custom names by writing e.g.
`initialize_simps_projections equiv (to_fun → apply, inv_fun → symm_apply)`.
* You can disable a projection by default by running
`initialize_simps_projections equiv (-inv_fun)`
This will ensure that no simp lemmas are generated for this projection,
unless this projection is explicitly specified by the user.
* If you want the projection name added as a prefix in the generated lemma name, you can add the
`as_prefix` modifier:
`initialize_simps_projections equiv (to_fun → coe as_prefix)`
Note that this does not influence the parsing of projection names: if you have a declaration
`foo` and you want to apply the projections `snd`, `coe` (which is a prefix) and `fst`, in that
order you can run `@[simps snd_coe_fst] def foo ...` and this will generate a lemma with the
name `coe_foo_snd_fst`.
* Run `initialize_simps_projections?` (or set `trace.simps.verbose`)
to see the generated projections. -/
@[user_command] meta def initialize_simps_projections_cmd
(_ : parse $ tk "initialize_simps_projections") : parser unit := do
env ← get_env,
trc ← is_some <$> (tk "?")?,
ns ← (prod.mk <$> ident <*> (tk "(" >> sep_by (tk ",") simps_parse_rule <* tk ")")?)*,
ns.mmap' $ λ data, do
nm ← resolve_constant data.1,
simps_get_raw_projections env nm tt (data.2.get_or_else []) trc
/--
Configuration options for the `@[simps]` attribute.
* `attrs` specifies the list of attributes given to the generated lemmas. Default: ``[`simp]``.
The attributes can be either basic attributes, or user attributes without parameters.
There are two attributes which `simps` might add itself:
* If ``[`simp]`` is in the list, then ``[`_refl_lemma]`` is added automatically if appropriate.
* If the definition is marked with `@[to_additive ...]` then all generated lemmas are marked
with `@[to_additive]`
* if `simp_rhs` is `tt` then the right-hand-side of the generated lemmas will be put in
simp-normal form. More precisely: `dsimp, simp` will be called on all these expressions.
See note [dsimp, simp].
* `type_md` specifies how aggressively definitions are unfolded in the type of expressions
for the purposes of finding out whether the type is a function type.
Default: `instances`. This will unfold coercion instances (so that a coercion to a function type
is recognized as a function type), but not declarations like `set`.
* `rhs_md` specifies how aggressively definition in the declaration are unfolded for the purposes
of finding out whether it is a constructor.
Default: `none`
Exception: `@[simps]` will automatically add the options
`{rhs_md := semireducible, simp_rhs := tt}` if the given definition is not a constructor with
the given reducibility setting for `rhs_md`.
* If `fully_applied` is `ff` then the generated `simp` lemmas will be between non-fully applied
terms, i.e. equalities between functions. This does not restrict the recursive behavior of
`@[simps]`, so only the "final" projection will be non-fully applied.
However, it can be used in combination with explicit field names, to get a partially applied
intermediate projection.
* The option `not_recursive` contains the list of names of types for which `@[simps]` doesn't
recursively apply projections. For example, given an equivalence `α × β ≃ β × α` one usually
wants to only apply the projections for `equiv`, and not also those for `×`. This option is
only relevant if no explicit projection names are given as argument to `@[simps]`.
* The option `trace` is set to `tt` when you write `@[simps?]`. In this case, the attribute will
print all generated lemmas. It is almost the same as setting the option `trace.simps.verbose`,
except that it doesn't print information about the found projections.
-/
@[derive [has_reflect, inhabited]] structure simps_cfg :=
(attrs := [`simp])
(simp_rhs := ff)
(type_md := transparency.instances)
(rhs_md := transparency.none)
(fully_applied := tt)
(not_recursive := [`prod, `pprod])
(trace := ff)
/-- A common configuration for `@[simps]`: generate equalities between functions instead of
equalities between fully applied expressions. -/
def as_fn : simps_cfg := {fully_applied := ff}
/-- A common configuration for `@[simps]`: don't tag the generated lemmas with `@[simp]`. -/
def lemmas_only : simps_cfg := {attrs := []}
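/-
A hedged sketch of how a `simps_cfg` is passed to the attribute (the declaration names
below are hypothetical, chosen only for illustration): per the syntax `('simps' ident* e)`,
the configuration expression comes after the optional projection names.
```lean
-- `simp_rhs := tt`: the right-hand sides of the generated lemmas are simplified
@[simps {simp_rhs := tt}] def my_pair : ℕ × ℕ := (1, 0 + 2)
-- generate the lemmas without tagging them `@[simp]` (same effect as `lemmas_only`)
@[simps {attrs := []}] def my_other_pair : ℕ × ℕ := (3, 4)
```
-/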
/--
Get the projections of a structure used by `@[simps]` applied to the appropriate arguments.
Returns a list of tuples
```
(corresponding right-hand-side, given projection name, projection expression, projection numbers,
used by default, is prefix)
```
(where all fields except the first are packed in a `projection_data` structure)
one for each projection. The given projection name is the name for the projection chosen by the
user; it is used to generate (and parse) projection names.
Example 1: ``simps_get_projection_exprs env `(α × β) `(⟨x, y⟩)`` will give the output
```
[(`(x), `fst, `(@prod.fst.{u v} α β), [0], tt, ff),
(`(y), `snd, `(@prod.snd.{u v} α β), [1], tt, ff)]
```
Example 2: ``simps_get_projection_exprs env `(α ≃ α) `(⟨id, id, λ _, rfl, λ _, rfl⟩)``
will give the output
```
[(`(id), `apply, `(coe), [0], tt, ff),
(`(id), `symm_apply, `(λ f, ⇑f.symm), [1], tt, ff),
...,
...]
```
-/
meta def simps_get_projection_exprs (e : environment) (tgt : expr)
(rhs : expr) (cfg : simps_cfg) : tactic $ list $ expr × projection_data := do
let params := get_app_args tgt, -- the parameters of the structure
(params.zip $ (get_app_args rhs).take params.length).mmap' (λ ⟨a, b⟩, is_def_eq a b)
<|> fail "unreachable code (1)",
let str := tgt.get_app_fn.const_name,
let rhs_args := (get_app_args rhs).drop params.length, -- the fields of the object
(raw_univs, proj_data) ← simps_get_raw_projections e str ff [] cfg.trace,
let univs := raw_univs.zip tgt.get_app_fn.univ_levels,
let new_proj_data : list $ expr × projection_data := proj_data.map $
λ proj, (rhs_args.inth proj.proj_nrs.head,
{ expr := (proj.expr.instantiate_univ_params univs).instantiate_lambdas_or_apps params,
proj_nrs := proj.proj_nrs.tail,
.. proj }),
return new_proj_data
/-- Add a lemma with `nm` stating that `lhs = rhs`. `type` is the type of both `lhs` and `rhs`,
`args` is the list of local constants occurring, and `univs` is the list of universe variables.
If `add_simp` then we make the resulting lemma a `simp` lemma. -/
meta def simps_add_projection (nm : name) (type lhs rhs : expr) (args : list expr)
(univs : list name) (cfg : simps_cfg) : tactic unit := do
when_tracing `simps.debug trace!
"[simps] > Planning to add the equality\n > {lhs} = ({rhs} : {type})",
lvl ← get_univ_level type,
-- simplify `rhs` if `cfg.simp_rhs` is true
(rhs, prf) ← do { guard cfg.simp_rhs,
rhs' ← rhs.dsimp {fail_if_unchanged := ff},
when_tracing `simps.debug $ when (rhs ≠ rhs') trace!
"[simps] > `dsimp` simplified rhs to\n > {rhs'}",
(rhsprf1, rhsprf2, ns) ← rhs'.simp {fail_if_unchanged := ff},
when_tracing `simps.debug $ when (rhs' ≠ rhsprf1) trace!
"[simps] > `simp` simplified rhs to\n > {rhsprf1}",
return (prod.mk rhsprf1 rhsprf2) }
<|> return (rhs, const `eq.refl [lvl] type lhs),
let eq_ap := const `eq [lvl] type lhs rhs,
decl_name ← get_unused_decl_name nm,
let decl_type := eq_ap.pis args,
let decl_value := prf.lambdas args,
let decl := declaration.thm decl_name univs decl_type (pure decl_value),
when cfg.trace trace!
"[simps] > adding projection {decl_name}:\n > {decl_type}",
decorate_error ("Failed to add projection lemma " ++ decl_name.to_string ++ ". Nested error:") $
add_decl decl,
b ← succeeds $ is_def_eq lhs rhs,
when (b ∧ `simp ∈ cfg.attrs) (set_basic_attribute `_refl_lemma decl_name tt),
cfg.attrs.mmap' $ λ nm, set_attribute nm decl_name tt
/-- Derive lemmas specifying the projections of the declaration.
If `todo` is non-empty, it will generate exactly the names in `todo`.
`to_apply` is non-empty after a custom projection that is a composition of multiple projections
was just used. In that case we need to apply these projections before we continue changing lhs. -/
meta def simps_add_projections : Π (e : environment) (nm : name)
(type lhs rhs : expr) (args : list expr) (univs : list name) (must_be_str : bool)
(cfg : simps_cfg) (todo : list string) (to_apply : list ℕ), tactic unit
| e nm type lhs rhs args univs must_be_str cfg todo to_apply := do
-- we don't want to unfold non-reducible definitions (like `set`) to apply more arguments
when_tracing `simps.debug trace!
"[simps] > Type of the expression before normalizing: {type}",
(type_args, tgt) ← open_pis_whnf type cfg.type_md,
when_tracing `simps.debug trace!"[simps] > Type after removing pi's: {tgt}",
tgt ← whnf tgt,
when_tracing `simps.debug trace!"[simps] > Type after reduction: {tgt}",
let new_args := args ++ type_args,
let lhs_ap := lhs.instantiate_lambdas_or_apps type_args,
let rhs_ap := rhs.instantiate_lambdas_or_apps type_args,
let str := tgt.get_app_fn.const_name,
/- We want to generate the current projection if it is in `todo` -/
let todo_next := todo.filter (≠ ""),
/- Don't recursively continue if `str` is not a structure or if the structure is in
`not_recursive`. -/
if e.is_structure str ∧ ¬(todo = [] ∧ str ∈ cfg.not_recursive ∧ ¬must_be_str) then do
[intro] ← return $ e.constructors_of str | fail "unreachable code (3)",
rhs_whnf ← whnf rhs_ap cfg.rhs_md,
(rhs_ap, todo_now) ← -- `todo_now` means that we still have to generate the current simp lemma
if ¬ is_constant_of rhs_ap.get_app_fn intro ∧
is_constant_of rhs_whnf.get_app_fn intro then
/- If this was a desired projection, we want to apply it before taking the whnf.
However, if the current field is an eta-expansion (see below), we first want
to eta-reduce it and only then construct the projection.
This makes the flow of this function messy. -/
when ("" ∈ todo ∧ to_apply = []) (if cfg.fully_applied then
simps_add_projection nm tgt lhs_ap rhs_ap new_args univs cfg else
simps_add_projection nm type lhs rhs args univs cfg) >>
return (rhs_whnf, ff) else
return (rhs_ap, "" ∈ todo ∧ to_apply = []),
if is_constant_of (get_app_fn rhs_ap) intro then do -- if the value is a constructor application
proj_info ← simps_get_projection_exprs e tgt rhs_ap cfg,
when_tracing `simps.debug trace!"[simps] > Raw projection information:\n {proj_info}",
eta ← rhs_ap.is_eta_expansion, -- check whether `rhs_ap` is an eta-expansion
let rhs_ap := eta.lhoare rhs_ap, -- eta-reduce `rhs_ap`
/- As a special case, we want to automatically generate the current projection if `rhs_ap`
was an eta-expansion. Also, when this was a desired projection, we need to generate the
current projection if we haven't done it above. -/
when (todo_now ∨ (todo = [] ∧ eta.is_some ∧ to_apply = [])) $
if cfg.fully_applied then
simps_add_projection nm tgt lhs_ap rhs_ap new_args univs cfg else
simps_add_projection nm type lhs rhs args univs cfg,
/- If we are in the middle of a composite projection. -/
when (to_apply ≠ []) $ do {
⟨new_rhs, proj, proj_expr, proj_nrs, is_default, is_prefix⟩ ←
return $ proj_info.inth to_apply.head,
new_type ← infer_type new_rhs,
when_tracing `simps.debug
trace!"[simps] > Applying a custom composite projection. Current lhs:
> {lhs_ap}",
simps_add_projections e nm new_type lhs_ap new_rhs new_args univs ff cfg todo
to_apply.tail },
/- We stop if no further projection is specified or if we just reduced an eta-expansion and we
automatically choose projections -/
when ¬(to_apply ≠ [] ∨ todo = [""] ∨ (eta.is_some ∧ todo = [])) $ do
let projs : list name := proj_info.map $ λ x, x.snd.name,
let todo := if to_apply = [] then todo_next else todo,
-- check whether all elements in `todo` have a projection as prefix
guard (todo.all $ λ x, projs.any $ λ proj, ("_" ++ proj.last).is_prefix_of x) <|>
let x := (todo.find $ λ x, projs.all $ λ proj, ¬ ("_" ++ proj.last).is_prefix_of x).iget,
simp_lemma := nm.append_suffix x,
needed_proj := (x.split_on '_').tail.head in
fail!
"Invalid simp lemma {simp_lemma}. Structure {str} does not have projection {needed_proj}.
The known projections are:
{projs}
You can also see this information by running
`initialize_simps_projections? {str}`.
Note: these projection names might not correspond to the projection names of the structure.",
proj_info.mmap_with_index' $
λ proj_nr ⟨new_rhs, proj, proj_expr, proj_nrs, is_default, is_prefix⟩, do
new_type ← infer_type new_rhs,
let new_todo :=
todo.filter_map $ λ x, x.get_rest ("_" ++ proj.last),
-- we only continue with this field if it is non-propositional or mentioned in todo
when ((is_default ∧ todo = []) ∨ new_todo ≠ []) $ do
let new_lhs := proj_expr.instantiate_lambdas_or_apps [lhs_ap],
let new_nm := nm.append_to_last proj.last is_prefix,
when_tracing `simps.debug trace!"[simps] > Recursively add projections for:
> {new_lhs}",
simps_add_projections e new_nm new_type new_lhs new_rhs new_args univs
ff cfg new_todo proj_nrs
-- if I'm about to run into an error, try to set the transparency for `rhs_md` higher.
else if cfg.rhs_md = transparency.none ∧ (must_be_str ∨ todo_next ≠ [] ∨ to_apply ≠ []) then do
when cfg.trace trace!
"[simps] > The given definition is not a constructor application:
> {rhs_ap}
> Retrying with the options {{ rhs_md := semireducible, simp_rhs := tt}.",
simps_add_projections e nm type lhs rhs args univs must_be_str
{ rhs_md := semireducible, simp_rhs := tt, ..cfg} todo to_apply
else do
when (to_apply ≠ []) $
fail!"Invalid simp lemma {nm}.
The given definition is not a constructor application:\n {rhs_ap}",
when must_be_str $
fail!"Invalid `simps` attribute. The body is not a constructor application:\n {rhs_ap}",
when (todo_next ≠ []) $
fail!"Invalid simp lemma {nm.append_suffix todo_next.head}.
The given definition is not a constructor application:\n {rhs_ap}",
if cfg.fully_applied then
simps_add_projection nm tgt lhs_ap rhs_ap new_args univs cfg else
simps_add_projection nm type lhs rhs args univs cfg
else do
when must_be_str $
fail!"Invalid `simps` attribute. Target {str} is not a structure",
when (todo_next ≠ [] ∧ str ∉ cfg.not_recursive) $
let first_todo := todo_next.head in
fail!"Invalid simp lemma {nm.append_suffix first_todo}.
Projection {(first_todo.split_on '_').tail.head} doesn't exist, because target is not a structure.",
if cfg.fully_applied then
simps_add_projection nm tgt lhs_ap rhs_ap new_args univs cfg else
simps_add_projection nm type lhs rhs args univs cfg
/-- `simps_tac` derives `simp` lemmas for all (nested) non-Prop projections of the declaration.
If `todo` is non-empty, it will generate exactly the names in `todo`.
If `short_nm` is true, the generated names will only use the last projection name.
If `trc` is true, trace as if `trace.simps.verbose` is true. -/
meta def simps_tac (nm : name) (cfg : simps_cfg := {}) (todo : list string := []) (trc := ff) :
tactic unit := do
e ← get_env,
d ← e.get nm,
let lhs : expr := const d.to_name d.univ_levels,
let todo := todo.erase_dup.map $ λ proj, "_" ++ proj,
b ← has_attribute' `to_additive nm,
let cfg := if b then { attrs := cfg.attrs ++ [`to_additive], ..cfg } else cfg,
let cfg := { trace := cfg.trace || is_trace_enabled_for `simps.verbose || trc, ..cfg },
when (cfg.trace ∧ `to_additive ∈ cfg.attrs)
trace!"[simps] > @[to_additive] will be added to all generated lemmas.",
simps_add_projections e nm d.type lhs d.value [] d.univ_params tt cfg todo []
/-- The parser for the `@[simps]` attribute. -/
meta def simps_parser : parser (bool × list string × simps_cfg) := do
/- note: we don't check whether the user has written a nonsense namespace in an argument. -/
prod.mk <$> is_some <$> (tk "?")? <*>
(prod.mk <$> many (name.last <$> ident) <*>
(do some e ← parser.pexpr? | return {}, eval_pexpr simps_cfg e))
/--
The `@[simps]` attribute automatically derives lemmas specifying the projections of this
declaration.
Example:
```lean
@[simps] def foo : ℕ × ℤ := (1, 2)
```
derives two `simp` lemmas:
```lean
@[simp] lemma foo_fst : foo.fst = 1
@[simp] lemma foo_snd : foo.snd = 2
```
* It does not derive `simp` lemmas for the prop-valued projections.
* It will automatically reduce newly created beta-redexes, but will not unfold any definitions.
* If the structure has a coercion to either sorts or functions, and this is defined to be one
of the projections, then this coercion will be used instead of the projection.
* If the structure is a class that has an instance to a notation class, like `has_mul`, then this
notation is used instead of the corresponding projection.
* You can specify custom projections, by giving a declaration with name
`{structure_name}.simps.{projection_name}`. See Note [custom simps projection].
Example:
```lean
def equiv.simps.inv_fun (e : α ≃ β) : β → α := e.symm
@[simps] def equiv.trans (e₁ : α ≃ β) (e₂ : β ≃ γ) : α ≃ γ :=
⟨e₂ ∘ e₁, e₁.symm ∘ e₂.symm⟩
```
generates
```
@[simp] lemma equiv.trans_to_fun : ∀ {α β γ} (e₁ e₂) (a : α), ⇑(e₁.trans e₂) a = (⇑e₂ ∘ ⇑e₁) a
@[simp] lemma equiv.trans_inv_fun : ∀ {α β γ} (e₁ e₂) (a : γ),
⇑((e₁.trans e₂).symm) a = (⇑(e₁.symm) ∘ ⇑(e₂.symm)) a
```
* You can specify custom projection names using `initialize_simps_projections`.
Example: `initialize_simps_projections equiv (to_fun → apply, inv_fun → symm_apply)`.
* If one of the fields itself is a structure, this command will recursively create
`simp` lemmas for all fields in that structure.
* Exception: by default it will not recursively create `simp` lemmas for fields in the structures
`prod` and `pprod`. Give explicit projection names to override this behavior.
Example:
```lean
structure my_prod (α β : Type*) := (fst : α) (snd : β)
@[simps] def foo : prod ℕ ℕ × my_prod ℕ ℕ := ⟨⟨1, 2⟩, 3, 4⟩
```
generates
```lean
@[simp] lemma foo_fst : foo.fst = (1, 2)
@[simp] lemma foo_snd_fst : foo.snd.fst = 3
@[simp] lemma foo_snd_snd : foo.snd.snd = 4
```
* You can use `@[simps proj1 proj2 ...]` to only generate the projection lemmas for the specified
projections.
* Recursive projection names can be specified using `proj1_proj2_proj3`.
This will create a lemma of the form `foo.proj1.proj2.proj3 = ...`.
Example:
```lean
structure my_prod (α β : Type*) := (fst : α) (snd : β)
@[simps fst fst_fst snd] def foo : prod ℕ ℕ × my_prod ℕ ℕ := ⟨⟨1, 2⟩, 3, 4⟩
```
generates
```lean
@[simp] lemma foo_fst : foo.fst = (1, 2)
@[simp] lemma foo_fst_fst : foo.fst.fst = 1
@[simp] lemma foo_snd : foo.snd = {fst := 3, snd := 4}
```
* If one of the values is an eta-expanded structure, we will eta-reduce this structure.
Example:
```lean
structure equiv_plus_data (α β) extends α ≃ β := (data : bool)
@[simps] def bar {α} : equiv_plus_data α α := { data := tt, ..equiv.refl α }
```
generates the following, even though Lean inserts an eta-expanded version of `equiv.refl α` in the
definition of `bar`:
```lean
@[simp] lemma bar_to_equiv : ∀ {α : Sort u_1}, bar.to_equiv = equiv.refl α
@[simp] lemma bar_data : ∀ {α : Sort u_1}, bar.data = tt
```
* For configuration options, see the doc string of `simps_cfg`.
* The precise syntax is `('simps' ident* e)`, where `e` is an expression of type `simps_cfg`.
* `@[simps]` reduces let-expressions where necessary.
* If one of the fields is a partially applied constructor, we will eta-expand it
(this likely never happens).
* When option `trace.simps.verbose` is true, `simps` will print the projections it finds and the
lemmas it generates. The same can be achieved by using `@[simps?]`, except that in this case it
will not print projection information.
* Use `@[to_additive, simps]` to apply both `to_additive` and `simps` to a definition, making sure
that `simps` comes after `to_additive`. This will also generate the additive versions of all
`simp` lemmas. Note however, that the additive versions of the `simp` lemmas always use the
default name generated by `to_additive`, even if a custom name is given for the additive version
of the definition.
-/
@[user_attribute] meta def simps_attr : user_attribute unit (bool × list string × simps_cfg) :=
{ name := `simps,
descr := "Automatically derive lemmas specifying the projections of this declaration.",
parser := simps_parser,
after_set := some $
λ n _ persistent, do
guard persistent <|> fail "`simps` currently cannot be used as a local attribute",
(trc, todo, cfg) ← simps_attr.get_param n,
simps_tac n cfg todo trc }
add_tactic_doc
{ name := "simps",
category := doc_category.attr,
decl_names := [`simps_attr],
tags := ["simplification"] }
|
ed363200ee375efd2363855a561b59503be5de79
|
d1bbf1801b3dcb214451d48214589f511061da63
|
/src/algebraic_geometry/prime_spectrum.lean
|
a6b5adf4db0c95609b27f280d6b368d2457407ea
|
[
"Apache-2.0"
] |
permissive
|
cheraghchi/mathlib
|
5c366f8c4f8e66973b60c37881889da8390cab86
|
f29d1c3038422168fbbdb2526abf7c0ff13e86db
|
refs/heads/master
| 1,676,577,831,283
| 1,610,894,638,000
| 1,610,894,638,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 14,398
|
lean
|
/-
Copyright (c) 2020 Johan Commelin. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johan Commelin
-/
import topology.opens
import ring_theory.ideal.prod
import linear_algebra.finsupp
import algebra.punit_instances
/-!
# Prime spectrum of a commutative ring
The prime spectrum of a commutative ring is the type of all prime ideals.
It is naturally endowed with a topology: the Zariski topology.
(It is also naturally endowed with a sheaf of rings,
which is constructed in `algebraic_geometry.structure_sheaf`.)
## Main definitions
* `prime_spectrum R`: The prime spectrum of a commutative ring `R`,
i.e., the set of all prime ideals of `R`.
* `zero_locus s`: The zero locus of a subset `s` of `R`
is the subset of `prime_spectrum R` consisting of all prime ideals that contain `s`.
* `vanishing_ideal t`: The vanishing ideal of a subset `t` of `prime_spectrum R`
is the intersection of points in `t` (viewed as prime ideals).
## Conventions
We denote subsets of rings with `s`, `s'`, etc...
whereas we denote subsets of prime spectra with `t`, `t'`, etc...
## Inspiration/contributors
The contents of this file draw inspiration from
<https://github.com/ramonfmir/lean-scheme>
which has contributions from Ramon Fernandez Mir, Kevin Buzzard, Kenny Lau,
and Chris Hughes (on an earlier repository).
-/
noncomputable theory
open_locale classical
universe variables u v
variables (R : Type u) [comm_ring R]
/-- The prime spectrum of a commutative ring `R`
is the type of all prime ideals of `R`.
It is naturally endowed with a topology (the Zariski topology),
and a sheaf of commutative rings (see `algebraic_geometry.structure_sheaf`).
It is a fundamental building block in algebraic geometry. -/
@[nolint has_inhabited_instance]
def prime_spectrum := {I : ideal R // I.is_prime}
variable {R}
namespace prime_spectrum
/-- A method to view a point in the prime spectrum of a commutative ring
as an ideal of that ring. -/
abbreviation as_ideal (x : prime_spectrum R) : ideal R := x.val
instance is_prime (x : prime_spectrum R) :
x.as_ideal.is_prime := x.2
/--
The prime spectrum of the zero ring is empty.
-/
lemma punit (x : prime_spectrum punit) : false :=
x.1.ne_top_iff_one.1 x.2.1 $ subsingleton.elim (0 : punit) 1 ▸ x.1.zero_mem
section
variables (R) (S : Type v) [comm_ring S]
/-- The prime spectrum of `R × S` is in bijection with the disjoint union of the prime spectrum of
`R` and the prime spectrum of `S`. -/
noncomputable def prime_spectrum_prod :
prime_spectrum (R × S) ≃ prime_spectrum R ⊕ prime_spectrum S :=
ideal.prime_ideals_equiv R S
variables {R S}
@[simp] lemma prime_spectrum_prod_symm_inl_as_ideal (x : prime_spectrum R) :
((prime_spectrum_prod R S).symm (sum.inl x)).as_ideal = ideal.prod x.as_ideal ⊤ :=
by { cases x, refl }
@[simp] lemma prime_spectrum_prod_symm_inr_as_ideal (x : prime_spectrum S) :
((prime_spectrum_prod R S).symm (sum.inr x)).as_ideal = ideal.prod ⊤ x.as_ideal :=
by { cases x, refl }
end
@[ext] lemma ext {x y : prime_spectrum R} :
x = y ↔ x.as_ideal = y.as_ideal :=
subtype.ext_iff_val
/-- The zero locus of a set `s` of elements of a commutative ring `R`
is the set of all prime ideals of the ring that contain the set `s`.
An element `f` of `R` can be thought of as a dependent function
on the prime spectrum of `R`.
At a point `x` (a prime ideal)
the function (i.e., element) `f` takes values in the quotient ring `R` modulo the prime ideal `x`.
In this manner, `zero_locus s` is exactly the subset of `prime_spectrum R`
where all "functions" in `s` vanish simultaneously.
-/
def zero_locus (s : set R) : set (prime_spectrum R) :=
{x | s ⊆ x.as_ideal}
@[simp] lemma mem_zero_locus (x : prime_spectrum R) (s : set R) :
x ∈ zero_locus s ↔ s ⊆ x.as_ideal := iff.rfl
@[simp] lemma zero_locus_span (s : set R) :
zero_locus (ideal.span s : set R) = zero_locus s :=
by { ext x, exact (submodule.gi R R).gc s x.as_ideal }
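/-
A hedged sanity check of the definition, stated as a sketch: membership in `zero_locus s`
is, by definition, the statement that `s` is contained in the prime ideal `x`.
```lean
example (x : prime_spectrum R) (s : set R) (h : s ⊆ (x.as_ideal : set R)) :
  x ∈ zero_locus s := h
```
-/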
/-- The vanishing ideal of a set `t` of points
of the prime spectrum of a commutative ring `R`
is the intersection of all the prime ideals in the set `t`.
An element `f` of `R` can be thought of as a dependent function
on the prime spectrum of `R`.
At a point `x` (a prime ideal)
the function (i.e., element) `f` takes values in the quotient ring `R` modulo the prime ideal `x`.
In this manner, `vanishing_ideal t` is exactly the ideal of `R`
consisting of all "functions" that vanish on all of `t`.
-/
def vanishing_ideal (t : set (prime_spectrum R)) : ideal R :=
⨅ (x : prime_spectrum R) (h : x ∈ t), x.as_ideal
lemma coe_vanishing_ideal (t : set (prime_spectrum R)) :
(vanishing_ideal t : set R) = {f : R | ∀ x : prime_spectrum R, x ∈ t → f ∈ x.as_ideal} :=
begin
ext f,
rw [vanishing_ideal, submodule.mem_coe, submodule.mem_infi],
apply forall_congr, intro x,
rw [submodule.mem_infi],
end
lemma mem_vanishing_ideal (t : set (prime_spectrum R)) (f : R) :
f ∈ vanishing_ideal t ↔ ∀ x : prime_spectrum R, x ∈ t → f ∈ x.as_ideal :=
by rw [← submodule.mem_coe, coe_vanishing_ideal, set.mem_set_of_eq]
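/-
A hedged sketch of how `mem_vanishing_ideal` is typically used: every point of `t` gives
an upper bound for the vanishing ideal of `t`.
```lean
example (t : set (prime_spectrum R)) (x : prime_spectrum R) (hx : x ∈ t) :
  vanishing_ideal t ≤ x.as_ideal :=
λ f hf, (mem_vanishing_ideal t f).1 hf x hx
```
-/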
lemma subset_zero_locus_iff_le_vanishing_ideal (t : set (prime_spectrum R)) (I : ideal R) :
t ⊆ zero_locus I ↔ I ≤ vanishing_ideal t :=
begin
split; intro h,
{ intros f hf,
rw [mem_vanishing_ideal],
intros x hx,
have hxI := h hx,
rw mem_zero_locus at hxI,
exact hxI hf },
{ intros x hx,
rw mem_zero_locus,
refine le_trans h _,
intros f hf,
rw [mem_vanishing_ideal] at hf,
exact hf x hx }
end
section gc
variable (R)
/-- `zero_locus` and `vanishing_ideal` form a galois connection. -/
lemma gc : @galois_connection
(ideal R) (order_dual (set (prime_spectrum R))) _ _
(λ I, zero_locus I) (λ t, vanishing_ideal t) :=
λ I t, subset_zero_locus_iff_le_vanishing_ideal t I
/-- `zero_locus` and `vanishing_ideal` form a galois connection. -/
lemma gc_set : @galois_connection
(set R) (order_dual (set (prime_spectrum R))) _ _
(λ s, zero_locus s) (λ t, vanishing_ideal t) :=
have ideal_gc : galois_connection (ideal.span) coe := (submodule.gi R R).gc,
by simpa [zero_locus_span, function.comp] using galois_connection.compose _ _ _ _ ideal_gc (gc R)
lemma subset_zero_locus_iff_subset_vanishing_ideal (t : set (prime_spectrum R)) (s : set R) :
t ⊆ zero_locus s ↔ s ⊆ vanishing_ideal t :=
(gc_set R) s t
end gc
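/-
A monotonicity fact, sketched for illustration, that also follows from the Galois
connection above: `zero_locus` is antitone in the subset of `R`.
```lean
example (s s' : set R) (h : s ⊆ s') : zero_locus s' ⊆ zero_locus s :=
λ x hx, set.subset.trans h hx
```
-/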
-- TODO: we actually get the radical ideal,
-- but I think that isn't in mathlib yet.
lemma subset_vanishing_ideal_zero_locus (s : set R) :
s ⊆ vanishing_ideal (zero_locus s) :=
(gc_set R).le_u_l s
lemma le_vanishing_ideal_zero_locus (I : ideal R) :
I ≤ vanishing_ideal (zero_locus I) :=
(gc R).le_u_l I
lemma subset_zero_locus_vanishing_ideal (t : set (prime_spectrum R)) :
t ⊆ zero_locus (vanishing_ideal t) :=
(gc R).l_u_le t
lemma zero_locus_bot :
zero_locus ((⊥ : ideal R) : set R) = set.univ :=
(gc R).l_bot
@[simp] lemma zero_locus_singleton_zero :
zero_locus (0 : set R) = set.univ :=
zero_locus_bot
@[simp] lemma zero_locus_empty :
zero_locus (∅ : set R) = set.univ :=
(gc_set R).l_bot
@[simp] lemma vanishing_ideal_univ :
vanishing_ideal (∅ : set (prime_spectrum R)) = ⊤ :=
by simpa using (gc R).u_top
lemma zero_locus_empty_of_one_mem {s : set R} (h : (1:R) ∈ s) :
zero_locus s = ∅ :=
begin
rw set.eq_empty_iff_forall_not_mem,
intros x hx,
rw mem_zero_locus at hx,
have x_prime : x.as_ideal.is_prime := by apply_instance,
have eq_top : x.as_ideal = ⊤, { rw ideal.eq_top_iff_one, exact hx h },
apply x_prime.1 eq_top,
end
lemma zero_locus_empty_iff_eq_top {I : ideal R} :
zero_locus (I : set R) = ∅ ↔ I = ⊤ :=
begin
split,
{ contrapose!,
intro h,
apply set.ne_empty_iff_nonempty.mpr,
rcases ideal.exists_le_maximal I h with ⟨M, hM, hIM⟩,
exact ⟨⟨M, hM.is_prime⟩, hIM⟩ },
{ rintro rfl, apply zero_locus_empty_of_one_mem, trivial }
end
@[simp] lemma zero_locus_univ :
zero_locus (set.univ : set R) = ∅ :=
zero_locus_empty_of_one_mem (set.mem_univ 1)
lemma zero_locus_sup (I J : ideal R) :
zero_locus ((I ⊔ J : ideal R) : set R) = zero_locus I ∩ zero_locus J :=
(gc R).l_sup
lemma zero_locus_union (s s' : set R) :
zero_locus (s ∪ s') = zero_locus s ∩ zero_locus s' :=
(gc_set R).l_sup
lemma vanishing_ideal_union (t t' : set (prime_spectrum R)) :
vanishing_ideal (t ∪ t') = vanishing_ideal t ⊓ vanishing_ideal t' :=
(gc R).u_inf
lemma zero_locus_supr {ι : Sort*} (I : ι → ideal R) :
zero_locus ((⨆ i, I i : ideal R) : set R) = (⋂ i, zero_locus (I i)) :=
(gc R).l_supr
lemma zero_locus_Union {ι : Sort*} (s : ι → set R) :
zero_locus (⋃ i, s i) = (⋂ i, zero_locus (s i)) :=
(gc_set R).l_supr
lemma zero_locus_bUnion (s : set (set R)) :
zero_locus (⋃ s' ∈ s, s' : set R) = ⋂ s' ∈ s, zero_locus s' :=
by simp only [zero_locus_Union]
lemma vanishing_ideal_Union {ι : Sort*} (t : ι → set (prime_spectrum R)) :
vanishing_ideal (⋃ i, t i) = (⨅ i, vanishing_ideal (t i)) :=
(gc R).u_infi
lemma zero_locus_inf (I J : ideal R) :
zero_locus ((I ⊓ J : ideal R) : set R) = zero_locus I ∪ zero_locus J :=
begin
ext x,
split,
{ rintro h,
rw set.mem_union,
simp only [mem_zero_locus] at h ⊢,
-- TODO: The rest of this proof should be factored out.
rw or_iff_not_imp_right,
intros hs r hr,
rw set.not_subset at hs,
rcases hs with ⟨s, hs1, hs2⟩,
apply (ideal.is_prime.mem_or_mem (by apply_instance) _).resolve_left hs2,
apply h,
exact ⟨I.mul_mem_left _ hr, J.mul_mem_right _ hs1⟩ },
{ rintro (h|h),
all_goals
{ rw mem_zero_locus at h ⊢,
refine set.subset.trans _ h,
intros r hr, cases hr, assumption } }
end
lemma union_zero_locus (s s' : set R) :
zero_locus s ∪ zero_locus s' = zero_locus ((ideal.span s) ⊓ (ideal.span s') : ideal R) :=
by { rw zero_locus_inf, simp }
lemma sup_vanishing_ideal_le (t t' : set (prime_spectrum R)) :
vanishing_ideal t ⊔ vanishing_ideal t' ≤ vanishing_ideal (t ∩ t') :=
begin
intros r,
rw [submodule.mem_sup, mem_vanishing_ideal],
rintro ⟨f, hf, g, hg, rfl⟩ x ⟨hxt, hxt'⟩,
rw mem_vanishing_ideal at hf hg,
apply submodule.add_mem; solve_by_elim
end
/-- The Zariski topology on the prime spectrum of a commutative ring
is defined via the closed sets of the topology:
they are exactly those sets that are the zero locus of a subset of the ring. -/
instance zariski_topology : topological_space (prime_spectrum R) :=
topological_space.of_closed (set.range prime_spectrum.zero_locus)
(⟨set.univ, by simp⟩)
begin
intros Zs h,
rw set.sInter_eq_Inter,
let f : Zs → set R := λ i, classical.some (h i.2),
have hf : ∀ i : Zs, ↑i = zero_locus (f i) := λ i, (classical.some_spec (h i.2)).symm,
simp only [hf],
exact ⟨_, zero_locus_Union _⟩
end
(by { rintro _ _ ⟨s, rfl⟩ ⟨t, rfl⟩, exact ⟨_, (union_zero_locus s t).symm⟩ })
lemma is_open_iff (U : set (prime_spectrum R)) :
is_open U ↔ ∃ s, Uᶜ = zero_locus s :=
by simp only [@eq_comm _ Uᶜ]; refl
lemma is_closed_iff_zero_locus (Z : set (prime_spectrum R)) :
is_closed Z ↔ ∃ s, Z = zero_locus s :=
by rw [is_closed, is_open_iff, compl_compl]
lemma is_closed_zero_locus (s : set R) :
is_closed (zero_locus s) :=
by { rw [is_closed_iff_zero_locus], exact ⟨s, rfl⟩ }
section comap
variables {S : Type v} [comm_ring S] {S' : Type*} [comm_ring S']
/-- The function between prime spectra of commutative rings induced by a ring homomorphism.
This function is continuous. -/
def comap (f : R →+* S) : prime_spectrum S → prime_spectrum R :=
λ y, ⟨ideal.comap f y.as_ideal, by exact ideal.is_prime.comap _⟩
variables (f : R →+* S)
@[simp] lemma comap_as_ideal (y : prime_spectrum S) :
(comap f y).as_ideal = ideal.comap f y.as_ideal :=
rfl
@[simp] lemma comap_id : comap (ring_hom.id R) = id :=
funext $ λ x, ext.mpr $ by { rw [comap_as_ideal], apply ideal.ext, intros r, simp }
@[simp] lemma comap_comp (f : R →+* S) (g : S →+* S') :
comap (g.comp f) = comap f ∘ comap g :=
funext $ λ x, ext.mpr $ by { simp, refl }
@[simp] lemma preimage_comap_zero_locus (s : set R) :
(comap f) ⁻¹' (zero_locus s) = zero_locus (f '' s) :=
begin
ext x,
simp only [mem_zero_locus, set.mem_preimage, comap_as_ideal, set.image_subset_iff],
refl
end
lemma comap_continuous (f : R →+* S) : continuous (comap f) :=
begin
rw continuous_iff_is_closed,
simp only [is_closed_iff_zero_locus],
rintro _ ⟨s, rfl⟩,
exact ⟨_, preimage_comap_zero_locus f s⟩
end
end comap
lemma zero_locus_vanishing_ideal_eq_closure (t : set (prime_spectrum R)) :
zero_locus (vanishing_ideal t : set R) = closure t :=
begin
apply set.subset.antisymm,
{ rintro x hx t' ⟨ht', ht⟩,
obtain ⟨fs, rfl⟩ : ∃ s, t' = zero_locus s,
by rwa [is_closed_iff_zero_locus] at ht',
rw [subset_zero_locus_iff_subset_vanishing_ideal] at ht,
calc fs ⊆ vanishing_ideal t : ht
... ⊆ x.as_ideal : hx },
{ rw (is_closed_zero_locus _).closure_subset_iff,
exact subset_zero_locus_vanishing_ideal t }
end
/-- The prime spectrum of a commutative ring is a compact topological space. -/
instance : compact_space (prime_spectrum R) :=
begin
apply compact_space_of_finite_subfamily_closed,
intros ι Z hZc hZ,
let I : ι → ideal R := λ i, vanishing_ideal (Z i),
have hI : ∀ i, Z i = zero_locus (I i),
{ intro i,
rw [zero_locus_vanishing_ideal_eq_closure, is_closed.closure_eq],
exact hZc i },
have one_mem : (1:R) ∈ ⨆ (i : ι), I i,
{ rw [← ideal.eq_top_iff_one, ← zero_locus_empty_iff_eq_top, zero_locus_supr],
simpa only [hI] using hZ },
obtain ⟨s, hs⟩ : ∃ s : finset ι, (1:R) ∈ ⨆ i ∈ s, I i :=
submodule.exists_finset_of_mem_supr I one_mem,
show ∃ t : finset ι, (⋂ i ∈ t, Z i) = ∅,
use s,
rw [← ideal.eq_top_iff_one, ←zero_locus_empty_iff_eq_top] at hs,
simpa only [zero_locus_supr, hI] using hs
end
section basic_open
/-- `basic_open r` is the open subset containing all prime ideals not containing `r`. -/
def basic_open (r : R) : topological_space.opens (prime_spectrum R) :=
{ val := { x | r ∉ x.as_ideal },
property := ⟨{r}, set.ext $ λ x, set.singleton_subset_iff.trans $ not_not.symm⟩ }
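/-
A hedged sketch of what `basic_open` unfolds to: the underlying set of `basic_open r`
is literally the set of primes not containing `r`.
```lean
example (r : R) (x : prime_spectrum R) :
  x ∈ (basic_open r).val ↔ r ∉ x.as_ideal := iff.rfl
```
-/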
end basic_open
end prime_spectrum
|
b51b7ec2b31238a6e4194a7a0b277359005a9202
|
a9d0fb7b0e4f802bd3857b803e6c5c23d87fef91
|
/tests/lean/run/group.lean
|
4c153525e3c5f8e6c6033520b6fd66c06f82d399
|
[
"Apache-2.0"
] |
permissive
|
soonhokong/lean-osx
|
4a954262c780e404c1369d6c06516161d07fcb40
|
3670278342d2f4faa49d95b46d86642d7875b47c
|
refs/heads/master
| 1,611,410,334,552
| 1,474,425,686,000
| 1,474,425,686,000
| 12,043,103
| 5
| 1
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 1,094
|
lean
|
section
variable {A : Type*}
variable f : A → A → A
variable one : A
variable inv : A → A
local infixl `*` := f
local postfix `^-1`:100 := inv
definition is_assoc := ∀ a b c, (a*b)*c = a*b*c
definition is_id := ∀ a, a*one = a
definition is_inv := ∀ a, a*a^-1 = one
end
inductive [class] group_struct (A : Type*) : Type*
| mk_group_struct : Π (mul : A → A → A) (one : A) (inv : A → A), is_assoc mul → is_id mul one → is_inv mul one inv → group_struct
inductive group : Type*
| mk_group : Π (A : Type*), group_struct A → group
definition carrier (g : group) : Type*
:= group.rec (λ c s, c) g
attribute [instance]
definition group_to_struct (g : group) : group_struct (carrier g)
:= group.rec (λ (A : Type*) (s : group_struct A), s) g
check group_struct
definition mul1 {A : Type*} {s : group_struct A} (a b : A) : A
:= group_struct.rec (λ mul one inv h1 h2 h3, mul) s a b
infixl `*` := mul1
constant G1 : group.{1}
constant G2 : group.{1}
constants a b c : (carrier G2)
constants d e : (carrier G1)
check a * b * b
check d * e
|
fc490dbfd8583a8ef86728554ca3a2696c0c1c6f
|
8b9f17008684d796c8022dab552e42f0cb6fb347
|
/tests/lean/run/inf_tree.lean
|
b53a73136ca66e2b6d9a12fede9aae77f935e95b
|
[
"Apache-2.0"
] |
permissive
|
chubbymaggie/lean
|
0d06ae25f9dd396306fb02190e89422ea94afd7b
|
d2c7b5c31928c98f545b16420d37842c43b4ae9a
|
refs/heads/master
| 1,611,313,622,901
| 1,430,266,839,000
| 1,430,267,083,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 1,276
|
lean
|
import logic data.nat.basic
open nat
inductive inftree (A : Type) :=
| leaf : A → inftree A
| node : (nat → inftree A) → inftree A
namespace inftree
inductive dsub {A : Type} : inftree A → inftree A → Prop :=
intro : Π (f : nat → inftree A) (a : nat), dsub (f a) (node f)
definition dsub.node.acc {A : Type} (f : nat → inftree A) (H : ∀a, acc dsub (f a)) : acc dsub (node f) :=
acc.intro (node f) (λ (y : inftree A) (hlt : dsub y (node f)),
have aux : ∀ z, dsub y z → node f = z → acc dsub y, from
λ z hlt, dsub.rec_on hlt (λ (f₁ : nat → inftree A) (a : nat) (eq₁ : node f = node f₁),
inftree.no_confusion eq₁ (λe, eq.rec_on e (H a))),
aux (node f) hlt rfl)
definition dsub.leaf.acc {A : Type} (a : A) : acc dsub (leaf a) :=
acc.intro (leaf a) (λ (y : inftree A) (hlt : dsub y (leaf a)),
have aux : ∀ z, dsub y z → leaf a = z → acc dsub y, from
λz hlt, dsub.rec_on hlt (λ f n (heq : leaf a = node f), inftree.no_confusion heq),
aux (leaf a) hlt rfl)
definition dsub.wf (A : Type) : well_founded (@dsub A) :=
well_founded.intro (λ (t : inftree A),
inftree.rec_on t
(λ a, dsub.leaf.acc a)
(λ f (ih :∀a, acc dsub (f a)), dsub.node.acc f ih))
end inftree
|
bfcffbe7721b8d888a78c8a3d881f830c6b2a13f
|
4efff1f47634ff19e2f786deadd394270a59ecd2
|
/src/category_theory/limits/shapes/strong_epi.lean
|
27b26d0c1d7e662761d38c2f55f58e37de687749
|
[
"Apache-2.0"
] |
permissive
|
agjftucker/mathlib
|
d634cd0d5256b6325e3c55bb7fb2403548371707
|
87fe50de17b00af533f72a102d0adefe4a2285e8
|
refs/heads/master
| 1,625,378,131,941
| 1,599,166,526,000
| 1,599,166,526,000
| 160,748,509
| 0
| 0
|
Apache-2.0
| 1,544,141,789,000
| 1,544,141,789,000
| null |
UTF-8
|
Lean
| false
| false
| 2,715
|
lean
|
/-
Copyright (c) 2020 Markus Himmel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Markus Himmel
-/
import category_theory.arrow
/-!
# Strong epimorphisms
In this file, we define strong epimorphisms. A strong epimorphism is an epimorphism `f`, such
that for every commutative square with `f` at the top and a monomorphism at the bottom, there is
a diagonal morphism making the two triangles commute. This lift is necessarily unique (as shown in
`comma.lean`).
## Main results
Besides the definition, we show that
* the composition of two strong epimorphisms is a strong epimorphism,
* if `f ≫ g` is a strong epimorphism, then so is `g`,
* if `f` is both a strong epimorphism and a monomorphism, then it is an isomorphism
## Future work
There is also the dual notion of strong monomorphism.
## References
* [F. Borceux, *Handbook of Categorical Algebra 1*][borceux-vol1]
-/
universes v u
namespace category_theory
variables {C : Type u} [category.{v} C]
variables {P Q : C}
/-- A strong epimorphism `f` is an epimorphism such that every commutative square with `f` at the
top and a monomorphism at the bottom has a lift. -/
class strong_epi (f : P ⟶ Q) :=
(epi : epi f)
(has_lift : Π {X Y : C} {u : P ⟶ X} {v : Q ⟶ Y} {z : X ⟶ Y} [mono z] (h : u ≫ z = f ≫ v),
arrow.has_lift $ arrow.hom_mk' h)
attribute [instance] strong_epi.has_lift
@[priority 100]
instance epi_of_strong_epi (f : P ⟶ Q) [strong_epi f] : epi f := strong_epi.epi
section
variables {R : C} (f : P ⟶ Q) (g : Q ⟶ R)
/-- The composition of two strong epimorphisms is a strong epimorphism. -/
def strong_epi_comp [strong_epi f] [strong_epi g] : strong_epi (f ≫ g) :=
{ epi := epi_comp _ _,
has_lift :=
begin
introsI,
have h₀ : u ≫ z = f ≫ g ≫ v, by simpa [category.assoc] using h,
let w : Q ⟶ X := arrow.lift (arrow.hom_mk' h₀),
have h₁ : w ≫ z = g ≫ v, by rw arrow.lift_mk'_right,
exact ⟨(arrow.lift (arrow.hom_mk' h₁) : R ⟶ X), by simp, by simp⟩
end }
/-- If `f ≫ g` is a strong epimorphism, then so is `g`. -/
def strong_epi_of_strong_epi [strong_epi (f ≫ g)] : strong_epi g :=
{ epi := epi_of_epi f g,
has_lift :=
begin
introsI,
have h₀ : (f ≫ u) ≫ z = (f ≫ g) ≫ v, by simp only [category.assoc, h],
exact ⟨(arrow.lift (arrow.hom_mk' h₀) : R ⟶ X), (cancel_mono z).1 (by simp [h]), by simp⟩,
end }
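/- Illustrative usage sketch (an addition, not part of the original file): the construction
`strong_epi_comp` can be applied directly to exhibit `f ≫ g` as a strong epimorphism
whenever `f` and `g` are. Only declarations from this section are used. -/
example [strong_epi f] [strong_epi g] : strong_epi (f ≫ g) := strong_epi_comp f g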
end
/-- A strong epimorphism that is a monomorphism is an isomorphism. -/
def is_iso_of_mono_of_strong_epi (f : P ⟶ Q) [mono f] [strong_epi f] : is_iso f :=
{ inv := arrow.lift $ arrow.hom_mk' $ show 𝟙 P ≫ f = f ≫ 𝟙 Q, by simp }
end category_theory
|
8676b6a7dadbafb94199dc2104d4f4143ea5dfee
|
4727251e0cd73359b15b664c3170e5d754078599
|
/src/topology/metric_space/algebra.lean
|
a825f59d62905b67d9bd37a21c1245588f505614
|
[
"Apache-2.0"
] |
permissive
|
Vierkantor/mathlib
|
0ea59ac32a3a43c93c44d70f441c4ee810ccceca
|
83bc3b9ce9b13910b57bda6b56222495ebd31c2f
|
refs/heads/master
| 1,658,323,012,449
| 1,652,256,003,000
| 1,652,256,003,000
| 209,296,341
| 0
| 1
|
Apache-2.0
| 1,568,807,655,000
| 1,568,807,655,000
| null |
UTF-8
|
Lean
| false
| false
| 7,938
|
lean
|
/-
Copyright (c) 2021 Heather Macbeth. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Heather Macbeth
-/
import topology.algebra.mul_action
import topology.metric_space.lipschitz
/-!
# Compatibility of algebraic operations with metric space structures
In this file we define mixin typeclasses `has_lipschitz_mul`, `has_lipschitz_add`,
`has_bounded_smul` expressing compatibility of multiplication, addition and scalar-multiplication
operations with an underlying metric space structure. The intended use case is to abstract certain
properties shared by normed groups and by `R≥0`.
## Implementation notes
We deduce a `has_continuous_mul` instance from `has_lipschitz_mul`, etc. In principle there should
be an intermediate typeclass for uniform spaces, but the algebraic hierarchy there (see
`uniform_group`) is structured differently.
-/
open_locale nnreal
noncomputable theory
variables (α β : Type*) [pseudo_metric_space α] [pseudo_metric_space β]
section has_lipschitz_mul
/-- Class `has_lipschitz_add β` says that the addition `(+) : β × β → β` is Lipschitz jointly in
the two arguments. -/
class has_lipschitz_add [add_monoid β] : Prop :=
( lipschitz_add : ∃ C, lipschitz_with C (λ p : β × β, p.1 + p.2) )
/-- Class `has_lipschitz_mul β` says that the multiplication `(*) : β × β → β` is Lipschitz jointly
in the two arguments. -/
@[to_additive] class has_lipschitz_mul [monoid β] : Prop :=
( lipschitz_mul : ∃ C, lipschitz_with C (λ p : β × β, p.1 * p.2) )
variables [monoid β]
/-- The Lipschitz constant of a monoid `β` satisfying `has_lipschitz_mul` -/
@[to_additive "The Lipschitz constant of an `add_monoid` `β` satisfying `has_lipschitz_add`"]
def has_lipschitz_mul.C [_i : has_lipschitz_mul β] : ℝ≥0 :=
classical.some _i.lipschitz_mul
variables {β}
@[to_additive] lemma lipschitz_with_lipschitz_const_mul_edist [_i : has_lipschitz_mul β] :
lipschitz_with (has_lipschitz_mul.C β) (λ p : β × β, p.1 * p.2) :=
classical.some_spec _i.lipschitz_mul
variables [has_lipschitz_mul β]
@[to_additive] lemma lipschitz_with_lipschitz_const_mul :
∀ p q : β × β, dist (p.1 * p.2) (q.1 * q.2) ≤ (has_lipschitz_mul.C β) * dist p q :=
begin
rw ← lipschitz_with_iff_dist_le_mul,
exact lipschitz_with_lipschitz_const_mul_edist,
end
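/- Illustrative usage sketch (an addition, not part of the original file): specialising
`lipschitz_with_lipschitz_const_mul` to pairs with a common first component bounds the
distance between `a * p` and `a * q`; the distance on `β × β` is the sup (product) metric. -/
example (a p q : β) :
  dist (a * p) (a * q) ≤ (has_lipschitz_mul.C β) * dist (a, p) (a, q) :=
lipschitz_with_lipschitz_const_mul (a, p) (a, q)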
@[to_additive, priority 100] -- see Note [lower instance priority]
instance has_lipschitz_mul.has_continuous_mul : has_continuous_mul β :=
⟨ lipschitz_with_lipschitz_const_mul_edist.continuous ⟩
@[to_additive] instance submonoid.has_lipschitz_mul (s : submonoid β) : has_lipschitz_mul s :=
{ lipschitz_mul := ⟨has_lipschitz_mul.C β, begin
rintros ⟨x₁, x₂⟩ ⟨y₁, y₂⟩,
convert lipschitz_with_lipschitz_const_mul_edist ⟨(x₁:β), x₂⟩ ⟨y₁, y₂⟩ using 1
end⟩ }
@[to_additive] instance mul_opposite.has_lipschitz_mul : has_lipschitz_mul βᵐᵒᵖ :=
{ lipschitz_mul := ⟨has_lipschitz_mul.C β, λ ⟨x₁, x₂⟩ ⟨y₁, y₂⟩,
(lipschitz_with_lipschitz_const_mul_edist ⟨x₂.unop, x₁.unop⟩ ⟨y₂.unop, y₁.unop⟩).trans_eq
(congr_arg _ $ max_comm _ _)⟩ }
-- this instance could be deduced from `normed_group.has_lipschitz_add`, but we prove it separately
-- here so that it is available earlier in the hierarchy
instance real.has_lipschitz_add : has_lipschitz_add ℝ :=
{ lipschitz_add := ⟨2, begin
rw lipschitz_with_iff_dist_le_mul,
intros p q,
simp only [real.dist_eq, prod.dist_eq, prod.fst_sub, prod.snd_sub, nnreal.coe_one,
nnreal.coe_bit0],
convert le_trans (abs_add (p.1 - q.1) (p.2 - q.2)) _ using 2,
{ abel },
have := le_max_left (|p.1 - q.1|) (|p.2 - q.2|),
have := le_max_right (|p.1 - q.1|) (|p.2 - q.2|),
linarith,
end⟩ }
-- this instance has the same proof as `add_submonoid.has_lipschitz_add`, but the former can't
-- directly be applied here since `ℝ≥0` is a subtype of `ℝ`, not an additive submonoid.
instance nnreal.has_lipschitz_add : has_lipschitz_add ℝ≥0 :=
{ lipschitz_add := ⟨has_lipschitz_add.C ℝ, begin
rintros ⟨x₁, x₂⟩ ⟨y₁, y₂⟩,
convert lipschitz_with_lipschitz_const_add_edist ⟨(x₁:ℝ), x₂⟩ ⟨y₁, y₂⟩ using 1
end⟩ }
end has_lipschitz_mul
section has_bounded_smul
variables [has_zero α] [has_zero β] [has_scalar α β]
/-- Mixin typeclass on a scalar action of a metric space `α` on a metric space `β` both with
distinguished points `0`, requiring compatibility of the action in the sense that
`dist (x • y₁) (x • y₂) ≤ dist x 0 * dist y₁ y₂` and
`dist (x₁ • y) (x₂ • y) ≤ dist x₁ x₂ * dist y 0`. -/
class has_bounded_smul : Prop :=
( dist_smul_pair' : ∀ x : α, ∀ y₁ y₂ : β, dist (x • y₁) (x • y₂) ≤ dist x 0 * dist y₁ y₂ )
( dist_pair_smul' : ∀ x₁ x₂ : α, ∀ y : β, dist (x₁ • y) (x₂ • y) ≤ dist x₁ x₂ * dist y 0 )
variables {α β} [has_bounded_smul α β]
lemma dist_smul_pair (x : α) (y₁ y₂ : β) : dist (x • y₁) (x • y₂) ≤ dist x 0 * dist y₁ y₂ :=
has_bounded_smul.dist_smul_pair' x y₁ y₂
lemma dist_pair_smul (x₁ x₂ : α) (y : β) : dist (x₁ • y) (x₂ • y) ≤ dist x₁ x₂ * dist y 0 :=
has_bounded_smul.dist_pair_smul' x₁ x₂ y
/-- The typeclass `has_bounded_smul` on a metric-space scalar action implies continuity of the
action. -/
@[priority 100] -- see Note [lower instance priority]
instance has_bounded_smul.has_continuous_smul : has_continuous_smul α β :=
{ continuous_smul := begin
rw metric.continuous_iff,
rintros ⟨a, b⟩ ε hε,
have : 0 ≤ dist a 0 := dist_nonneg,
have : 0 ≤ dist b 0 := dist_nonneg,
let δ : ℝ := min 1 ((dist a 0 + dist b 0 + 2)⁻¹ * ε),
have hδ_pos : 0 < δ,
{ refine lt_min_iff.mpr ⟨by norm_num, mul_pos _ hε⟩,
rw inv_pos,
linarith },
refine ⟨δ, hδ_pos, _⟩,
rintros ⟨a', b'⟩ hab',
calc _ ≤ _ : dist_triangle _ (a • b') _
... ≤ δ * (dist a 0 + dist b 0 + δ) : _
... < ε : _,
{ have : 0 ≤ dist a' a := dist_nonneg,
have := dist_triangle b' b 0,
have := dist_comm (a • b') (a' • b'),
have := dist_comm a a',
have : dist a' a ≤ dist (a', b') (a, b) := le_max_left _ _,
have : dist b' b ≤ dist (a', b') (a, b) := le_max_right _ _,
have := dist_smul_pair a b' b,
have := dist_pair_smul a a' b',
nlinarith },
{ have : δ ≤ _ := min_le_right _ _,
have : δ ≤ _ := min_le_left _ _,
have : (dist a 0 + dist b 0 + 2)⁻¹ * (ε * (dist a 0 + dist b 0 + δ)) < ε,
{ rw inv_mul_lt_iff; nlinarith },
nlinarith }
end }
-- this instance could be deduced from `normed_space.has_bounded_smul`, but we prove it separately
-- here so that it is available earlier in the hierarchy
instance real.has_bounded_smul : has_bounded_smul ℝ ℝ :=
{ dist_smul_pair' := λ x y₁ y₂, by simpa [real.dist_eq, mul_sub] using (abs_mul x (y₁ - y₂)).le,
dist_pair_smul' := λ x₁ x₂ y, by simpa [real.dist_eq, sub_mul] using (abs_mul (x₁ - x₂) y).le }
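/- Illustrative usage sketch (an addition, not part of the original file): with the instance
above, `dist_smul_pair` specialises to the real numbers, where `•` is ordinary multiplication. -/
example (x y₁ y₂ : ℝ) : dist (x • y₁) (x • y₂) ≤ dist x 0 * dist y₁ y₂ :=
dist_smul_pair x y₁ y₂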
instance nnreal.has_bounded_smul : has_bounded_smul ℝ≥0 ℝ≥0 :=
{ dist_smul_pair' := λ x y₁ y₂, by convert dist_smul_pair (x:ℝ) (y₁:ℝ) y₂ using 1,
dist_pair_smul' := λ x₁ x₂ y, by convert dist_pair_smul (x₁:ℝ) x₂ (y:ℝ) using 1 }
/-- If a scalar is central, then its right action is bounded when its left action is. -/
instance has_bounded_smul.op [has_scalar αᵐᵒᵖ β] [is_central_scalar α β] :
has_bounded_smul αᵐᵒᵖ β :=
{ dist_smul_pair' := mul_opposite.rec $ λ x y₁ y₂,
by simpa only [op_smul_eq_smul] using dist_smul_pair x y₁ y₂,
dist_pair_smul' := mul_opposite.rec $ λ x₁, mul_opposite.rec $ λ x₂ y,
by simpa only [op_smul_eq_smul] using dist_pair_smul x₁ x₂ y }
end has_bounded_smul
|
1617149d31a05fd4a0587a76d6a22e4b22439a89
|
4727251e0cd73359b15b664c3170e5d754078599
|
/src/geometry/manifold/conformal_groupoid.lean
|
b1f8c1fc6573a662bfc40606e49f95097c2bc554
|
[
"Apache-2.0"
] |
permissive
|
Vierkantor/mathlib
|
0ea59ac32a3a43c93c44d70f441c4ee810ccceca
|
83bc3b9ce9b13910b57bda6b56222495ebd31c2f
|
refs/heads/master
| 1,658,323,012,449
| 1,652,256,003,000
| 1,652,256,003,000
| 209,296,341
| 0
| 1
|
Apache-2.0
| 1,568,807,655,000
| 1,568,807,655,000
| null |
UTF-8
|
Lean
| false
| false
| 1,069
|
lean
|
/-
Copyright (c) 2021 Yourong Zang. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yourong Zang
-/
import analysis.calculus.conformal.normed_space
import geometry.manifold.charted_space
/-!
# Conformal Groupoid
In this file we define the groupoid of conformal maps on normed spaces.
## Main definitions
* `conformal_groupoid`: the groupoid of conformal local homeomorphisms.
## Tags
conformal, groupoid
-/
variables {X : Type*} [normed_group X] [normed_space ℝ X]
/-- The pregroupoid of conformal maps. -/
def conformal_pregroupoid : pregroupoid X :=
{ property := λ f u, ∀ x, x ∈ u → conformal_at f x,
comp := λ f g u v hf hg hu hv huv x hx, (hg (f x) hx.2).comp x (hf x hx.1),
id_mem := λ x hx, conformal_at_id x,
locality := λ f u hu h x hx, let ⟨v, h₁, h₂, h₃⟩ := h x hx in h₃ x ⟨hx, h₂⟩,
congr := λ f g u hu h hf x hx, (hf x hx).congr hx hu h, }
/-- The groupoid of conformal maps. -/
def conformal_groupoid : structure_groupoid X := conformal_pregroupoid.groupoid
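/- Illustrative sketch (an addition, not part of the original file): the identity map is
conformal at every point, which is exactly the `id_mem` field used above; `conformal_at_id`
comes from the imported conformal analysis file. -/
example (x : X) : conformal_at id x := conformal_at_id x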
|
ab7e6af0140b2ca432a079c2721350f26d46da4e
|
74addaa0e41490cbaf2abd313a764c96df57b05d
|
/Mathlib/data/mv_polynomial/equiv.lean
|
d635f23cf17f71bdc944ccf1fa393bbbe1c8f46c
|
[] |
no_license
|
AurelienSaue/Mathlib4_auto
|
f538cfd0980f65a6361eadea39e6fc639e9dae14
|
590df64109b08190abe22358fabc3eae000943f2
|
refs/heads/master
| 1,683,906,849,776
| 1,622,564,669,000
| 1,622,564,669,000
| 371,723,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 10,096
|
lean
|
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Johan Commelin, Mario Carneiro
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.data.mv_polynomial.rename
import Mathlib.data.equiv.fin
import Mathlib.PostPort
universes u u_1 v w x
namespace Mathlib
/-!
# Equivalences between polynomial rings
This file establishes a number of equivalences between polynomial rings,
based on equivalences between the underlying types.
## Notation
As in other polynomial files, we typically use the notation:
+ `σ : Type*` (indexing the variables)
+ `R : Type*` `[comm_semiring R]` (the coefficients)
+ `s : σ →₀ ℕ`, a function from `σ` to `ℕ` which is zero away from a finite set.
This will give rise to a monomial in `mv_polynomial σ R` which mathematicians might call `X^s`
+ `a : R`
+ `i : σ`, with corresponding monomial `X i`, often denoted `X_i` by mathematicians
+ `p : mv_polynomial σ R`
## Tags
equivalence, isomorphism, morphism, ring hom, hom
-/
namespace mv_polynomial
/-- The ring isomorphism between multivariable polynomials in no variables and the ground ring. -/
def pempty_ring_equiv (R : Type u) [comm_semiring R] : mv_polynomial pempty R ≃+* R :=
ring_equiv.mk (eval₂ (ring_hom.id R) pempty.elim) ⇑C sorry sorry sorry sorry
/-- The algebra isomorphism between multivariable polynomials in no variables and the ground ring. -/
@[simp] theorem pempty_alg_equiv_symm_apply (R : Type u) [comm_semiring R] : ∀ (ᾰ : R), coe_fn (alg_equiv.symm (pempty_alg_equiv R)) ᾰ = coe_fn C ᾰ :=
fun (ᾰ : R) => Eq.refl (coe_fn (alg_equiv.symm (pempty_alg_equiv R)) ᾰ)
/--
The ring isomorphism between multivariable polynomials in a single variable and
polynomials over the ground ring.
-/
@[simp] theorem punit_ring_equiv_symm_apply (R : Type u) [comm_semiring R] (p : polynomial R) : coe_fn (ring_equiv.symm (punit_ring_equiv R)) p = polynomial.eval₂ C (X PUnit.unit) p :=
Eq.refl (coe_fn (ring_equiv.symm (punit_ring_equiv R)) p)
/-- The ring isomorphism between multivariable polynomials induced by an equivalence of the variables. -/
@[simp] theorem ring_equiv_of_equiv_symm_apply (R : Type u) {S₁ : Type v} {S₂ : Type w} [comm_semiring R] (e : S₁ ≃ S₂) : ∀ (ᾰ : mv_polynomial S₂ R), coe_fn (ring_equiv.symm (ring_equiv_of_equiv R e)) ᾰ = coe_fn (rename ⇑(equiv.symm e)) ᾰ :=
fun (ᾰ : mv_polynomial S₂ R) => Eq.refl (coe_fn (ring_equiv.symm (ring_equiv_of_equiv R e)) ᾰ)
/-- The algebra isomorphism between multivariable polynomials induced by an equivalence of the variables. -/
@[simp] theorem alg_equiv_of_equiv_symm_apply (R : Type u) {S₁ : Type v} {S₂ : Type w} [comm_semiring R] (e : S₁ ≃ S₂) : ∀ (ᾰ : mv_polynomial S₂ R), coe_fn (alg_equiv.symm (alg_equiv_of_equiv R e)) ᾰ = coe_fn (rename ⇑(equiv.symm e)) ᾰ :=
fun (ᾰ : mv_polynomial S₂ R) => Eq.refl (coe_fn (alg_equiv.symm (alg_equiv_of_equiv R e)) ᾰ)
/-- The ring isomorphism between multivariable polynomials induced by a ring isomorphism of the ground ring. -/
def ring_equiv_congr (R : Type u) {S₁ : Type v} {S₂ : Type w} [comm_semiring R] [comm_semiring S₂] (e : R ≃+* S₂) : mv_polynomial S₁ R ≃+* mv_polynomial S₁ S₂ :=
ring_equiv.mk ⇑(map ↑e) ⇑(map ↑(ring_equiv.symm e)) sorry sorry sorry sorry
/--
The function from multivariable polynomials in a sum of two types,
to multivariable polynomials in one of the types,
with coefficients in multivariable polynomials in the other type.
See `sum_ring_equiv` for the ring isomorphism.
-/
def sum_to_iter (R : Type u) (S₁ : Type v) (S₂ : Type w) [comm_semiring R] : mv_polynomial (S₁ ⊕ S₂) R →+* mv_polynomial S₁ (mv_polynomial S₂ R) :=
eval₂_hom (ring_hom.comp C C) fun (bc : S₁ ⊕ S₂) => sum.rec_on bc X (⇑C ∘ X)
protected instance is_semiring_hom_sum_to_iter (R : Type u) (S₁ : Type v) (S₂ : Type w) [comm_semiring R] : is_semiring_hom ⇑(sum_to_iter R S₁ S₂) :=
eval₂.is_semiring_hom (ring_hom.comp C C) fun (n : S₁ ⊕ S₂) => (fun (bc : S₁ ⊕ S₂) => sum.rec_on bc X (⇑C ∘ X)) n
@[simp] theorem sum_to_iter_C (R : Type u) (S₁ : Type v) (S₂ : Type w) [comm_semiring R] (a : R) : coe_fn (sum_to_iter R S₁ S₂) (coe_fn C a) = coe_fn C (coe_fn C a) :=
eval₂_C (ring_hom.comp C C) (fun (n : S₁ ⊕ S₂) => (fun (bc : S₁ ⊕ S₂) => sum.rec_on bc X (⇑C ∘ X)) n) a
@[simp] theorem sum_to_iter_Xl (R : Type u) (S₁ : Type v) (S₂ : Type w) [comm_semiring R] (b : S₁) : coe_fn (sum_to_iter R S₁ S₂) (X (sum.inl b)) = X b :=
eval₂_X (ring_hom.comp C C) (fun (n : S₁ ⊕ S₂) => (fun (bc : S₁ ⊕ S₂) => sum.rec_on bc X (⇑C ∘ X)) n) (sum.inl b)
@[simp] theorem sum_to_iter_Xr (R : Type u) (S₁ : Type v) (S₂ : Type w) [comm_semiring R] (c : S₂) : coe_fn (sum_to_iter R S₁ S₂) (X (sum.inr c)) = coe_fn C (X c) :=
eval₂_X (ring_hom.comp C C) (fun (n : S₁ ⊕ S₂) => (fun (bc : S₁ ⊕ S₂) => sum.rec_on bc X (⇑C ∘ X)) n) (sum.inr c)
/--
The function from multivariable polynomials in one type,
with coefficients in multivariable polynomials in another type,
to multivariable polynomials in the sum of the two types.
See `sum_ring_equiv` for the ring isomorphism.
-/
def iter_to_sum (R : Type u) (S₁ : Type v) (S₂ : Type w) [comm_semiring R] : mv_polynomial S₁ (mv_polynomial S₂ R) →+* mv_polynomial (S₁ ⊕ S₂) R :=
eval₂_hom (ring_hom.of (eval₂ C (X ∘ sum.inr))) (X ∘ sum.inl)
theorem iter_to_sum_C_C (R : Type u) (S₁ : Type v) (S₂ : Type w) [comm_semiring R] (a : R) : coe_fn (iter_to_sum R S₁ S₂) (coe_fn C (coe_fn C a)) = coe_fn C a :=
Eq.trans (eval₂_C (ring_hom.of (eval₂ C (X ∘ sum.inr))) (fun (n : S₁) => function.comp X sum.inl n) (coe_fn C a))
(eval₂_C C (fun (n : S₂) => function.comp X sum.inr n) a)
theorem iter_to_sum_X (R : Type u) (S₁ : Type v) (S₂ : Type w) [comm_semiring R] (b : S₁) : coe_fn (iter_to_sum R S₁ S₂) (X b) = X (sum.inl b) :=
eval₂_X (ring_hom.of (eval₂ C (X ∘ sum.inr))) (fun (n : S₁) => function.comp X sum.inl n) b
theorem iter_to_sum_C_X (R : Type u) (S₁ : Type v) (S₂ : Type w) [comm_semiring R] (c : S₂) : coe_fn (iter_to_sum R S₁ S₂) (coe_fn C (X c)) = X (sum.inr c) :=
Eq.trans (eval₂_C (ring_hom.of (eval₂ C (X ∘ sum.inr))) (fun (n : S₁) => function.comp X sum.inl n) (X c))
(eval₂_X C (fun (n : S₂) => function.comp X sum.inr n) c)
/-- A helper function for `sum_ring_equiv`. -/
@[simp] theorem mv_polynomial_equiv_mv_polynomial_apply (R : Type u) (S₁ : Type v) (S₂ : Type w) (S₃ : Type x) [comm_semiring R] [comm_semiring S₃] (f : mv_polynomial S₁ R →+* mv_polynomial S₂ S₃) (g : mv_polynomial S₂ S₃ →+* mv_polynomial S₁ R) (hfgC : ∀ (a : S₃), coe_fn f (coe_fn g (coe_fn C a)) = coe_fn C a) (hfgX : ∀ (n : S₂), coe_fn f (coe_fn g (X n)) = X n) (hgfC : ∀ (a : R), coe_fn g (coe_fn f (coe_fn C a)) = coe_fn C a) (hgfX : ∀ (n : S₁), coe_fn g (coe_fn f (X n)) = X n) : ∀ (ᾰ : mv_polynomial S₁ R), coe_fn (mv_polynomial_equiv_mv_polynomial R S₁ S₂ S₃ f g hfgC hfgX hgfC hgfX) ᾰ = coe_fn f ᾰ :=
fun (ᾰ : mv_polynomial S₁ R) =>
Eq.refl (coe_fn (mv_polynomial_equiv_mv_polynomial R S₁ S₂ S₃ f g hfgC hfgX hgfC hgfX) ᾰ)
/--
The ring isomorphism between multivariable polynomials in a sum of two types,
and multivariable polynomials in one of the types,
with coefficients in multivariable polynomials in the other type.
-/
def sum_ring_equiv (R : Type u) (S₁ : Type v) (S₂ : Type w) [comm_semiring R] : mv_polynomial (S₁ ⊕ S₂) R ≃+* mv_polynomial S₁ (mv_polynomial S₂ R) :=
mv_polynomial_equiv_mv_polynomial R (S₁ ⊕ S₂) S₁ (mv_polynomial S₂ R) (sum_to_iter R S₁ S₂) (iter_to_sum R S₁ S₂) sorry
sorry sorry sorry
/--
The ring isomorphism between multivariable polynomials in `option S₁` and
polynomials with coefficients in `mv_polynomial S₁ R`.
-/
def option_equiv_left (R : Type u) (S₁ : Type v) [comm_semiring R] : mv_polynomial (Option S₁) R ≃+* polynomial (mv_polynomial S₁ R) :=
ring_equiv.trans (ring_equiv_of_equiv R (equiv.trans (equiv.option_equiv_sum_punit S₁) (equiv.sum_comm S₁ PUnit)))
(ring_equiv.trans (sum_ring_equiv R PUnit S₁) (punit_ring_equiv (mv_polynomial S₁ R)))
/--
The ring isomorphism between multivariable polynomials in `option S₁` and
multivariable polynomials with coefficients in polynomials.
-/
def option_equiv_right (R : Type u) (S₁ : Type v) [comm_semiring R] : mv_polynomial (Option S₁) R ≃+* mv_polynomial S₁ (polynomial R) :=
ring_equiv.trans (ring_equiv_of_equiv R (equiv.option_equiv_sum_punit S₁))
(ring_equiv.trans (sum_ring_equiv R S₁ Unit) (ring_equiv_congr (mv_polynomial Unit R) (punit_ring_equiv R)))
/--
The ring isomorphism between multivariable polynomials in `fin (n + 1)` and
polynomials over multivariable polynomials in `fin n`.
-/
def fin_succ_equiv (R : Type u) [comm_semiring R] (n : ℕ) : mv_polynomial (fin (n + 1)) R ≃+* polynomial (mv_polynomial (fin n) R) :=
ring_equiv.trans (ring_equiv_of_equiv R (fin_succ_equiv n)) (option_equiv_left R (fin n))
theorem fin_succ_equiv_eq (R : Type u) [comm_semiring R] (n : ℕ) : ↑(fin_succ_equiv R n) =
eval₂_hom (ring_hom.comp polynomial.C C)
fun (i : fin (n + 1)) => fin.cases polynomial.X (fun (k : fin n) => coe_fn polynomial.C (X k)) i := sorry
@[simp] theorem fin_succ_equiv_apply (R : Type u) [comm_semiring R] (n : ℕ) (p : mv_polynomial (fin (n + 1)) R) : coe_fn (fin_succ_equiv R n) p =
coe_fn
(eval₂_hom (ring_hom.comp polynomial.C C)
fun (i : fin (n + 1)) => fin.cases polynomial.X (fun (k : fin n) => coe_fn polynomial.C (X k)) i)
p := sorry
theorem fin_succ_equiv_comp_C_eq_C {R : Type u} [comm_semiring R] (n : ℕ) : ring_hom.comp (ring_equiv.to_ring_hom (ring_equiv.symm (fin_succ_equiv R n))) (ring_hom.comp polynomial.C C) = C := sorry
|
86be2826782ff090067afc676458837bd440097d
|
4950bf76e5ae40ba9f8491647d0b6f228ddce173
|
/src/analysis/normed_space/basic.lean
|
07dd02bedf5d4e5e3244162849d7d68631647cdf
|
[
"Apache-2.0"
] |
permissive
|
ntzwq/mathlib
|
ca50b21079b0a7c6781c34b62199a396dd00cee2
|
36eec1a98f22df82eaccd354a758ef8576af2a7f
|
refs/heads/master
| 1,675,193,391,478
| 1,607,822,996,000
| 1,607,822,996,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 52,713
|
lean
|
/-
Copyright (c) 2018 Patrick Massot. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Patrick Massot, Johannes Hölzl
-/
import topology.instances.nnreal
import topology.algebra.module
import topology.metric_space.antilipschitz
/-!
# Normed spaces
-/
variables {α : Type*} {β : Type*} {γ : Type*} {ι : Type*}
noncomputable theory
open filter metric
open_locale topological_space big_operators nnreal
/-- Auxiliary class, endowing a type `α` with a function `norm : α → ℝ`. This class is designed to
be extended in more interesting classes specifying the properties of the norm. -/
class has_norm (α : Type*) := (norm : α → ℝ)
export has_norm (norm)
notation `∥`:1024 e:1 `∥`:1 := norm e
/-- A normed group is an additive group endowed with a norm for which `dist x y = ∥x - y∥` defines
a metric space structure. -/
class normed_group (α : Type*) extends has_norm α, add_comm_group α, metric_space α :=
(dist_eq : ∀ x y, dist x y = norm (x - y))
/-- Construct a normed group from a translation invariant distance -/
def normed_group.of_add_dist [has_norm α] [add_comm_group α] [metric_space α]
(H1 : ∀ x:α, ∥x∥ = dist x 0)
(H2 : ∀ x y z : α, dist x y ≤ dist (x + z) (y + z)) : normed_group α :=
{ dist_eq := λ x y, begin
rw H1, apply le_antisymm,
{ rw [sub_eq_add_neg, ← add_right_neg y], apply H2 },
{ have := H2 (x-y) 0 y, rwa [sub_add_cancel, zero_add] at this }
end }
/-- Construct a normed group from a translation invariant distance -/
def normed_group.of_add_dist' [has_norm α] [add_comm_group α] [metric_space α]
(H1 : ∀ x:α, ∥x∥ = dist x 0)
(H2 : ∀ x y z : α, dist (x + z) (y + z) ≤ dist x y) : normed_group α :=
{ dist_eq := λ x y, begin
rw H1, apply le_antisymm,
{ have := H2 (x-y) 0 y, rwa [sub_add_cancel, zero_add] at this },
{ rw [sub_eq_add_neg, ← add_right_neg y], apply H2 }
end }
/-- A normed group can be built from a norm that satisfies algebraic properties. This is
formalised in this structure. -/
structure normed_group.core (α : Type*) [add_comm_group α] [has_norm α] : Prop :=
(norm_eq_zero_iff : ∀ x : α, ∥x∥ = 0 ↔ x = 0)
(triangle : ∀ x y : α, ∥x + y∥ ≤ ∥x∥ + ∥y∥)
(norm_neg : ∀ x : α, ∥-x∥ = ∥x∥)
/-- Constructing a normed group from core properties of a norm, i.e., registering the distance and
the metric space structure from the norm properties. -/
noncomputable def normed_group.of_core (α : Type*) [add_comm_group α] [has_norm α]
(C : normed_group.core α) : normed_group α :=
{ dist := λ x y, ∥x - y∥,
dist_eq := assume x y, by refl,
dist_self := assume x, (C.norm_eq_zero_iff (x - x)).mpr (show x - x = 0, by simp),
eq_of_dist_eq_zero := assume x y h, show (x = y), from sub_eq_zero.mp $ (C.norm_eq_zero_iff (x - y)).mp h,
dist_triangle := assume x y z,
calc ∥x - z∥ = ∥x - y + (y - z)∥ : by rw sub_add_sub_cancel
... ≤ ∥x - y∥ + ∥y - z∥ : C.triangle _ _,
dist_comm := assume x y,
calc ∥x - y∥ = ∥ -(y - x)∥ : by simp
... = ∥y - x∥ : by { rw [C.norm_neg] } }
section normed_group
variables [normed_group α] [normed_group β]
lemma dist_eq_norm (g h : α) : dist g h = ∥g - h∥ :=
normed_group.dist_eq _ _
@[simp] lemma dist_zero_right (g : α) : dist g 0 = ∥g∥ :=
by rw [dist_eq_norm, sub_zero]
lemma tendsto_norm_cocompact_at_top [proper_space α] :
tendsto norm (cocompact α) at_top :=
by simpa only [dist_zero_right] using tendsto_dist_right_cocompact_at_top (0:α)
lemma norm_sub_rev (g h : α) : ∥g - h∥ = ∥h - g∥ :=
by simpa only [dist_eq_norm] using dist_comm g h
@[simp] lemma norm_neg (g : α) : ∥-g∥ = ∥g∥ :=
by simpa using norm_sub_rev 0 g
@[simp] lemma dist_add_left (g h₁ h₂ : α) : dist (g + h₁) (g + h₂) = dist h₁ h₂ :=
by simp [dist_eq_norm]
@[simp] lemma dist_add_right (g₁ g₂ h : α) : dist (g₁ + h) (g₂ + h) = dist g₁ g₂ :=
by simp [dist_eq_norm]
@[simp] lemma dist_neg_neg (g h : α) : dist (-g) (-h) = dist g h :=
by simp only [dist_eq_norm, neg_sub_neg, norm_sub_rev]
@[simp] lemma dist_sub_left (g h₁ h₂ : α) : dist (g - h₁) (g - h₂) = dist h₁ h₂ :=
by simp only [sub_eq_add_neg, dist_add_left, dist_neg_neg]
@[simp] lemma dist_sub_right (g₁ g₂ h : α) : dist (g₁ - h) (g₂ - h) = dist g₁ g₂ :=
by simpa only [sub_eq_add_neg] using dist_add_right _ _ _
/-- Triangle inequality for the norm. -/
lemma norm_add_le (g h : α) : ∥g + h∥ ≤ ∥g∥ + ∥h∥ :=
by simpa [dist_eq_norm] using dist_triangle g 0 (-h)
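/- Illustrative sketch (an addition, not part of the original file): iterating the triangle
inequality over three summands, using the standard `add_le_add` and `le_refl` lemmas. -/
example (a b c : α) : ∥a + b + c∥ ≤ ∥a∥ + ∥b∥ + ∥c∥ :=
le_trans (norm_add_le _ _) (add_le_add (norm_add_le _ _) (le_refl _))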
lemma norm_add_le_of_le {g₁ g₂ : α} {n₁ n₂ : ℝ} (H₁ : ∥g₁∥ ≤ n₁) (H₂ : ∥g₂∥ ≤ n₂) :
∥g₁ + g₂∥ ≤ n₁ + n₂ :=
le_trans (norm_add_le g₁ g₂) (add_le_add H₁ H₂)
lemma dist_add_add_le (g₁ g₂ h₁ h₂ : α) :
dist (g₁ + g₂) (h₁ + h₂) ≤ dist g₁ h₁ + dist g₂ h₂ :=
by simpa only [dist_add_left, dist_add_right] using dist_triangle (g₁ + g₂) (h₁ + g₂) (h₁ + h₂)
lemma dist_add_add_le_of_le {g₁ g₂ h₁ h₂ : α} {d₁ d₂ : ℝ}
(H₁ : dist g₁ h₁ ≤ d₁) (H₂ : dist g₂ h₂ ≤ d₂) :
dist (g₁ + g₂) (h₁ + h₂) ≤ d₁ + d₂ :=
le_trans (dist_add_add_le g₁ g₂ h₁ h₂) (add_le_add H₁ H₂)
lemma dist_sub_sub_le (g₁ g₂ h₁ h₂ : α) :
dist (g₁ - g₂) (h₁ - h₂) ≤ dist g₁ h₁ + dist g₂ h₂ :=
by simpa only [sub_eq_add_neg, dist_neg_neg] using dist_add_add_le g₁ (-g₂) h₁ (-h₂)
lemma dist_sub_sub_le_of_le {g₁ g₂ h₁ h₂ : α} {d₁ d₂ : ℝ}
(H₁ : dist g₁ h₁ ≤ d₁) (H₂ : dist g₂ h₂ ≤ d₂) :
dist (g₁ - g₂) (h₁ - h₂) ≤ d₁ + d₂ :=
le_trans (dist_sub_sub_le g₁ g₂ h₁ h₂) (add_le_add H₁ H₂)
lemma abs_dist_sub_le_dist_add_add (g₁ g₂ h₁ h₂ : α) :
abs (dist g₁ h₁ - dist g₂ h₂) ≤ dist (g₁ + g₂) (h₁ + h₂) :=
by simpa only [dist_add_left, dist_add_right, dist_comm h₂]
using abs_dist_sub_le (g₁ + g₂) (h₁ + h₂) (h₁ + g₂)
@[simp] lemma norm_nonneg (g : α) : 0 ≤ ∥g∥ :=
by { rw[←dist_zero_right], exact dist_nonneg }
@[simp] lemma norm_eq_zero {g : α} : ∥g∥ = 0 ↔ g = 0 :=
dist_zero_right g ▸ dist_eq_zero
@[simp] lemma norm_zero : ∥(0:α)∥ = 0 := norm_eq_zero.2 rfl
@[nontriviality] lemma norm_of_subsingleton [subsingleton α] (x : α) : ∥x∥ = 0 :=
by rw [subsingleton.elim x 0, norm_zero]
lemma norm_sum_le {β} : ∀(s : finset β) (f : β → α), ∥∑ a in s, f a∥ ≤ ∑ a in s, ∥ f a ∥ :=
finset.le_sum_of_subadditive norm norm_zero norm_add_le
lemma norm_sum_le_of_le {β} (s : finset β) {f : β → α} {n : β → ℝ} (h : ∀ b ∈ s, ∥f b∥ ≤ n b) :
∥∑ b in s, f b∥ ≤ ∑ b in s, n b :=
le_trans (norm_sum_le s f) (finset.sum_le_sum h)
@[simp] lemma norm_pos_iff {g : α} : 0 < ∥ g ∥ ↔ g ≠ 0 :=
dist_zero_right g ▸ dist_pos
@[simp] lemma norm_le_zero_iff {g : α} : ∥g∥ ≤ 0 ↔ g = 0 :=
by { rw[←dist_zero_right], exact dist_le_zero }
lemma norm_sub_le (g h : α) : ∥g - h∥ ≤ ∥g∥ + ∥h∥ :=
by simpa [dist_eq_norm] using dist_triangle g 0 h
lemma norm_sub_le_of_le {g₁ g₂ : α} {n₁ n₂ : ℝ} (H₁ : ∥g₁∥ ≤ n₁) (H₂ : ∥g₂∥ ≤ n₂) :
∥g₁ - g₂∥ ≤ n₁ + n₂ :=
le_trans (norm_sub_le g₁ g₂) (add_le_add H₁ H₂)
lemma dist_le_norm_add_norm (g h : α) : dist g h ≤ ∥g∥ + ∥h∥ :=
by { rw dist_eq_norm, apply norm_sub_le }
lemma abs_norm_sub_norm_le (g h : α) : abs(∥g∥ - ∥h∥) ≤ ∥g - h∥ :=
by simpa [dist_eq_norm] using abs_dist_sub_le g h 0
lemma norm_sub_norm_le (g h : α) : ∥g∥ - ∥h∥ ≤ ∥g - h∥ :=
le_trans (le_abs_self _) (abs_norm_sub_norm_le g h)
lemma dist_norm_norm_le (g h : α) : dist ∥g∥ ∥h∥ ≤ ∥g - h∥ :=
abs_norm_sub_norm_le g h
lemma eq_of_norm_sub_eq_zero {u v : α} (h : ∥u - v∥ = 0) : u = v :=
begin
apply eq_of_dist_eq_zero,
rwa dist_eq_norm
end
lemma norm_le_insert (u v : α) : ∥v∥ ≤ ∥u∥ + ∥u - v∥ :=
calc ∥v∥ = ∥u - (u - v)∥ : by abel
... ≤ ∥u∥ + ∥u - v∥ : norm_sub_le u _
lemma ball_0_eq (ε : ℝ) : ball (0:α) ε = {x | ∥x∥ < ε} :=
set.ext $ assume a, by simp
lemma mem_ball_iff_norm {g h : α} {r : ℝ} :
h ∈ ball g r ↔ ∥h - g∥ < r :=
by rw [mem_ball, dist_eq_norm]
lemma mem_ball_iff_norm' {g h : α} {r : ℝ} :
h ∈ ball g r ↔ ∥g - h∥ < r :=
by rw [mem_ball', dist_eq_norm]
lemma mem_closed_ball_iff_norm {g h : α} {r : ℝ} :
h ∈ closed_ball g r ↔ ∥h - g∥ ≤ r :=
by rw [mem_closed_ball, dist_eq_norm]
lemma mem_closed_ball_iff_norm' {g h : α} {r : ℝ} :
h ∈ closed_ball g r ↔ ∥g - h∥ ≤ r :=
by rw [mem_closed_ball', dist_eq_norm]
lemma norm_le_of_mem_closed_ball {g h : α} {r : ℝ} (H : h ∈ closed_ball g r) :
∥h∥ ≤ ∥g∥ + r :=
calc
∥h∥ = ∥g + (h - g)∥ : by rw [add_sub_cancel'_right]
... ≤ ∥g∥ + ∥h - g∥ : norm_add_le _ _
... ≤ ∥g∥ + r : by { apply add_le_add_left, rw ← dist_eq_norm, exact H }
lemma norm_lt_of_mem_ball {g h : α} {r : ℝ} (H : h ∈ ball g r) :
∥h∥ < ∥g∥ + r :=
calc
∥h∥ = ∥g + (h - g)∥ : by rw [add_sub_cancel'_right]
... ≤ ∥g∥ + ∥h - g∥ : norm_add_le _ _
... < ∥g∥ + r : by { apply add_lt_add_left, rw ← dist_eq_norm, exact H }
theorem normed_group.tendsto_nhds_zero {f : γ → α} {l : filter γ} :
tendsto f l (𝓝 0) ↔ ∀ ε > 0, ∀ᶠ x in l, ∥ f x ∥ < ε :=
metric.tendsto_nhds.trans $ by simp only [dist_zero_right]
lemma normed_group.tendsto_nhds_nhds {f : α → β} {x : α} {y : β} :
tendsto f (𝓝 x) (𝓝 y) ↔ ∀ ε > 0, ∃ δ > 0, ∀ x', ∥x' - x∥ < δ → ∥f x' - y∥ < ε :=
by simp_rw [metric.tendsto_nhds_nhds, dist_eq_norm]
/-- A homomorphism `f` of normed groups is Lipschitz if there exists a constant `C` such that for
all `x`, one has `∥f x∥ ≤ C * ∥x∥`.
The analogous condition for a linear map of normed spaces is in `normed_space.operator_norm`. -/
lemma add_monoid_hom.lipschitz_of_bound (f : α →+ β) (C : ℝ) (h : ∀ x, ∥f x∥ ≤ C * ∥x∥) :
lipschitz_with (nnreal.of_real C) f :=
lipschitz_with.of_dist_le' $ λ x y, by simpa only [dist_eq_norm, f.map_sub] using h (x - y)
lemma lipschitz_on_with_iff_norm_sub_le {f : α → β} {C : ℝ≥0} {s : set α} :
lipschitz_on_with C f s ↔ ∀ (x ∈ s) (y ∈ s), ∥f x - f y∥ ≤ C * ∥x - y∥ :=
by simp only [lipschitz_on_with_iff_dist_le_mul, dist_eq_norm]
lemma lipschitz_on_with.norm_sub_le {f : α → β} {C : ℝ≥0} {s : set α} (h : lipschitz_on_with C f s)
{x y : α} (x_in : x ∈ s) (y_in : y ∈ s) : ∥f x - f y∥ ≤ C * ∥x - y∥ :=
lipschitz_on_with_iff_norm_sub_le.mp h x x_in y y_in
/-- A homomorphism `f` of normed groups is continuous if there exists a constant `C` such that for
all `x`, one has `∥f x∥ ≤ C * ∥x∥`.
The analogous condition for a linear map of normed spaces is in `normed_space.operator_norm`. -/
lemma add_monoid_hom.continuous_of_bound (f : α →+ β) (C : ℝ) (h : ∀ x, ∥f x∥ ≤ C * ∥x∥) :
continuous f :=
(f.lipschitz_of_bound C h).continuous
section nnnorm
/-- Version of the norm taking values in nonnegative reals. -/
def nnnorm (a : α) : ℝ≥0 := ⟨norm a, norm_nonneg a⟩
@[simp] lemma coe_nnnorm (a : α) : (nnnorm a : ℝ) = norm a := rfl
lemma nndist_eq_nnnorm (a b : α) : nndist a b = nnnorm (a - b) := nnreal.eq $ dist_eq_norm _ _
@[simp] lemma nnnorm_eq_zero {a : α} : nnnorm a = 0 ↔ a = 0 :=
by simp only [nnreal.eq_iff.symm, nnreal.coe_zero, coe_nnnorm, norm_eq_zero]
@[simp] lemma nnnorm_zero : nnnorm (0 : α) = 0 :=
nnreal.eq norm_zero
lemma nnnorm_add_le (g h : α) : nnnorm (g + h) ≤ nnnorm g + nnnorm h :=
nnreal.coe_le_coe.2 $ norm_add_le g h
@[simp] lemma nnnorm_neg (g : α) : nnnorm (-g) = nnnorm g :=
nnreal.eq $ norm_neg g
lemma nndist_nnnorm_nnnorm_le (g h : α) : nndist (nnnorm g) (nnnorm h) ≤ nnnorm (g - h) :=
nnreal.coe_le_coe.2 $ dist_norm_norm_le g h
lemma of_real_norm_eq_coe_nnnorm (x : β) : ennreal.of_real ∥x∥ = (nnnorm x : ennreal) :=
ennreal.of_real_eq_coe_nnreal _
lemma edist_eq_coe_nnnorm_sub (x y : β) : edist x y = (nnnorm (x - y) : ennreal) :=
by rw [edist_dist, dist_eq_norm, of_real_norm_eq_coe_nnnorm]
lemma edist_eq_coe_nnnorm (x : β) : edist x 0 = (nnnorm x : ennreal) :=
by rw [edist_eq_coe_nnnorm_sub, _root_.sub_zero]
lemma nndist_add_add_le (g₁ g₂ h₁ h₂ : α) :
nndist (g₁ + g₂) (h₁ + h₂) ≤ nndist g₁ h₁ + nndist g₂ h₂ :=
nnreal.coe_le_coe.2 $ dist_add_add_le g₁ g₂ h₁ h₂
lemma edist_add_add_le (g₁ g₂ h₁ h₂ : α) :
edist (g₁ + g₂) (h₁ + h₂) ≤ edist g₁ h₁ + edist g₂ h₂ :=
by { simp only [edist_nndist], norm_cast, apply nndist_add_add_le }
lemma nnnorm_sum_le {β} : ∀(s : finset β) (f : β → α), nnnorm (∑ a in s, f a) ≤ ∑ a in s, nnnorm (f a) :=
finset.le_sum_of_subadditive nnnorm nnnorm_zero nnnorm_add_le
end nnnorm
lemma lipschitz_with.neg {α : Type*} [emetric_space α] {K : nnreal} {f : α → β}
(hf : lipschitz_with K f) : lipschitz_with K (λ x, -f x) :=
λ x y, by simpa only [edist_dist, dist_neg_neg] using hf x y
lemma lipschitz_with.add {α : Type*} [emetric_space α] {Kf : nnreal} {f : α → β}
(hf : lipschitz_with Kf f) {Kg : nnreal} {g : α → β} (hg : lipschitz_with Kg g) :
lipschitz_with (Kf + Kg) (λ x, f x + g x) :=
λ x y,
calc edist (f x + g x) (f y + g y) ≤ edist (f x) (f y) + edist (g x) (g y) :
edist_add_add_le _ _ _ _
... ≤ Kf * edist x y + Kg * edist x y :
add_le_add (hf x y) (hg x y)
... = (Kf + Kg) * edist x y :
(add_mul _ _ _).symm
lemma lipschitz_with.sub {α : Type*} [emetric_space α] {Kf : nnreal} {f : α → β}
(hf : lipschitz_with Kf f) {Kg : nnreal} {g : α → β} (hg : lipschitz_with Kg g) :
lipschitz_with (Kf + Kg) (λ x, f x - g x) :=
by simpa only [sub_eq_add_neg] using hf.add hg.neg
lemma antilipschitz_with.add_lipschitz_with {α : Type*} [metric_space α] {Kf : nnreal} {f : α → β}
(hf : antilipschitz_with Kf f) {Kg : nnreal} {g : α → β} (hg : lipschitz_with Kg g)
(hK : Kg < Kf⁻¹) :
antilipschitz_with (Kf⁻¹ - Kg)⁻¹ (λ x, f x + g x) :=
begin
refine antilipschitz_with.of_le_mul_dist (λ x y, _),
rw [nnreal.coe_inv, ← div_eq_inv_mul],
rw le_div_iff (nnreal.coe_pos.2 $ nnreal.sub_pos.2 hK),
rw [mul_comm, nnreal.coe_sub (le_of_lt hK), sub_mul],
calc ↑Kf⁻¹ * dist x y - Kg * dist x y ≤ dist (f x) (f y) - dist (g x) (g y) :
sub_le_sub (hf.mul_le_dist x y) (hg.dist_le_mul x y)
... ≤ _ : le_trans (le_abs_self _) (abs_dist_sub_le_dist_add_add _ _ _ _)
end
/-- A submodule of a normed group is also a normed group, with the restriction of the norm.
As all instances can be inferred from the submodule `s`, they are put as implicit instead of
typeclasses. -/
instance submodule.normed_group {𝕜 : Type*} {_ : ring 𝕜}
{E : Type*} [normed_group E] {_ : module 𝕜 E} (s : submodule 𝕜 E) : normed_group s :=
{ norm := λx, norm (x : E),
dist_eq := λx y, dist_eq_norm (x : E) (y : E) }
/-- normed group instance on the product of two normed groups, using the sup norm. -/
instance prod.normed_group : normed_group (α × β) :=
{ norm := λx, max ∥x.1∥ ∥x.2∥,
dist_eq := assume (x y : α × β),
show max (dist x.1 y.1) (dist x.2 y.2) = (max ∥(x - y).1∥ ∥(x - y).2∥), by simp [dist_eq_norm] }
lemma prod.norm_def (x : α × β) : ∥x∥ = (max ∥x.1∥ ∥x.2∥) := rfl
lemma prod.nnnorm_def (x : α × β) : nnnorm x = max (nnnorm x.1) (nnnorm x.2) :=
by { have := x.norm_def, simp only [← coe_nnnorm] at this, exact_mod_cast this }
lemma norm_fst_le (x : α × β) : ∥x.1∥ ≤ ∥x∥ :=
le_max_left _ _
lemma norm_snd_le (x : α × β) : ∥x.2∥ ≤ ∥x∥ :=
le_max_right _ _
lemma norm_prod_le_iff {x : α × β} {r : ℝ} :
∥x∥ ≤ r ↔ ∥x.1∥ ≤ r ∧ ∥x.2∥ ≤ r :=
max_le_iff
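/- Illustrative usage sketch (an addition, not part of the original file): a componentwise
bound on the norms of the two coordinates gives a bound on the sup norm of the pair. -/
example {x : α × β} {r : ℝ} (h₁ : ∥x.1∥ ≤ r) (h₂ : ∥x.2∥ ≤ r) : ∥x∥ ≤ r :=
norm_prod_le_iff.2 ⟨h₁, h₂⟩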
/-- normed group instance on the product of finitely many normed groups, using the sup norm. -/
instance pi.normed_group {π : ι → Type*} [fintype ι] [∀i, normed_group (π i)] :
normed_group (Πi, π i) :=
{ norm := λf, ((finset.sup finset.univ (λ b, nnnorm (f b)) : nnreal) : ℝ),
dist_eq := assume x y,
congr_arg (coe : ℝ≥0 → ℝ) $ congr_arg (finset.sup finset.univ) $ funext $ assume a,
show nndist (x a) (y a) = nnnorm (x a - y a), from nndist_eq_nnnorm _ _ }
/-- The norm of an element in a product space is `≤ r` if and only if the norm of each
component is. -/
lemma pi_norm_le_iff {π : ι → Type*} [fintype ι] [∀i, normed_group (π i)] {r : ℝ} (hr : 0 ≤ r)
{x : Πi, π i} : ∥x∥ ≤ r ↔ ∀i, ∥x i∥ ≤ r :=
by { simp only [(dist_zero_right _).symm, dist_pi_le_iff hr], refl }
lemma norm_le_pi_norm {π : ι → Type*} [fintype ι] [∀i, normed_group (π i)] (x : Πi, π i) (i : ι) :
∥x i∥ ≤ ∥x∥ :=
(pi_norm_le_iff (norm_nonneg x)).1 (le_refl _) i
lemma tendsto_iff_norm_tendsto_zero {f : ι → β} {a : filter ι} {b : β} :
tendsto f a (𝓝 b) ↔ tendsto (λ e, ∥ f e - b ∥) a (𝓝 0) :=
by rw tendsto_iff_dist_tendsto_zero ; simp only [(dist_eq_norm _ _).symm]
lemma tendsto_zero_iff_norm_tendsto_zero {f : γ → β} {a : filter γ} :
tendsto f a (𝓝 0) ↔ tendsto (λ e, ∥ f e ∥) a (𝓝 0) :=
have tendsto f a (𝓝 0) ↔ tendsto (λ e, ∥ f e - 0 ∥) a (𝓝 0) :=
tendsto_iff_norm_tendsto_zero,
by simpa
/-- Special case of the sandwich theorem: if the norm of `f` is eventually bounded by a real
function `g` which tends to `0`, then `f` tends to `0`.
In this pair of lemmas (`squeeze_zero_norm'` and `squeeze_zero_norm`), following a convention of
similar lemmas in `topology.metric_space.basic` and `topology.algebra.ordered`, the `'` version is
phrased using "eventually" and the non-`'` version is phrased absolutely. -/
lemma squeeze_zero_norm' {f : γ → α} {g : γ → ℝ} {t₀ : filter γ}
(h : ∀ᶠ n in t₀, ∥f n∥ ≤ g n)
(h' : tendsto g t₀ (𝓝 0)) : tendsto f t₀ (𝓝 0) :=
tendsto_zero_iff_norm_tendsto_zero.mpr
(squeeze_zero' (eventually_of_forall (λ n, norm_nonneg _)) h h')
/-- Special case of the sandwich theorem: if the norm of `f` is bounded by a real function `g` which
tends to `0`, then `f` tends to `0`. -/
lemma squeeze_zero_norm {f : γ → α} {g : γ → ℝ} {t₀ : filter γ}
(h : ∀ (n:γ), ∥f n∥ ≤ g n)
(h' : tendsto g t₀ (𝓝 0)) :
tendsto f t₀ (𝓝 0) :=
squeeze_zero_norm' (eventually_of_forall h) h'
lemma lim_norm (x : α) : tendsto (λg : α, ∥g - x∥) (𝓝 x) (𝓝 0) :=
tendsto_iff_norm_tendsto_zero.1 (continuous_iff_continuous_at.1 continuous_id x)
lemma lim_norm_zero : tendsto (λg : α, ∥g∥) (𝓝 0) (𝓝 0) :=
by simpa using lim_norm (0:α)
lemma continuous_norm : continuous (λg:α, ∥g∥) :=
begin
rw continuous_iff_continuous_at,
intro x,
rw [continuous_at, tendsto_iff_dist_tendsto_zero],
exact squeeze_zero (λ t, abs_nonneg _) (λ t, abs_norm_sub_norm_le _ _) (lim_norm x)
end
lemma filter.tendsto.norm {β : Type*} {l : filter β} {f : β → α} {a : α} (h : tendsto f l (𝓝 a)) :
tendsto (λ x, ∥f x∥) l (𝓝 ∥a∥) :=
tendsto.comp continuous_norm.continuous_at h
lemma continuous.norm [topological_space γ] {f : γ → α} (hf : continuous f) :
continuous (λ x, ∥f x∥) :=
continuous_norm.comp hf
lemma continuous_nnnorm : continuous (nnnorm : α → nnreal) :=
continuous_subtype_mk _ continuous_norm
lemma filter.tendsto.nnnorm {β : Type*} {l : filter β} {f : β → α} {a : α} (h : tendsto f l (𝓝 a)) :
tendsto (λ x, nnnorm (f x)) l (𝓝 (nnnorm a)) :=
tendsto.comp continuous_nnnorm.continuous_at h
/-- If `∥f y∥ → ∞` along `l`, then eventually `f y ≠ x`, for any fixed `x`. -/
lemma eventually_ne_of_tendsto_norm_at_top {l : filter γ} {f : γ → α}
(h : tendsto (λ y, ∥f y∥) l at_top) (x : α) :
∀ᶠ y in l, f y ≠ x :=
begin
have : ∀ᶠ y in l, 1 + ∥x∥ ≤ ∥f y∥ := h (mem_at_top (1 + ∥x∥)),
refine this.mono (λ y hy hxy, _),
subst x,
exact not_le_of_lt zero_lt_one (add_le_iff_nonpos_left.1 hy)
end
/-- A normed group is a uniform additive group, i.e., addition and subtraction are uniformly
continuous. -/
@[priority 100] -- see Note [lower instance priority]
instance normed_uniform_group : uniform_add_group α :=
⟨(lipschitz_with.prod_fst.sub lipschitz_with.prod_snd).uniform_continuous⟩
@[priority 100] -- see Note [lower instance priority]
instance normed_top_monoid : has_continuous_add α := by apply_instance -- short-circuit type class inference
@[priority 100] -- see Note [lower instance priority]
instance normed_top_group : topological_add_group α := by apply_instance -- short-circuit type class inference
end normed_group
section normed_ring
/-- A normed ring is a ring endowed with a norm which satisfies the inequality `∥x y∥ ≤ ∥x∥ ∥y∥`. -/
class normed_ring (α : Type*) extends has_norm α, ring α, metric_space α :=
(dist_eq : ∀ x y, dist x y = norm (x - y))
(norm_mul : ∀ a b, norm (a * b) ≤ norm a * norm b)
/-- A normed commutative ring is a commutative ring endowed with a norm which satisfies
the inequality `∥x y∥ ≤ ∥x∥ ∥y∥`. -/
class normed_comm_ring (α : Type*) extends normed_ring α :=
(mul_comm : ∀ x y : α, x * y = y * x)
/-- A mixin class with the axiom `∥1∥ = 1`. Many `normed_ring`s and all `normed_field`s satisfy this
axiom. -/
class norm_one_class (α : Type*) [has_norm α] [has_one α] : Prop :=
(norm_one : ∥(1:α)∥ = 1)
export norm_one_class (norm_one)
attribute [simp] norm_one
@[simp] lemma nnnorm_one [normed_group α] [has_one α] [norm_one_class α] : nnnorm (1:α) = 1 :=
nnreal.eq norm_one
@[priority 100] -- see Note [lower instance priority]
instance normed_comm_ring.to_comm_ring [β : normed_comm_ring α] : comm_ring α := { ..β }
@[priority 100] -- see Note [lower instance priority]
instance normed_ring.to_normed_group [β : normed_ring α] : normed_group α := { ..β }
instance prod.norm_one_class [normed_group α] [has_one α] [norm_one_class α]
[normed_group β] [has_one β] [norm_one_class β] :
norm_one_class (α × β) :=
⟨by simp [prod.norm_def]⟩
variables [normed_ring α]
lemma norm_mul_le (a b : α) : (∥a*b∥) ≤ (∥a∥) * (∥b∥) :=
normed_ring.norm_mul _ _
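/- Illustrative sketch (an addition, not part of the original file): submultiplicativity
extends to three factors by chaining `norm_mul_le`, using the standard
`mul_le_mul_of_nonneg_right` lemma. -/
example (a b c : α) : ∥a * b * c∥ ≤ ∥a∥ * ∥b∥ * ∥c∥ :=
le_trans (norm_mul_le _ _) (mul_le_mul_of_nonneg_right (norm_mul_le _ _) (norm_nonneg _))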
lemma list.norm_prod_le' : ∀ {l : list α}, l ≠ [] → ∥l.prod∥ ≤ (l.map norm).prod
| [] h := (h rfl).elim
| [a] _ := by simp
| (a :: b :: l) _ :=
begin
rw [list.map_cons, list.prod_cons, @list.prod_cons _ _ _ ∥a∥],
refine le_trans (norm_mul_le _ _) (mul_le_mul_of_nonneg_left _ (norm_nonneg _)),
exact list.norm_prod_le' (list.cons_ne_nil b l)
end
lemma list.norm_prod_le [norm_one_class α] : ∀ l : list α, ∥l.prod∥ ≤ (l.map norm).prod
| [] := by simp
| (a::l) := list.norm_prod_le' (list.cons_ne_nil a l)
lemma finset.norm_prod_le' {α : Type*} [normed_comm_ring α] (s : finset ι) (hs : s.nonempty)
(f : ι → α) :
∥∏ i in s, f i∥ ≤ ∏ i in s, ∥f i∥ :=
begin
rcases s with ⟨⟨l⟩, hl⟩,
have : l.map f ≠ [], by simpa using hs,
simpa using list.norm_prod_le' this
end
lemma finset.norm_prod_le {α : Type*} [normed_comm_ring α] [norm_one_class α] (s : finset ι)
(f : ι → α) :
∥∏ i in s, f i∥ ≤ ∏ i in s, ∥f i∥ :=
begin
rcases s with ⟨⟨l⟩, hl⟩,
simpa using (l.map f).norm_prod_le
end
/-- If `α` is a normed ring, then `∥a ^ n∥ ≤ ∥a∥ ^ n` for `n > 0`. See also `norm_pow_le`. -/
lemma norm_pow_le' (a : α) : ∀ {n : ℕ}, 0 < n → ∥a^n∥ ≤ ∥a∥^n
| 1 h := by simp
| (n+2) h :=
le_trans (norm_mul_le a (a^(n+1)))
(mul_le_mul (le_refl _)
(norm_pow_le' (nat.succ_pos _)) (norm_nonneg _) (norm_nonneg _))
/-- If `α` is a normed ring with `∥1∥ = 1`, then `∥a ^ n∥ ≤ ∥a∥ ^ n`. See also `norm_pow_le'`. -/
lemma norm_pow_le [norm_one_class α] (a : α) : ∀ (n : ℕ), ∥a^n∥ ≤ ∥a∥^n
| 0 := by simp
| (n+1) := norm_pow_le' a n.zero_lt_succ
lemma eventually_norm_pow_le (a : α) : ∀ᶠ (n:ℕ) in at_top, ∥a ^ n∥ ≤ ∥a∥ ^ n :=
eventually_at_top.mpr ⟨1, λ b h, norm_pow_le' a (nat.succ_le_iff.mp h)⟩
lemma units.norm_pos [nontrivial α] (x : units α) : 0 < ∥(x:α)∥ :=
norm_pos_iff.mpr (units.ne_zero x)
/-- In a normed ring, the left-multiplication `add_monoid_hom` is bounded. -/
lemma mul_left_bound (x : α) :
∀ (y:α), ∥add_monoid_hom.mul_left x y∥ ≤ ∥x∥ * ∥y∥ :=
norm_mul_le x
/-- In a normed ring, the right-multiplication `add_monoid_hom` is bounded. -/
lemma mul_right_bound (x : α) :
∀ (y:α), ∥add_monoid_hom.mul_right x y∥ ≤ ∥x∥ * ∥y∥ :=
λ y, by {rw mul_comm, convert norm_mul_le y x}
/-- Normed ring structure on the product of two normed rings, using the sup norm. -/
instance prod.normed_ring [normed_ring β] : normed_ring (α × β) :=
{ norm_mul := assume x y,
calc
∥x * y∥ = ∥(x.1*y.1, x.2*y.2)∥ : rfl
... = (max ∥x.1*y.1∥ ∥x.2*y.2∥) : rfl
... ≤ (max (∥x.1∥*∥y.1∥) (∥x.2∥*∥y.2∥)) :
max_le_max (norm_mul_le (x.1) (y.1)) (norm_mul_le (x.2) (y.2))
... = (max (∥x.1∥*∥y.1∥) (∥y.2∥*∥x.2∥)) : by simp[mul_comm]
... ≤ (max (∥x.1∥) (∥x.2∥)) * (max (∥y.2∥) (∥y.1∥)) : by { apply max_mul_mul_le_max_mul_max; simp [norm_nonneg] }
... = (max (∥x.1∥) (∥x.2∥)) * (max (∥y.1∥) (∥y.2∥)) : by simp[max_comm]
... = (∥x∥*∥y∥) : rfl,
..prod.normed_group }
end normed_ring
@[priority 100] -- see Note [lower instance priority]
instance normed_ring_top_monoid [normed_ring α] : has_continuous_mul α :=
⟨ continuous_iff_continuous_at.2 $ λ x, tendsto_iff_norm_tendsto_zero.2 $
begin
have : ∀ e : α × α, ∥e.1 * e.2 - x.1 * x.2∥ ≤ ∥e.1∥ * ∥e.2 - x.2∥ + ∥e.1 - x.1∥ * ∥x.2∥,
{ intro e,
calc ∥e.1 * e.2 - x.1 * x.2∥ ≤ ∥e.1 * (e.2 - x.2) + (e.1 - x.1) * x.2∥ :
by rw [mul_sub, sub_mul, sub_add_sub_cancel]
... ≤ ∥e.1∥ * ∥e.2 - x.2∥ + ∥e.1 - x.1∥ * ∥x.2∥ :
norm_add_le_of_le (norm_mul_le _ _) (norm_mul_le _ _) },
refine squeeze_zero (λ e, norm_nonneg _) this _,
convert ((continuous_fst.tendsto x).norm.mul ((continuous_snd.tendsto x).sub
tendsto_const_nhds).norm).add
(((continuous_fst.tendsto x).sub tendsto_const_nhds).norm.mul _),
show tendsto _ _ _, from tendsto_const_nhds,
simp
end ⟩
/-- A normed ring is a topological ring. -/
@[priority 100] -- see Note [lower instance priority]
instance normed_top_ring [normed_ring α] : topological_ring α :=
⟨ continuous_iff_continuous_at.2 $ λ x, tendsto_iff_norm_tendsto_zero.2 $
have ∀ e : α, -e - -x = -(e - x), by intro; simp,
by simp only [this, norm_neg]; apply lim_norm ⟩
/-- A normed field is a field with a norm satisfying `∥x y∥ = ∥x∥ ∥y∥`. -/
class normed_field (α : Type*) extends has_norm α, field α, metric_space α :=
(dist_eq : ∀ x y, dist x y = norm (x - y))
(norm_mul' : ∀ a b, norm (a * b) = norm a * norm b)
/-- A nondiscrete normed field is a normed field in which there is an element of norm different from
`0` and `1`. This makes it possible to bring any element arbitrarily close to `0` by multiplying by
suitable powers of such an element, and thus to relate algebra and topology. -/
class nondiscrete_normed_field (α : Type*) extends normed_field α :=
(non_trivial : ∃x:α, 1<∥x∥)
namespace normed_field
section normed_field
variables [normed_field α]
@[simp] lemma norm_mul (a b : α) : ∥a * b∥ = ∥a∥ * ∥b∥ :=
normed_field.norm_mul' a b
@[priority 100] -- see Note [lower instance priority]
instance to_normed_comm_ring : normed_comm_ring α :=
{ norm_mul := λ a b, (norm_mul a b).le, ..‹normed_field α› }
@[priority 900]
instance to_norm_one_class : norm_one_class α :=
⟨mul_left_cancel' (mt norm_eq_zero.1 (@one_ne_zero α _ _)) $
by rw [← norm_mul, mul_one, mul_one]⟩
/-- `norm` as a `monoid_hom`. -/
@[simps] def norm_hom : monoid_with_zero_hom α ℝ := ⟨norm, norm_zero, norm_one, norm_mul⟩
@[simp] lemma norm_pow (a : α) : ∀ (n : ℕ), ∥a ^ n∥ = ∥a∥ ^ n :=
norm_hom.to_monoid_hom.map_pow a
@[simp] lemma norm_prod (s : finset β) (f : β → α) :
∥∏ b in s, f b∥ = ∏ b in s, ∥f b∥ :=
(norm_hom.to_monoid_hom : α →* ℝ).map_prod f s
@[simp] lemma norm_div (a b : α) : ∥a / b∥ = ∥a∥ / ∥b∥ :=
(norm_hom : monoid_with_zero_hom α ℝ).map_div a b
@[simp] lemma norm_inv (a : α) : ∥a⁻¹∥ = ∥a∥⁻¹ :=
(norm_hom : monoid_with_zero_hom α ℝ).map_inv' a
@[simp] lemma nnnorm_inv (a : α) : nnnorm (a⁻¹) = (nnnorm a)⁻¹ :=
nnreal.eq $ by simp
@[simp] lemma norm_fpow : ∀ (a : α) (n : ℤ), ∥a^n∥ = ∥a∥^n :=
(norm_hom : monoid_with_zero_hom α ℝ).map_fpow
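/- Illustrative sketch (an addition, not part of the original file): multiplicativity of the
norm combined with the standard `mul_inv_cancel` lemma computes the norm of `a * a⁻¹` for
nonzero `a`. -/
example (a : α) (ha : a ≠ 0) : ∥a * a⁻¹∥ = 1 :=
by rw [mul_inv_cancel ha, norm_one]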
@[priority 100] -- see Note [lower instance priority]
instance : has_continuous_inv' α :=
begin
refine ⟨λ r r0, tendsto_iff_norm_tendsto_zero.2 _⟩,
have r0' : 0 < ∥r∥ := norm_pos_iff.2 r0,
rcases exists_between r0' with ⟨ε, ε0, εr⟩,
have : ∀ᶠ e in 𝓝 r, ∥e⁻¹ - r⁻¹∥ ≤ ∥r - e∥ / (∥r∥ * ε),
{ filter_upwards [(is_open_lt continuous_const continuous_norm).eventually_mem εr],
intros e he,
have e0 : e ≠ 0 := norm_pos_iff.1 (ε0.trans he),
calc ∥e⁻¹ - r⁻¹∥ = ∥r - e∥ / (∥r∥ * ∥e∥) :
by simp only [← norm_div, ← norm_mul, sub_div, div_mul_right _ r0, div_mul_left e0, one_div]
... ≤ ∥r - e∥ / (∥r∥ * ε) :
div_le_div_of_le_left (norm_nonneg _) (mul_pos r0' ε0)
(mul_le_mul_of_nonneg_left he.le r0'.le) },
refine squeeze_zero' (eventually_of_forall $ λ _, norm_nonneg _) this _,
rw [← zero_div (∥r∥ * ε), ← @norm_zero α, ← sub_self r],
exact tendsto.mul (tendsto_const_nhds.sub tendsto_id).norm tendsto_const_nhds
end
end normed_field
variables (α) [nondiscrete_normed_field α]
lemma exists_one_lt_norm : ∃x : α, 1 < ∥x∥ := ‹nondiscrete_normed_field α›.non_trivial
lemma exists_norm_lt_one : ∃x : α, 0 < ∥x∥ ∧ ∥x∥ < 1 :=
begin
rcases exists_one_lt_norm α with ⟨y, hy⟩,
refine ⟨y⁻¹, _, _⟩,
{ simp only [inv_eq_zero, ne.def, norm_pos_iff],
rintro rfl,
rw norm_zero at hy,
exact lt_asymm zero_lt_one hy },
{ simp [inv_lt_one hy] }
end
lemma exists_lt_norm (r : ℝ) : ∃ x : α, r < ∥x∥ :=
let ⟨w, hw⟩ := exists_one_lt_norm α in
let ⟨n, hn⟩ := pow_unbounded_of_one_lt r hw in
⟨w^n, by rwa norm_pow⟩
lemma exists_norm_lt {r : ℝ} (hr : 0 < r) : ∃ x : α, 0 < ∥x∥ ∧ ∥x∥ < r :=
let ⟨w, hw⟩ := exists_one_lt_norm α in
let ⟨n, hle, hlt⟩ := exists_int_pow_near' hr hw in
⟨w^n, by { rw norm_fpow; exact fpow_pos_of_pos (lt_trans zero_lt_one hw) _},
by rwa norm_fpow⟩
variable {α}
@[instance]
lemma punctured_nhds_ne_bot (x : α) : ne_bot (𝓝[{x}ᶜ] x) :=
begin
rw [← mem_closure_iff_nhds_within_ne_bot, metric.mem_closure_iff],
rintros ε ε0,
rcases normed_field.exists_norm_lt α ε0 with ⟨b, hb0, hbε⟩,
refine ⟨x + b, mt (set.mem_singleton_iff.trans add_right_eq_self).1 $ norm_pos_iff.1 hb0, _⟩,
rwa [dist_comm, dist_eq_norm, add_sub_cancel'],
end
@[instance]
lemma nhds_within_is_unit_ne_bot : ne_bot (𝓝[{x : α | is_unit x}] 0) :=
by simpa only [is_unit_iff_ne_zero] using punctured_nhds_ne_bot (0:α)
end normed_field
instance : normed_field ℝ :=
{ norm := λ x, abs x,
dist_eq := assume x y, rfl,
norm_mul' := abs_mul }
instance : nondiscrete_normed_field ℝ :=
{ non_trivial := ⟨2, by { unfold norm, rw abs_of_nonneg; norm_num }⟩ }
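/- Illustrative sketch (an addition, not part of the original file): with the instance above,
the general existence lemma for nondiscrete normed fields applies to `ℝ`. -/
example : ∃ x : ℝ, 1 < ∥x∥ := normed_field.exists_one_lt_norm ℝ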
namespace real
lemma norm_eq_abs (r : ℝ) : ∥r∥ = abs r := rfl
lemma norm_of_nonneg {x : ℝ} (hx : 0 ≤ x) : ∥x∥ = x :=
abs_of_nonneg hx
@[simp] lemma norm_coe_nat (n : ℕ) : ∥(n : ℝ)∥ = n := abs_of_nonneg n.cast_nonneg
@[simp] lemma nnnorm_coe_nat (n : ℕ) : nnnorm (n : ℝ) = n := nnreal.eq $ by simp
@[simp] lemma norm_two : ∥(2:ℝ)∥ = 2 := abs_of_pos (@zero_lt_two ℝ _ _)
@[simp] lemma nnnorm_two : nnnorm (2:ℝ) = 2 := nnreal.eq $ by simp
open_locale nnreal
@[simp] lemma nnreal.norm_eq (x : ℝ≥0) : ∥(x : ℝ)∥ = x :=
by rw [real.norm_eq_abs, x.abs_eq]
lemma nnnorm_coe_eq_self {x : ℝ≥0} : nnnorm (x : ℝ) = x :=
by { ext, exact norm_of_nonneg (zero_le x) }
lemma nnnorm_of_nonneg {x : ℝ} (hx : 0 ≤ x) : nnnorm x = ⟨x, hx⟩ :=
@nnnorm_coe_eq_self ⟨x, hx⟩
lemma ennnorm_eq_of_real {x : ℝ} (hx : 0 ≤ x) : (nnnorm x : ennreal) = ennreal.of_real x :=
by { rw [← of_real_norm_eq_coe_nnnorm, norm_of_nonneg hx] }
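/- Illustrative sketch (an addition, not part of the original file): a concrete norm
computation from `norm_neg` and `norm_two`. -/
example : ∥(-2 : ℝ)∥ = 2 := by rw [norm_neg, norm_two]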
end real
@[simp] lemma norm_norm [normed_group α] (x : α) : ∥∥x∥∥ = ∥x∥ :=
by rw [real.norm_of_nonneg (norm_nonneg _)]
@[simp] lemma nnnorm_norm [normed_group α] (a : α) : nnnorm ∥a∥ = nnnorm a :=
by simp only [nnnorm, norm_norm]
instance : normed_comm_ring ℤ :=
{ norm := λ n, ∥(n : ℝ)∥,
norm_mul := λ m n, le_of_eq $ by simp only [norm, int.cast_mul, abs_mul],
dist_eq := λ m n, by simp only [int.dist_eq, norm, int.cast_sub],
mul_comm := mul_comm }
@[norm_cast] lemma int.norm_cast_real (m : ℤ) : ∥(m : ℝ)∥ = ∥m∥ := rfl
instance : norm_one_class ℤ :=
⟨by simp [← int.norm_cast_real]⟩
instance : normed_field ℚ :=
{ norm := λ r, ∥(r : ℝ)∥,
norm_mul' := λ r₁ r₂, by simp only [norm, rat.cast_mul, abs_mul],
dist_eq := λ r₁ r₂, by simp only [rat.dist_eq, norm, rat.cast_sub] }
instance : nondiscrete_normed_field ℚ :=
{ non_trivial := ⟨2, by { unfold norm, rw abs_of_nonneg; norm_num }⟩ }
@[norm_cast, simp] lemma rat.norm_cast_real (r : ℚ) : ∥(r : ℝ)∥ = ∥r∥ := rfl
@[norm_cast, simp] lemma int.norm_cast_rat (m : ℤ) : ∥(m : ℚ)∥ = ∥m∥ :=
by rw [← rat.norm_cast_real, ← int.norm_cast_real]; congr' 1; norm_cast
section normed_space
section prio
set_option extends_priority 920
-- Here, we set a rather high priority for the instance `[normed_space α β] : semimodule α β`
-- to take precedence over `semiring.to_semimodule` as this leads to instance paths with better
-- unification properties.
-- see Note[vector space definition] for why we extend `semimodule`.
/-- A normed space over a normed field is a vector space endowed with a norm which satisfies the
equality `∥c • x∥ = ∥c∥ ∥x∥`. We require only `∥c • x∥ ≤ ∥c∥ ∥x∥` in the definition, then prove
`∥c • x∥ = ∥c∥ ∥x∥` in `norm_smul`. -/
class normed_space (α : Type*) (β : Type*) [normed_field α] [normed_group β]
extends semimodule α β :=
(norm_smul_le : ∀ (a:α) (b:β), ∥a • b∥ ≤ ∥a∥ * ∥b∥)
end prio
variables [normed_field α] [normed_group β]
instance normed_field.to_normed_space : normed_space α α :=
{ norm_smul_le := λ a b, le_of_eq (normed_field.norm_mul a b) }
lemma norm_smul [normed_space α β] (s : α) (x : β) : ∥s • x∥ = ∥s∥ * ∥x∥ :=
begin
classical,
by_cases h : s = 0,
{ simp [h] },
{ refine le_antisymm (normed_space.norm_smul_le s x) _,
calc ∥s∥ * ∥x∥ = ∥s∥ * ∥s⁻¹ • s • x∥ : by rw [inv_smul_smul' h]
... ≤ ∥s∥ * (∥s⁻¹∥ * ∥s • x∥) : _
... = ∥s • x∥ : _,
exact mul_le_mul_of_nonneg_left (normed_space.norm_smul_le _ _) (norm_nonneg _),
rw [normed_field.norm_inv, ← mul_assoc, mul_inv_cancel, one_mul],
rwa [ne.def, norm_eq_zero] }
end
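/- Illustrative sketch (an addition, not part of the original file): `norm_smul` bounds the
norm of a rescaled vector lying in the unit ball, using the standard
`mul_le_of_le_one_right` lemma. -/
example [normed_space α β] (s : α) (x : β) (h : ∥x∥ ≤ 1) : ∥s • x∥ ≤ ∥s∥ :=
by { rw norm_smul, exact mul_le_of_le_one_right (norm_nonneg _) h }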
@[simp] lemma abs_norm_eq_norm (z : β) : abs ∥z∥ = ∥z∥ :=
(abs_eq (norm_nonneg z)).mpr (or.inl rfl)
lemma dist_smul [normed_space α β] (s : α) (x y : β) : dist (s • x) (s • y) = ∥s∥ * dist x y :=
by simp only [dist_eq_norm, (norm_smul _ _).symm, smul_sub]
lemma nnnorm_smul [normed_space α β] (s : α) (x : β) : nnnorm (s • x) = nnnorm s * nnnorm x :=
nnreal.eq $ norm_smul s x
lemma nndist_smul [normed_space α β] (s : α) (x y : β) :
nndist (s • x) (s • y) = nnnorm s * nndist x y :=
nnreal.eq $ dist_smul s x y
lemma norm_smul_of_nonneg [normed_space ℝ β] {t : ℝ} (ht : 0 ≤ t) (x : β) : ∥t • x∥ = t * ∥x∥ :=
by rw [norm_smul, real.norm_eq_abs, abs_of_nonneg ht]
variables {E : Type*} {F : Type*}
[normed_group E] [normed_space α E] [normed_group F] [normed_space α F]
@[priority 100] -- see Note [lower instance priority]
instance normed_space.topological_vector_space : topological_vector_space α E :=
begin
refine { continuous_smul := continuous_iff_continuous_at.2 $
λ p, tendsto_iff_norm_tendsto_zero.2 _ },
refine squeeze_zero (λ _, norm_nonneg _) _ _,
{ exact λ q, ∥q.1 - p.1∥ * ∥q.2∥ + ∥p.1∥ * ∥q.2 - p.2∥ },
{ intro q,
rw [← sub_add_sub_cancel, ← norm_smul, ← norm_smul, smul_sub, sub_smul],
exact norm_add_le _ _ },
{ conv { congr, skip, skip, congr, rw [← zero_add (0:ℝ)], congr,
rw [← zero_mul ∥p.2∥], skip, rw [← mul_zero ∥p.1∥] },
exact ((tendsto_iff_norm_tendsto_zero.1 (continuous_fst.tendsto p)).mul
(continuous_snd.tendsto p).norm).add
(tendsto_const_nhds.mul (tendsto_iff_norm_tendsto_zero.1 (continuous_snd.tendsto p))) }
end
theorem closure_ball [normed_space ℝ E] (x : E) {r : ℝ} (hr : 0 < r) :
closure (ball x r) = closed_ball x r :=
begin
refine set.subset.antisymm closure_ball_subset_closed_ball (λ y hy, _),
have : continuous_within_at (λ c : ℝ, c • (y - x) + x) (set.Ico 0 1) 1 :=
((continuous_id.smul continuous_const).add continuous_const).continuous_within_at,
convert this.mem_closure _ _,
{ rw [one_smul, sub_add_cancel] },
{ simp [closure_Ico (@zero_lt_one ℝ _ _), zero_le_one] },
{ rintros c ⟨hc0, hc1⟩,
rw [set.mem_preimage, mem_ball, dist_eq_norm, add_sub_cancel, norm_smul, real.norm_eq_abs,
abs_of_nonneg hc0, mul_comm, ← mul_one r],
rw [mem_closed_ball, dist_eq_norm] at hy,
apply mul_lt_mul'; assumption }
end
theorem frontier_ball [normed_space ℝ E] (x : E) {r : ℝ} (hr : 0 < r) :
frontier (ball x r) = sphere x r :=
begin
rw [frontier, closure_ball x hr, is_open_ball.interior_eq],
ext x, exact (@eq_iff_le_not_lt ℝ _ _ _).symm
end
theorem interior_closed_ball [normed_space ℝ E] (x : E) {r : ℝ} (hr : 0 < r) :
interior (closed_ball x r) = ball x r :=
begin
refine set.subset.antisymm _ ball_subset_interior_closed_ball,
intros y hy,
rcases le_iff_lt_or_eq.1 (mem_closed_ball.1 $ interior_subset hy) with hr|rfl, { exact hr },
set f : ℝ → E := λ c : ℝ, c • (y - x) + x,
suffices : f ⁻¹' closed_ball x (dist y x) ⊆ set.Icc (-1) 1,
{ have hfc : continuous f := (continuous_id.smul continuous_const).add continuous_const,
have hf1 : (1:ℝ) ∈ f ⁻¹' (interior (closed_ball x $ dist y x)), by simpa [f],
have h1 : (1:ℝ) ∈ interior (set.Icc (-1:ℝ) 1) :=
interior_mono this (preimage_interior_subset_interior_preimage hfc hf1),
contrapose h1,
simp },
intros c hc,
rw [set.mem_Icc, ← abs_le, ← real.norm_eq_abs, ← mul_le_mul_right hr],
simpa [f, dist_eq_norm, norm_smul] using hc
end
theorem interior_closed_ball' [normed_space ℝ E] [nontrivial E] (x : E) (r : ℝ) :
interior (closed_ball x r) = ball x r :=
begin
rcases lt_trichotomy r 0 with hr|rfl|hr,
{ simp [closed_ball_eq_empty_iff_neg.2 hr, ball_eq_empty_iff_nonpos.2 (le_of_lt hr)] },
{ suffices : x ∉ interior {x},
{ rw [ball_zero, closed_ball_zero, ← set.subset_empty_iff],
intros y hy,
obtain rfl : y = x := set.mem_singleton_iff.1 (interior_subset hy),
exact this hy },
rw [← set.mem_compl_iff, ← closure_compl],
rcases exists_ne (0 : E) with ⟨z, hz⟩,
suffices : (λ c : ℝ, x + c • z) 0 ∈ closure ({x}ᶜ : set E),
by simpa only [zero_smul, add_zero] using this,
have : (0:ℝ) ∈ closure (set.Ioi (0:ℝ)), by simp [closure_Ioi],
refine (continuous_const.add (continuous_id.smul
continuous_const)).continuous_within_at.mem_closure this _,
intros c hc,
simp [smul_eq_zero, hz, ne_of_gt hc] },
{ exact interior_closed_ball x hr }
end
theorem frontier_closed_ball [normed_space ℝ E] (x : E) {r : ℝ} (hr : 0 < r) :
frontier (closed_ball x r) = sphere x r :=
by rw [frontier, closure_closed_ball, interior_closed_ball x hr,
closed_ball_diff_ball]
theorem frontier_closed_ball' [normed_space ℝ E] [nontrivial E] (x : E) (r : ℝ) :
frontier (closed_ball x r) = sphere x r :=
by rw [frontier, closure_closed_ball, interior_closed_ball' x r, closed_ball_diff_ball]
open normed_field
/-- If there is a scalar `c` with `∥c∥ > 1`, then any nonzero element can be moved by scalar
multiplication into any shell of width `∥c∥`. We also record information on the norm of the
rescaling element, which shows up in applications. -/
lemma rescale_to_shell {c : α} (hc : 1 < ∥c∥) {ε : ℝ} (εpos : 0 < ε) {x : E} (hx : x ≠ 0) :
∃d:α, d ≠ 0 ∧ ∥d • x∥ < ε ∧ (ε/∥c∥ ≤ ∥d • x∥) ∧ (∥d∥⁻¹ ≤ ε⁻¹ * ∥c∥ * ∥x∥) :=
begin
have xεpos : 0 < ∥x∥/ε := div_pos (norm_pos_iff.2 hx) εpos,
rcases exists_int_pow_near xεpos hc with ⟨n, hn⟩,
have cpos : 0 < ∥c∥ := lt_trans (zero_lt_one : (0 :ℝ) < 1) hc,
have cnpos : 0 < ∥c^(n+1)∥ := by { rw norm_fpow, exact lt_trans xεpos hn.2 },
refine ⟨(c^(n+1))⁻¹, _, _, _, _⟩,
show (c ^ (n + 1))⁻¹ ≠ 0,
by rwa [ne.def, inv_eq_zero, ← ne.def, ← norm_pos_iff],
show ∥(c ^ (n + 1))⁻¹ • x∥ < ε,
{ rw [norm_smul, norm_inv, ← div_eq_inv_mul, div_lt_iff cnpos, mul_comm, norm_fpow],
exact (div_lt_iff εpos).1 (hn.2) },
show ε / ∥c∥ ≤ ∥(c ^ (n + 1))⁻¹ • x∥,
{ rw [div_le_iff cpos, norm_smul, norm_inv, norm_fpow, fpow_add (ne_of_gt cpos),
fpow_one, mul_inv_rev', mul_comm, ← mul_assoc, ← mul_assoc, mul_inv_cancel (ne_of_gt cpos),
one_mul, ← div_eq_inv_mul, le_div_iff (fpow_pos_of_pos cpos _), mul_comm],
exact (le_div_iff εpos).1 hn.1 },
show ∥(c ^ (n + 1))⁻¹∥⁻¹ ≤ ε⁻¹ * ∥c∥ * ∥x∥,
{ have : ε⁻¹ * ∥c∥ * ∥x∥ = ε⁻¹ * ∥x∥ * ∥c∥, by ring,
rw [norm_inv, inv_inv', norm_fpow, fpow_add (ne_of_gt cpos), fpow_one, this, ← div_eq_inv_mul],
exact mul_le_mul_of_nonneg_right hn.1 (norm_nonneg _) }
end
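/- Worked illustration of `rescale_to_shell` (a sketch, not part of the proof): taking `c = 2` and
`ε = 1`, the chosen `d` is `(2 ^ (n + 1))⁻¹` for the integer `n` with `2 ^ n ≤ ∥x∥ < 2 ^ (n + 1)`,
so `∥d • x∥` lands in the interval `[1/2, 1)`, a shell of multiplicative width `∥c∥ = 2`. -/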
/-- The product of two normed spaces is a normed space, with the sup norm. -/
instance : normed_space α (E × F) :=
{ norm_smul_le := λ s x, le_of_eq $ by simp [prod.norm_def, norm_smul, mul_max_of_nonneg],
-- TODO: without the next two lines Lean unfolds `≤` to `real.le`
add_smul := λ r x y, prod.ext (add_smul _ _ _) (add_smul _ _ _),
smul_add := λ r x y, prod.ext (smul_add _ _ _) (smul_add _ _ _),
..prod.normed_group,
..prod.semimodule }
/-- The product of finitely many normed spaces is a normed space, with the sup norm. -/
instance pi.normed_space {E : ι → Type*} [fintype ι] [∀i, normed_group (E i)]
[∀i, normed_space α (E i)] : normed_space α (Πi, E i) :=
{ norm_smul_le := λ a f, le_of_eq $
show (↑(finset.sup finset.univ (λ (b : ι), nnnorm (a • f b))) : ℝ) =
nnnorm a * ↑(finset.sup finset.univ (λ (b : ι), nnnorm (f b))),
by simp only [(nnreal.coe_mul _ _).symm, nnreal.mul_finset_sup, nnnorm_smul] }
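/- Note (illustrative): both instances use the sup norm, so for a pair one expects
∥((3 : ℝ), (-4 : ℝ))∥ = max ∥(3 : ℝ)∥ ∥(-4 : ℝ)∥ = 4, and for a finite product the norm is the
largest coordinatewise norm; since ∥a∥ distributes over this max, `norm_smul_le` is an equality. -/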
/-- A subspace of a normed space is also a normed space, with the restriction of the norm. -/
instance submodule.normed_space {𝕜 : Type*} [normed_field 𝕜]
{E : Type*} [normed_group E] [normed_space 𝕜 E] (s : submodule 𝕜 E) : normed_space 𝕜 s :=
{ norm_smul_le := λc x, le_of_eq $ norm_smul c (x : E) }
end normed_space
section normed_algebra
/-- A normed algebra `𝕜'` over `𝕜` is an algebra endowed with a norm for which the embedding of
`𝕜` in `𝕜'` is an isometry. -/
class normed_algebra (𝕜 : Type*) (𝕜' : Type*) [normed_field 𝕜] [normed_ring 𝕜']
extends algebra 𝕜 𝕜' :=
(norm_algebra_map_eq : ∀x:𝕜, ∥algebra_map 𝕜 𝕜' x∥ = ∥x∥)
@[simp] lemma norm_algebra_map_eq {𝕜 : Type*} (𝕜' : Type*) [normed_field 𝕜] [normed_ring 𝕜']
[h : normed_algebra 𝕜 𝕜'] (x : 𝕜) : ∥algebra_map 𝕜 𝕜' x∥ = ∥x∥ :=
normed_algebra.norm_algebra_map_eq _
variables (𝕜 : Type*) [normed_field 𝕜]
variables (𝕜' : Type*) [normed_ring 𝕜']
@[priority 100]
instance normed_algebra.to_normed_space [h : normed_algebra 𝕜 𝕜'] : normed_space 𝕜 𝕜' :=
{ norm_smul_le := λ s x, calc
∥s • x∥ = ∥((algebra_map 𝕜 𝕜') s) * x∥ : by { rw h.smul_def', refl }
... ≤ ∥algebra_map 𝕜 𝕜' s∥ * ∥x∥ : normed_ring.norm_mul _ _
... = ∥s∥ * ∥x∥ : by rw norm_algebra_map_eq,
..h }
instance normed_algebra.id : normed_algebra 𝕜 𝕜 :=
{ norm_algebra_map_eq := by simp,
.. algebra.id 𝕜}
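/- A usage sketch (assuming the instances above): specialising `norm_algebra_map_eq` to the
identity algebra gives, for any `x : 𝕜`,
  example (x : 𝕜) : ∥algebra_map 𝕜 𝕜 x∥ = ∥x∥ := norm_algebra_map_eq 𝕜 x
i.e. the isometry condition is trivially satisfied by `normed_algebra.id`. -/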
variables {𝕜'} [normed_algebra 𝕜 𝕜']
include 𝕜
@[simp] lemma normed_algebra.norm_one : ∥(1:𝕜')∥ = 1 :=
by simpa using (norm_algebra_map_eq 𝕜' (1:𝕜))
lemma normed_algebra.norm_one_class : norm_one_class 𝕜' :=
⟨normed_algebra.norm_one 𝕜⟩
lemma normed_algebra.zero_ne_one : (0:𝕜') ≠ 1 :=
begin
refine (norm_pos_iff.mp _).symm,
rw @normed_algebra.norm_one 𝕜, norm_num,
end
lemma normed_algebra.nontrivial : nontrivial 𝕜' :=
⟨⟨0, 1, normed_algebra.zero_ne_one 𝕜⟩⟩
end normed_algebra
section restrict_scalars
variables (𝕜 : Type*) (𝕜' : Type*) [normed_field 𝕜] [normed_field 𝕜'] [normed_algebra 𝕜 𝕜']
(E : Type*) [normed_group E] [normed_space 𝕜' E]
/-- Warning: This declaration should be used judiciously.
Please consider using `is_scalar_tower` instead.
`𝕜`-normed space structure induced by a `𝕜'`-normed space structure when `𝕜'` is a
normed algebra over `𝕜`. Not registered as an instance as `𝕜'` cannot be inferred.
The type synonym `semimodule.restrict_scalars 𝕜 𝕜' E` will be endowed with this instance by default.
-/
def normed_space.restrict_scalars : normed_space 𝕜 E :=
{ norm_smul_le := λc x, le_of_eq $ begin
change ∥(algebra_map 𝕜 𝕜' c) • x∥ = ∥c∥ * ∥x∥,
simp [norm_smul]
end,
..restrict_scalars.semimodule 𝕜 𝕜' E }
instance {𝕜 : Type*} {𝕜' : Type*} {E : Type*} [I : normed_group E] :
normed_group (restrict_scalars 𝕜 𝕜' E) := I
instance semimodule.restrict_scalars.normed_space_orig {𝕜 : Type*} {𝕜' : Type*} {E : Type*}
[normed_field 𝕜'] [normed_group E] [I : normed_space 𝕜' E] :
normed_space 𝕜' (restrict_scalars 𝕜 𝕜' E) := I
instance : normed_space 𝕜 (restrict_scalars 𝕜 𝕜' E) :=
(normed_space.restrict_scalars 𝕜 𝕜' E : normed_space 𝕜 E)
end restrict_scalars
section summable
open_locale classical
open finset filter
variables [normed_group α] [normed_group β]
lemma cauchy_seq_finset_iff_vanishing_norm {f : ι → α} :
cauchy_seq (λ s : finset ι, ∑ i in s, f i) ↔
∀ε > (0 : ℝ), ∃s:finset ι, ∀t, disjoint t s → ∥ ∑ i in t, f i ∥ < ε :=
begin
rw [cauchy_seq_finset_iff_vanishing, nhds_basis_ball.forall_iff],
{ simp only [ball_0_eq, set.mem_set_of_eq] },
{ rintros s t hst ⟨s', hs'⟩,
exact ⟨s', λ t' ht', hst $ hs' _ ht'⟩ }
end
lemma summable_iff_vanishing_norm [complete_space α] {f : ι → α} :
summable f ↔ ∀ε > (0 : ℝ), ∃s:finset ι, ∀t, disjoint t s → ∥ ∑ i in t, f i ∥ < ε :=
by rw [summable_iff_cauchy_seq_finset, cauchy_seq_finset_iff_vanishing_norm]
lemma cauchy_seq_finset_of_norm_bounded {f : ι → α} (g : ι → ℝ) (hg : summable g)
(h : ∀i, ∥f i∥ ≤ g i) : cauchy_seq (λ s : finset ι, ∑ i in s, f i) :=
cauchy_seq_finset_iff_vanishing_norm.2 $ assume ε hε,
let ⟨s, hs⟩ := summable_iff_vanishing_norm.1 hg ε hε in
⟨s, assume t ht,
have ∥∑ i in t, g i∥ < ε := hs t ht,
have nn : 0 ≤ ∑ i in t, g i := finset.sum_nonneg (assume a _, le_trans (norm_nonneg _) (h a)),
lt_of_le_of_lt (norm_sum_le_of_le t (λ i _, h i)) $
by rwa [real.norm_eq_abs, abs_of_nonneg nn] at this⟩
lemma cauchy_seq_finset_of_summable_norm {f : ι → α} (hf : summable (λa, ∥f a∥)) :
cauchy_seq (λ s : finset ι, ∑ a in s, f a) :=
cauchy_seq_finset_of_norm_bounded _ hf (assume i, le_refl _)
/-- If a function `f` is summable in norm, and along some sequence of finsets exhausting the space
its partial sums converge to a limit `a`, then the same holds along all finsets, i.e., `f` is
summable with sum `a`. -/
lemma has_sum_of_subseq_of_summable {f : ι → α} (hf : summable (λa, ∥f a∥))
{s : γ → finset ι} {p : filter γ} [ne_bot p]
(hs : tendsto s p at_top) {a : α} (ha : tendsto (λ b, ∑ i in s b, f i) p (𝓝 a)) :
has_sum f a :=
tendsto_nhds_of_cauchy_seq_of_subseq (cauchy_seq_finset_of_summable_norm hf) hs ha
/-- If `∑' i, ∥f i∥` is summable, then `∥(∑' i, f i)∥ ≤ (∑' i, ∥f i∥)`. Note that we do not assume
that `∑' i, f i` is summable, and it might not be the case if `α` is not a complete space. -/
lemma norm_tsum_le_tsum_norm {f : ι → α} (hf : summable (λi, ∥f i∥)) :
∥(∑'i, f i)∥ ≤ (∑' i, ∥f i∥) :=
begin
by_cases h : summable f,
{ have h₁ : tendsto (λs:finset ι, ∥∑ i in s, f i∥) at_top (𝓝 ∥(∑' i, f i)∥) :=
(continuous_norm.tendsto _).comp h.has_sum,
have h₂ : tendsto (λs:finset ι, ∑ i in s, ∥f i∥) at_top (𝓝 (∑' i, ∥f i∥)) :=
hf.has_sum,
exact le_of_tendsto_of_tendsto' h₁ h₂ (assume s, norm_sum_le _ _) },
{ rw tsum_eq_zero_of_not_summable h,
simp [tsum_nonneg] }
end
lemma has_sum_iff_tendsto_nat_of_summable_norm {f : ℕ → α} {a : α} (hf : summable (λi, ∥f i∥)) :
has_sum f a ↔ tendsto (λn:ℕ, ∑ i in range n, f i) at_top (𝓝 a) :=
⟨λ h, h.tendsto_sum_nat,
λ h, has_sum_of_subseq_of_summable hf tendsto_finset_range h⟩
/-- The direct comparison test for series: if the norm of `f` is bounded by a real function `g`
which is summable, then `f` is summable. -/
lemma summable_of_norm_bounded
[complete_space α] {f : ι → α} (g : ι → ℝ) (hg : summable g) (h : ∀i, ∥f i∥ ≤ g i) :
summable f :=
by { rw summable_iff_cauchy_seq_finset, exact cauchy_seq_finset_of_norm_bounded g hg h }
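/- A typical application (a sketch with hypothetical hypothesis names): if
`h_geom : summable (λ n : ℕ, (1/2 : ℝ)^n)` and `h_bound : ∀ n, ∥f n∥ ≤ (1/2 : ℝ)^n`, then
`summable_of_norm_bounded _ h_geom h_bound` yields `summable f` without identifying the sum. -/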
/-- Quantitative result associated to the direct comparison test for series: If `∑' i, g i` is
summable, and for all `i`, `∥f i∥ ≤ g i`, then `∥(∑' i, f i)∥ ≤ (∑' i, g i)`. Note that we do not
assume that `∑' i, f i` is summable, and it might not be the case if `α` is not a complete space. -/
lemma tsum_of_norm_bounded {f : ι → α} {g : ι → ℝ} {a : ℝ} (hg : has_sum g a) (h : ∀i, ∥f i∥ ≤ g i) :
∥(∑' (i:ι), f i)∥ ≤ a :=
begin
have h' : summable (λ (i : ι), ∥f i∥),
{ let f' : ι → ℝ := λ i, ∥f i∥,
have h'' : ∀ i, ∥f' i∥ ≤ g i,
{ intros i,
convert h i,
simp },
simpa [f'] using summable_of_norm_bounded g hg.summable h'' },
have h1 : ∥(∑' (i:ι), f i)∥ ≤ ∑' (i:ι), ∥f i∥ := by simpa using norm_tsum_le_tsum_norm h',
have h2 := tsum_le_tsum h h' hg.summable,
have h3 : a = ∑' (i:ι), g i := (has_sum.tsum_eq hg).symm,
linarith
end
variable [complete_space α]
/-- Variant of the direct comparison test for series: if the norm of `f` is eventually bounded by a
real function `g` which is summable, then `f` is summable. -/
lemma summable_of_norm_bounded_eventually {f : ι → α} (g : ι → ℝ) (hg : summable g)
(h : ∀ᶠ i in cofinite, ∥f i∥ ≤ g i) : summable f :=
begin
replace h := mem_cofinite.1 h,
refine h.summable_compl_iff.mp _,
refine summable_of_norm_bounded _ (h.summable_compl_iff.mpr hg) _,
rintros ⟨a, h'⟩,
simpa using h'
end
lemma summable_of_nnnorm_bounded {f : ι → α} (g : ι → nnreal) (hg : summable g)
(h : ∀i, nnnorm (f i) ≤ g i) : summable f :=
summable_of_norm_bounded (λ i, (g i : ℝ)) (nnreal.summable_coe.2 hg) (λ i, by exact_mod_cast h i)
lemma summable_of_summable_norm {f : ι → α} (hf : summable (λa, ∥f a∥)) : summable f :=
summable_of_norm_bounded _ hf (assume i, le_refl _)
lemma summable_of_summable_nnnorm {f : ι → α} (hf : summable (λa, nnnorm (f a))) : summable f :=
summable_of_nnnorm_bounded _ hf (assume i, le_refl _)
end summable
/- ---- file: /library/init/meta/comp_value_tactics.lean (repo: Solertis/lean, Apache-2.0) ---- -/
/-
Copyright (c) 2016 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
prelude
import init.meta.tactic
meta constant mk_nat_val_ne_proof : expr → expr → option expr
meta constant mk_nat_val_lt_proof : expr → expr → option expr
meta constant mk_nat_val_le_proof : expr → expr → option expr
meta constant mk_fin_val_ne_proof : expr → expr → option expr
meta constant mk_char_val_ne_proof : expr → expr → option expr
meta constant mk_string_val_ne_proof : expr → expr → option expr
meta constant mk_int_val_ne_proof : expr → expr → option expr
namespace tactic
open expr
meta def comp_val : tactic unit :=
do t ← target,
guard (is_app t),
type ← infer_type t^.app_arg,
(do is_def_eq type (const `nat []),
(do (a, b) ← returnopt (is_ne t),
pr ← returnopt (mk_nat_val_ne_proof a b),
exact pr)
<|>
(do (a, b) ← returnopt (is_lt t),
pr ← returnopt (mk_nat_val_lt_proof a b),
exact pr)
<|>
(do (a, b) ← returnopt (is_gt t),
pr ← returnopt (mk_nat_val_lt_proof b a),
exact pr)
<|>
(do (a, b) ← returnopt (is_le t),
pr ← returnopt (mk_nat_val_le_proof a b),
exact pr)
<|>
(do (a, b) ← returnopt (is_ge t),
pr ← returnopt (mk_nat_val_le_proof b a),
exact pr))
<|>
(do is_def_eq type (const `char []),
(a, b) ← returnopt (is_ne t),
pr ← returnopt (mk_char_val_ne_proof a b),
exact pr)
<|>
(do is_def_eq type (const `string []),
(a, b) ← returnopt (is_ne t),
pr ← returnopt (mk_string_val_ne_proof a b),
exact pr)
<|>
(do is_def_eq type (const `int []),
(a, b) ← returnopt (is_ne t),
pr ← returnopt (mk_int_val_ne_proof a b),
exact pr)
<|>
(do type ← whnf type,
guard (is_napp_of type `fin 1),
(a, b) ← returnopt (is_ne t),
pr ← returnopt (mk_fin_val_ne_proof a b),
exact pr)
<|>
(do (a, b) ← returnopt (is_eq t),
unify a b, to_expr `(eq.refl %%a) >>= exact)
end tactic
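/- Usage sketch (assuming an interactive wrapper exposes `comp_val` to the tactic front end):
`comp_val` discharges goals about concrete literals by building the proof object directly, e.g.
goals of the form `(3 : ℕ) ≠ 5`, `(2 : ℕ) < 7`, `"a" ≠ "b"` or `(-1 : ℤ) ≠ 1`. -/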
/- ---- file: /src/measure_theory/function/l1_space.lean (repo: hikari0108/mathlib, Apache-2.0) ---- -/
/-
Copyright (c) 2019 Zhouhang Zhou. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Zhouhang Zhou
-/
import measure_theory.function.lp_space
/-!
# Integrable functions and `L¹` space
In the first part of this file, the predicate `integrable` is defined and basic properties of
integrable functions are proved.
Such a predicate is already available under the name `mem_ℒp 1`. We give a direct definition which
is easier to use, and show that it is equivalent to `mem_ℒp 1`.
In the second part, we establish an API between `integrable` and the space `L¹` of equivalence
classes of integrable functions, already defined as a special case of `L^p` spaces for `p = 1`.
## Notation
* `α →₁[μ] β` is the type of `L¹` space, where `α` is a `measure_space` and `β` is a `normed_group`
with a `second_countable_topology`. `f : α →ₘ β` is a "function" in `L¹`. In comments, `[f]` is
also used to denote an `L¹` function.
`₁` can be typed as `\1`.
## Main definitions
* Let `f : α → β` be a function, where `α` is a `measure_space` and `β` a `normed_group`.
Then `has_finite_integral f` means `(∫⁻ a, nnnorm (f a)) < ∞`.
* If `β` is moreover a `measurable_space` then `f` is called `integrable` if
`f` is `measurable` and `has_finite_integral f` holds.
## Implementation notes
To prove something for an arbitrary integrable function, a useful theorem is
`integrable.induction` in the file `set_integral`.
## Tags
integrable, function space, l1
-/
noncomputable theory
open_locale classical topological_space big_operators ennreal measure_theory nnreal
open set filter topological_space ennreal emetric measure_theory
variables {α β γ δ : Type*} {m : measurable_space α} {μ ν : measure α}
variables [normed_group β]
variables [normed_group γ]
namespace measure_theory
/-! ### Some results about the Lebesgue integral involving a normed group -/
lemma lintegral_nnnorm_eq_lintegral_edist (f : α → β) :
∫⁻ a, nnnorm (f a) ∂μ = ∫⁻ a, edist (f a) 0 ∂μ :=
by simp only [edist_eq_coe_nnnorm]
lemma lintegral_norm_eq_lintegral_edist (f : α → β) :
∫⁻ a, (ennreal.of_real ∥f a∥) ∂μ = ∫⁻ a, edist (f a) 0 ∂μ :=
by simp only [of_real_norm_eq_coe_nnnorm, edist_eq_coe_nnnorm]
lemma lintegral_edist_triangle [second_countable_topology β] [measurable_space β]
[opens_measurable_space β] {f g h : α → β}
(hf : ae_measurable f μ) (hg : ae_measurable g μ) (hh : ae_measurable h μ) :
∫⁻ a, edist (f a) (g a) ∂μ ≤ ∫⁻ a, edist (f a) (h a) ∂μ + ∫⁻ a, edist (g a) (h a) ∂μ :=
begin
rw ← lintegral_add' (hf.edist hh) (hg.edist hh),
refine lintegral_mono (λ a, _),
apply edist_triangle_right
end
lemma lintegral_nnnorm_zero : ∫⁻ a : α, nnnorm (0 : β) ∂μ = 0 := by simp
lemma lintegral_nnnorm_add [measurable_space β] [opens_measurable_space β]
[measurable_space γ] [opens_measurable_space γ]
{f : α → β} {g : α → γ} (hf : ae_measurable f μ) (hg : ae_measurable g μ) :
∫⁻ a, nnnorm (f a) + nnnorm (g a) ∂μ = ∫⁻ a, nnnorm (f a) ∂μ + ∫⁻ a, nnnorm (g a) ∂μ :=
lintegral_add' hf.ennnorm hg.ennnorm
lemma lintegral_nnnorm_neg {f : α → β} :
∫⁻ a, nnnorm ((-f) a) ∂μ = ∫⁻ a, nnnorm (f a) ∂μ :=
by simp only [pi.neg_apply, nnnorm_neg]
/-! ### The predicate `has_finite_integral` -/
/-- `has_finite_integral f μ` means that the integral `∫⁻ a, ∥f a∥ ∂μ` is finite.
`has_finite_integral f` means `has_finite_integral f volume`. -/
def has_finite_integral {m : measurable_space α} (f : α → β) (μ : measure α . volume_tac) : Prop :=
∫⁻ a, nnnorm (f a) ∂μ < ∞
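/- Illustration (informal): for real-valued `f` this is finiteness of the lower Lebesgue integral
of `∥f∥`. For instance a nonzero constant function has finite integral precisely when the measure
is finite (see `has_finite_integral_const_iff` below), so `λ x : ℝ, (1 : ℝ)` fails for Lebesgue
measure on `ℝ` but holds for any finite measure. -/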
lemma has_finite_integral_iff_norm (f : α → β) :
has_finite_integral f μ ↔ ∫⁻ a, (ennreal.of_real ∥f a∥) ∂μ < ∞ :=
by simp only [has_finite_integral, of_real_norm_eq_coe_nnnorm]
lemma has_finite_integral_iff_edist (f : α → β) :
has_finite_integral f μ ↔ ∫⁻ a, edist (f a) 0 ∂μ < ∞ :=
by simp only [has_finite_integral_iff_norm, edist_dist, dist_zero_right]
lemma has_finite_integral_iff_of_real {f : α → ℝ} (h : 0 ≤ᵐ[μ] f) :
has_finite_integral f μ ↔ ∫⁻ a, ennreal.of_real (f a) ∂μ < ∞ :=
have lintegral_eq : ∫⁻ a, (ennreal.of_real ∥f a∥) ∂μ = ∫⁻ a, ennreal.of_real (f a) ∂μ :=
begin
refine lintegral_congr_ae (h.mono $ λ a h, _),
rwa [real.norm_eq_abs, abs_of_nonneg]
end,
by rw [has_finite_integral_iff_norm, lintegral_eq]
lemma has_finite_integral_iff_of_nnreal {f : α → ℝ≥0} :
has_finite_integral (λ x, (f x : ℝ)) μ ↔ ∫⁻ a, f a ∂μ < ∞ :=
by simp [has_finite_integral_iff_norm]
lemma has_finite_integral.mono {f : α → β} {g : α → γ} (hg : has_finite_integral g μ)
(h : ∀ᵐ a ∂μ, ∥f a∥ ≤ ∥g a∥) : has_finite_integral f μ :=
begin
simp only [has_finite_integral_iff_norm] at *,
calc ∫⁻ a, (ennreal.of_real ∥f a∥) ∂μ ≤ ∫⁻ (a : α), (ennreal.of_real ∥g a∥) ∂μ :
lintegral_mono_ae (h.mono $ assume a h, of_real_le_of_real h)
... < ∞ : hg
end
lemma has_finite_integral.mono' {f : α → β} {g : α → ℝ} (hg : has_finite_integral g μ)
(h : ∀ᵐ a ∂μ, ∥f a∥ ≤ g a) : has_finite_integral f μ :=
hg.mono $ h.mono $ λ x hx, le_trans hx (le_abs_self _)
lemma has_finite_integral.congr' {f : α → β} {g : α → γ} (hf : has_finite_integral f μ)
(h : ∀ᵐ a ∂μ, ∥f a∥ = ∥g a∥) :
has_finite_integral g μ :=
hf.mono $ eventually_eq.le $ eventually_eq.symm h
lemma has_finite_integral_congr' {f : α → β} {g : α → γ} (h : ∀ᵐ a ∂μ, ∥f a∥ = ∥g a∥) :
has_finite_integral f μ ↔ has_finite_integral g μ :=
⟨λ hf, hf.congr' h, λ hg, hg.congr' $ eventually_eq.symm h⟩
lemma has_finite_integral.congr {f g : α → β} (hf : has_finite_integral f μ) (h : f =ᵐ[μ] g) :
has_finite_integral g μ :=
hf.congr' $ h.fun_comp norm
lemma has_finite_integral_congr {f g : α → β} (h : f =ᵐ[μ] g) :
has_finite_integral f μ ↔ has_finite_integral g μ :=
has_finite_integral_congr' $ h.fun_comp norm
lemma has_finite_integral_const_iff {c : β} :
has_finite_integral (λ x : α, c) μ ↔ c = 0 ∨ μ univ < ∞ :=
begin
simp only [has_finite_integral, lintegral_const],
by_cases hc : c = 0,
{ simp [hc] },
{ simp only [hc, false_or],
refine ⟨λ h, _, λ h, mul_lt_top coe_lt_top h⟩,
replace h := mul_lt_top (@coe_lt_top $ (nnnorm c)⁻¹) h,
rwa [← mul_assoc, ← coe_mul, _root_.inv_mul_cancel, coe_one, one_mul] at h,
rwa [ne.def, nnnorm_eq_zero] }
end
lemma has_finite_integral_const [is_finite_measure μ] (c : β) :
has_finite_integral (λ x : α, c) μ :=
has_finite_integral_const_iff.2 (or.inr $ measure_lt_top _ _)
lemma has_finite_integral_of_bounded [is_finite_measure μ] {f : α → β} {C : ℝ}
(hC : ∀ᵐ a ∂μ, ∥f a∥ ≤ C) : has_finite_integral f μ :=
(has_finite_integral_const C).mono' hC
lemma has_finite_integral.mono_measure {f : α → β} (h : has_finite_integral f ν) (hμ : μ ≤ ν) :
has_finite_integral f μ :=
lt_of_le_of_lt (lintegral_mono' hμ (le_refl _)) h
lemma has_finite_integral.add_measure {f : α → β} (hμ : has_finite_integral f μ)
(hν : has_finite_integral f ν) : has_finite_integral f (μ + ν) :=
begin
simp only [has_finite_integral, lintegral_add_measure] at *,
exact add_lt_top.2 ⟨hμ, hν⟩
end
lemma has_finite_integral.left_of_add_measure {f : α → β} (h : has_finite_integral f (μ + ν)) :
has_finite_integral f μ :=
h.mono_measure $ measure.le_add_right $ le_refl _
lemma has_finite_integral.right_of_add_measure {f : α → β} (h : has_finite_integral f (μ + ν)) :
has_finite_integral f ν :=
h.mono_measure $ measure.le_add_left $ le_refl _
@[simp] lemma has_finite_integral_add_measure {f : α → β} :
has_finite_integral f (μ + ν) ↔ has_finite_integral f μ ∧ has_finite_integral f ν :=
⟨λ h, ⟨h.left_of_add_measure, h.right_of_add_measure⟩, λ h, h.1.add_measure h.2⟩
lemma has_finite_integral.smul_measure {f : α → β} (h : has_finite_integral f μ) {c : ℝ≥0∞}
(hc : c < ∞) : has_finite_integral f (c • μ) :=
begin
simp only [has_finite_integral, lintegral_smul_measure] at *,
exact mul_lt_top hc h
end
@[simp] lemma has_finite_integral_zero_measure {m : measurable_space α} (f : α → β) :
has_finite_integral f (0 : measure α) :=
by simp only [has_finite_integral, lintegral_zero_measure, with_top.zero_lt_top]
variables (α β μ)
@[simp] lemma has_finite_integral_zero : has_finite_integral (λa:α, (0:β)) μ :=
by simp [has_finite_integral]
variables {α β μ}
lemma has_finite_integral.neg {f : α → β} (hfi : has_finite_integral f μ) :
has_finite_integral (-f) μ :=
by simpa [has_finite_integral] using hfi
@[simp] lemma has_finite_integral_neg_iff {f : α → β} :
has_finite_integral (-f) μ ↔ has_finite_integral f μ :=
⟨λ h, neg_neg f ▸ h.neg, has_finite_integral.neg⟩
lemma has_finite_integral.norm {f : α → β} (hfi : has_finite_integral f μ) :
has_finite_integral (λa, ∥f a∥) μ :=
have eq : (λa, (nnnorm ∥f a∥ : ℝ≥0∞)) = λa, (nnnorm (f a) : ℝ≥0∞),
by { funext, rw nnnorm_norm },
by { rwa [has_finite_integral, eq] }
lemma has_finite_integral_norm_iff (f : α → β) :
has_finite_integral (λa, ∥f a∥) μ ↔ has_finite_integral f μ :=
has_finite_integral_congr' $ eventually_of_forall $ λ x, norm_norm (f x)
section dominated_convergence
variables {F : ℕ → α → β} {f : α → β} {bound : α → ℝ}
lemma all_ae_of_real_F_le_bound (h : ∀ n, ∀ᵐ a ∂μ, ∥F n a∥ ≤ bound a) :
∀ n, ∀ᵐ a ∂μ, ennreal.of_real ∥F n a∥ ≤ ennreal.of_real (bound a) :=
λn, (h n).mono $ λ a h, ennreal.of_real_le_of_real h
lemma all_ae_tendsto_of_real_norm (h : ∀ᵐ a ∂μ, tendsto (λ n, F n a) at_top $ 𝓝 $ f a) :
∀ᵐ a ∂μ, tendsto (λn, ennreal.of_real ∥F n a∥) at_top $ 𝓝 $ ennreal.of_real ∥f a∥ :=
h.mono $
λ a h, tendsto_of_real $ tendsto.comp (continuous.tendsto continuous_norm _) h
lemma all_ae_of_real_f_le_bound (h_bound : ∀ n, ∀ᵐ a ∂μ, ∥F n a∥ ≤ bound a)
(h_lim : ∀ᵐ a ∂μ, tendsto (λ n, F n a) at_top (𝓝 (f a))) :
∀ᵐ a ∂μ, ennreal.of_real ∥f a∥ ≤ ennreal.of_real (bound a) :=
begin
have F_le_bound := all_ae_of_real_F_le_bound h_bound,
rw ← ae_all_iff at F_le_bound,
apply F_le_bound.mp ((all_ae_tendsto_of_real_norm h_lim).mono _),
assume a tendsto_norm F_le_bound,
exact le_of_tendsto' tendsto_norm (F_le_bound)
end
lemma has_finite_integral_of_dominated_convergence {F : ℕ → α → β} {f : α → β} {bound : α → ℝ}
(bound_has_finite_integral : has_finite_integral bound μ)
(h_bound : ∀ n, ∀ᵐ a ∂μ, ∥F n a∥ ≤ bound a)
(h_lim : ∀ᵐ a ∂μ, tendsto (λ n, F n a) at_top (𝓝 (f a))) :
has_finite_integral f μ :=
/- `∥F n a∥ ≤ bound a` and `∥F n a∥ --> ∥f a∥` imply `∥f a∥ ≤ bound a`,
and so `∫ ∥f∥ ≤ ∫ bound < ∞`, since `bound` has finite integral. -/
begin
rw has_finite_integral_iff_norm,
calc ∫⁻ a, (ennreal.of_real ∥f a∥) ∂μ ≤ ∫⁻ a, ennreal.of_real (bound a) ∂μ :
lintegral_mono_ae $ all_ae_of_real_f_le_bound h_bound h_lim
... < ∞ :
begin
rw ← has_finite_integral_iff_of_real,
{ exact bound_has_finite_integral },
exact (h_bound 0).mono (λ a h, le_trans (norm_nonneg _) h)
end
end
lemma tendsto_lintegral_norm_of_dominated_convergence [measurable_space β]
[borel_space β] [second_countable_topology β]
{F : ℕ → α → β} {f : α → β} {bound : α → ℝ}
(F_measurable : ∀ n, ae_measurable (F n) μ)
(f_measurable : ae_measurable f μ)
(bound_has_finite_integral : has_finite_integral bound μ)
(h_bound : ∀ n, ∀ᵐ a ∂μ, ∥F n a∥ ≤ bound a)
(h_lim : ∀ᵐ a ∂μ, tendsto (λ n, F n a) at_top (𝓝 (f a))) :
tendsto (λn, ∫⁻ a, (ennreal.of_real ∥F n a - f a∥) ∂μ) at_top (𝓝 0) :=
let b := λa, 2 * ennreal.of_real (bound a) in
/- `∥F n a∥ ≤ bound a` and `F n a --> f a` imply `∥f a∥ ≤ bound a`, and thus by the
  triangle inequality we have `∥F n a - f a∥ ≤ 2 * (bound a)`. -/
have hb : ∀ n, ∀ᵐ a ∂μ, ennreal.of_real ∥F n a - f a∥ ≤ b a,
begin
assume n,
filter_upwards [all_ae_of_real_F_le_bound h_bound n, all_ae_of_real_f_le_bound h_bound h_lim],
assume a h₁ h₂,
calc ennreal.of_real ∥F n a - f a∥ ≤ (ennreal.of_real ∥F n a∥) + (ennreal.of_real ∥f a∥) :
begin
rw [← ennreal.of_real_add],
apply of_real_le_of_real,
{ apply norm_sub_le }, { exact norm_nonneg _ }, { exact norm_nonneg _ }
end
... ≤ (ennreal.of_real (bound a)) + (ennreal.of_real (bound a)) : add_le_add h₁ h₂
... = b a : by rw ← two_mul
end,
/- On the other hand, `F n a --> f a` implies that `∥F n a - f a∥ --> 0` -/
have h : ∀ᵐ a ∂μ, tendsto (λ n, ennreal.of_real ∥F n a - f a∥) at_top (𝓝 0),
begin
rw ← ennreal.of_real_zero,
refine h_lim.mono (λ a h, (continuous_of_real.tendsto _).comp _),
rwa ← tendsto_iff_norm_tendsto_zero
end,
/- Therefore, by the dominated convergence theorem for nonnegative integration, we get
  `∫ ∥F n a - f a∥ --> 0`. -/
begin
suffices h : tendsto (λn, ∫⁻ a, (ennreal.of_real ∥F n a - f a∥) ∂μ) at_top (𝓝 (∫⁻ (a:α), 0 ∂μ)),
{ rwa lintegral_zero at h },
-- Using the dominated convergence theorem.
refine tendsto_lintegral_of_dominated_convergence' _ _ hb _ _,
-- Show `λa, ∥f a - F n a∥` is almost everywhere measurable for all `n`
{ exact λn, measurable_of_real.comp_ae_measurable ((F_measurable n).sub f_measurable).norm },
  -- Show `2 * bound` has finite integral
{ rw has_finite_integral_iff_of_real at bound_has_finite_integral,
{ calc ∫⁻ a, b a ∂μ = 2 * ∫⁻ a, ennreal.of_real (bound a) ∂μ :
by { rw lintegral_const_mul', exact coe_ne_top }
... < ∞ : mul_lt_top (coe_lt_top) bound_has_finite_integral },
filter_upwards [h_bound 0] λ a h, le_trans (norm_nonneg _) h },
-- Show `∥f a - F n a∥ --> 0`
{ exact h }
end
end dominated_convergence
section pos_part
/-! Lemmas used for defining the positive part of a `L¹` function -/
lemma has_finite_integral.max_zero {f : α → ℝ} (hf : has_finite_integral f μ) :
has_finite_integral (λa, max (f a) 0) μ :=
hf.mono $ eventually_of_forall $ λ x, by simp [real.norm_eq_abs, abs_le, abs_nonneg, le_abs_self]
lemma has_finite_integral.min_zero {f : α → ℝ} (hf : has_finite_integral f μ) :
has_finite_integral (λa, min (f a) 0) μ :=
hf.mono $ eventually_of_forall $ λ x,
by simp [real.norm_eq_abs, abs_le, abs_nonneg, neg_le, neg_le_abs_self]
end pos_part
section normed_space
variables {𝕜 : Type*} [normed_field 𝕜] [normed_space 𝕜 β]
lemma has_finite_integral.smul (c : 𝕜) {f : α → β} : has_finite_integral f μ →
has_finite_integral (c • f) μ :=
begin
simp only [has_finite_integral], assume hfi,
calc
∫⁻ (a : α), nnnorm (c • f a) ∂μ = ∫⁻ (a : α), (nnnorm c) * nnnorm (f a) ∂μ :
by simp only [nnnorm_smul, ennreal.coe_mul]
... < ∞ :
begin
rw lintegral_const_mul',
exacts [mul_lt_top coe_lt_top hfi, coe_ne_top]
end
end
lemma has_finite_integral_smul_iff {c : 𝕜} (hc : c ≠ 0) (f : α → β) :
has_finite_integral (c • f) μ ↔ has_finite_integral f μ :=
begin
split,
{ assume h,
simpa only [smul_smul, inv_mul_cancel hc, one_smul] using h.smul c⁻¹ },
exact has_finite_integral.smul _
end
lemma has_finite_integral.const_mul {f : α → ℝ} (h : has_finite_integral f μ) (c : ℝ) :
has_finite_integral (λ x, c * f x) μ :=
(has_finite_integral.smul c h : _)
lemma has_finite_integral.mul_const {f : α → ℝ} (h : has_finite_integral f μ) (c : ℝ) :
has_finite_integral (λ x, f x * c) μ :=
by simp_rw [mul_comm, h.const_mul _]
end normed_space
/-! ### The predicate `integrable` -/
variables [measurable_space β] [measurable_space γ] [measurable_space δ]
/-- `integrable f μ` means that `f` is measurable and that the integral `∫⁻ a, ∥f a∥ ∂μ` is finite.
`integrable f` means `integrable f volume`. -/
def integrable {α} {m : measurable_space α} (f : α → β) (μ : measure α . volume_tac) : Prop :=
ae_measurable f μ ∧ has_finite_integral f μ
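/- Illustration (informal): `integrable f μ` is the finiteness condition above together with
a.e.-measurability. For example, by `integrable_const_iff` below, a constant `c` is integrable
iff `c = 0` or `μ univ < ∞`; in particular every bounded a.e.-measurable function is integrable
with respect to a finite measure. -/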
lemma integrable.ae_measurable {f : α → β} (hf : integrable f μ) : ae_measurable f μ := hf.1
lemma integrable.has_finite_integral {f : α → β} (hf : integrable f μ) : has_finite_integral f μ :=
hf.2
lemma integrable.mono {f : α → β} {g : α → γ} (hg : integrable g μ) (hf : ae_measurable f μ)
(h : ∀ᵐ a ∂μ, ∥f a∥ ≤ ∥g a∥) : integrable f μ :=
⟨hf, hg.has_finite_integral.mono h⟩
lemma integrable.mono' {f : α → β} {g : α → ℝ} (hg : integrable g μ) (hf : ae_measurable f μ)
(h : ∀ᵐ a ∂μ, ∥f a∥ ≤ g a) : integrable f μ :=
⟨hf, hg.has_finite_integral.mono' h⟩
lemma integrable.congr' {f : α → β} {g : α → γ} (hf : integrable f μ) (hg : ae_measurable g μ)
(h : ∀ᵐ a ∂μ, ∥f a∥ = ∥g a∥) : integrable g μ :=
⟨hg, hf.has_finite_integral.congr' h⟩
lemma integrable_congr' {f : α → β} {g : α → γ} (hf : ae_measurable f μ) (hg : ae_measurable g μ)
(h : ∀ᵐ a ∂μ, ∥f a∥ = ∥g a∥) : integrable f μ ↔ integrable g μ :=
⟨λ h2f, h2f.congr' hg h, λ h2g, h2g.congr' hf $ eventually_eq.symm h⟩
lemma integrable.congr {f g : α → β} (hf : integrable f μ) (h : f =ᵐ[μ] g) :
integrable g μ :=
⟨hf.1.congr h, hf.2.congr h⟩
lemma integrable_congr {f g : α → β} (h : f =ᵐ[μ] g) :
integrable f μ ↔ integrable g μ :=
⟨λ hf, hf.congr h, λ hg, hg.congr h.symm⟩
lemma integrable_const_iff {c : β} : integrable (λ x : α, c) μ ↔ c = 0 ∨ μ univ < ∞ :=
begin
have : ae_measurable (λ (x : α), c) μ := measurable_const.ae_measurable,
rw [integrable, and_iff_right this, has_finite_integral_const_iff]
end
lemma integrable_const [is_finite_measure μ] (c : β) : integrable (λ x : α, c) μ :=
integrable_const_iff.2 $ or.inr $ measure_lt_top _ _
lemma integrable.mono_measure {f : α → β} (h : integrable f ν) (hμ : μ ≤ ν) : integrable f μ :=
⟨h.ae_measurable.mono_measure hμ, h.has_finite_integral.mono_measure hμ⟩
lemma integrable.add_measure {f : α → β} (hμ : integrable f μ) (hν : integrable f ν) :
integrable f (μ + ν) :=
⟨hμ.ae_measurable.add_measure hν.ae_measurable,
hμ.has_finite_integral.add_measure hν.has_finite_integral⟩
lemma integrable.left_of_add_measure {f : α → β} (h : integrable f (μ + ν)) : integrable f μ :=
h.mono_measure $ measure.le_add_right $ le_refl _
lemma integrable.right_of_add_measure {f : α → β} (h : integrable f (μ + ν)) : integrable f ν :=
h.mono_measure $ measure.le_add_left $ le_refl _
@[simp] lemma integrable_add_measure {f : α → β} :
integrable f (μ + ν) ↔ integrable f μ ∧ integrable f ν :=
⟨λ h, ⟨h.left_of_add_measure, h.right_of_add_measure⟩, λ h, h.1.add_measure h.2⟩
lemma integrable.smul_measure {f : α → β} (h : integrable f μ) {c : ℝ≥0∞} (hc : c < ∞) :
integrable f (c • μ) :=
⟨h.ae_measurable.smul_measure c, h.has_finite_integral.smul_measure hc⟩
lemma integrable_map_measure [opens_measurable_space β] {f : α → δ} {g : δ → β}
(hg : ae_measurable g (measure.map f μ)) (hf : measurable f) :
integrable g (measure.map f μ) ↔ integrable (g ∘ f) μ :=
by simp [integrable, hg, hg.comp_measurable hf, has_finite_integral, lintegral_map' hg.ennnorm hf]
lemma lintegral_edist_lt_top [second_countable_topology β] [opens_measurable_space β] {f g : α → β}
(hf : integrable f μ) (hg : integrable g μ) :
∫⁻ a, edist (f a) (g a) ∂μ < ∞ :=
lt_of_le_of_lt
(lintegral_edist_triangle hf.ae_measurable hg.ae_measurable
(measurable_const.ae_measurable : ae_measurable (λa, (0 : β)) μ))
(ennreal.add_lt_top.2 $ by { simp_rw ← has_finite_integral_iff_edist,
exact ⟨hf.has_finite_integral, hg.has_finite_integral⟩ })
variables (α β μ)
@[simp] lemma integrable_zero : integrable (λ _, (0 : β)) μ :=
by simp [integrable, measurable_const.ae_measurable]
variables {α β μ}
lemma integrable.add' [opens_measurable_space β] {f g : α → β} (hf : integrable f μ)
(hg : integrable g μ) :
has_finite_integral (f + g) μ :=
calc ∫⁻ a, nnnorm (f a + g a) ∂μ ≤ ∫⁻ a, nnnorm (f a) + nnnorm (g a) ∂μ :
lintegral_mono (λ a, by exact_mod_cast nnnorm_add_le _ _)
... = _ : lintegral_nnnorm_add hf.ae_measurable hg.ae_measurable
... < ∞ : add_lt_top.2 ⟨hf.has_finite_integral, hg.has_finite_integral⟩
lemma integrable.add [borel_space β] [second_countable_topology β]
{f g : α → β} (hf : integrable f μ) (hg : integrable g μ) : integrable (f + g) μ :=
⟨hf.ae_measurable.add hg.ae_measurable, hf.add' hg⟩
lemma integrable_finset_sum {ι} [borel_space β] [second_countable_topology β] (s : finset ι)
{f : ι → α → β} (hf : ∀ i, integrable (f i) μ) : integrable (λ a, ∑ i in s, f i a) μ :=
begin
refine finset.induction_on s _ _,
{ simp only [finset.sum_empty, integrable_zero] },
{ assume i s his ih, simp only [his, finset.sum_insert, not_false_iff],
exact (hf _).add ih }
end
lemma integrable.neg [borel_space β] {f : α → β} (hf : integrable f μ) : integrable (-f) μ :=
⟨hf.ae_measurable.neg, hf.has_finite_integral.neg⟩
@[simp] lemma integrable_neg_iff [borel_space β] {f : α → β} :
integrable (-f) μ ↔ integrable f μ :=
⟨λ h, neg_neg f ▸ h.neg, integrable.neg⟩
lemma integrable.sub' [opens_measurable_space β] {f g : α → β}
(hf : integrable f μ) (hg : integrable g μ) : has_finite_integral (f - g) μ :=
calc ∫⁻ a, nnnorm (f a - g a) ∂μ ≤ ∫⁻ a, nnnorm (f a) + nnnorm (-g a) ∂μ :
lintegral_mono (assume a, by { simp only [sub_eq_add_neg], exact_mod_cast nnnorm_add_le _ _ } )
... = _ :
by { simp only [nnnorm_neg], exact lintegral_nnnorm_add hf.ae_measurable hg.ae_measurable }
... < ∞ : add_lt_top.2 ⟨hf.has_finite_integral, hg.has_finite_integral⟩
lemma integrable.sub [borel_space β] [second_countable_topology β] {f g : α → β}
(hf : integrable f μ) (hg : integrable g μ) : integrable (f - g) μ :=
by simpa only [sub_eq_add_neg] using hf.add hg.neg
lemma integrable.norm [opens_measurable_space β] {f : α → β} (hf : integrable f μ) :
integrable (λa, ∥f a∥) μ :=
⟨hf.ae_measurable.norm, hf.has_finite_integral.norm⟩
lemma integrable_norm_iff [opens_measurable_space β] {f : α → β} (hf : ae_measurable f μ) :
integrable (λa, ∥f a∥) μ ↔ integrable f μ :=
by simp_rw [integrable, and_iff_right hf, and_iff_right hf.norm, has_finite_integral_norm_iff]
lemma integrable_of_norm_sub_le [opens_measurable_space β] {f₀ f₁ : α → β} {g : α → ℝ}
(hf₁_m : ae_measurable f₁ μ)
(hf₀_i : integrable f₀ μ)
(hg_i : integrable g μ)
(h : ∀ᵐ a ∂μ, ∥f₀ a - f₁ a∥ ≤ g a) :
integrable f₁ μ :=
begin
have : ∀ᵐ a ∂μ, ∥f₁ a∥ ≤ ∥f₀ a∥ + g a,
{ apply h.mono,
intros a ha,
calc ∥f₁ a∥ ≤ ∥f₀ a∥ + ∥f₀ a - f₁ a∥ : norm_le_insert _ _
... ≤ ∥f₀ a∥ + g a : add_le_add_left ha _ },
exact integrable.mono' (hf₀_i.norm.add hg_i) hf₁_m this
end
lemma integrable.prod_mk [opens_measurable_space β] [opens_measurable_space γ] {f : α → β}
{g : α → γ} (hf : integrable f μ) (hg : integrable g μ) :
integrable (λ x, (f x, g x)) μ :=
⟨hf.ae_measurable.prod_mk hg.ae_measurable,
(hf.norm.add' hg.norm).mono $ eventually_of_forall $ λ x,
calc max ∥f x∥ ∥g x∥ ≤ ∥f x∥ + ∥g x∥ : max_le_add_of_nonneg (norm_nonneg _) (norm_nonneg _)
... ≤ ∥(∥f x∥ + ∥g x∥)∥ : le_abs_self _⟩
lemma mem_ℒp_one_iff_integrable {f : α → β} : mem_ℒp f 1 μ ↔ integrable f μ :=
by simp_rw [integrable, has_finite_integral, mem_ℒp, snorm_one_eq_lintegral_nnnorm]
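/- Note: this equivalence is the bridge to the general `Lp` theory. A typical use (a sketch,
assuming `f` and the instances of this section):
  example (hf : mem_ℒp f 1 μ) : integrable f μ := mem_ℒp_one_iff_integrable.mp hf -/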
lemma mem_ℒp.integrable [borel_space β] {q : ℝ≥0∞} (hq1 : 1 ≤ q) {f : α → β} [is_finite_measure μ]
(hfq : mem_ℒp f q μ) : integrable f μ :=
mem_ℒp_one_iff_integrable.mp (hfq.mem_ℒp_of_exponent_le hq1)
lemma lipschitz_with.integrable_comp_iff_of_antilipschitz [complete_space β] [borel_space β]
[borel_space γ] {K K'} {f : α → β} {g : β → γ} (hg : lipschitz_with K g)
(hg' : antilipschitz_with K' g) (g0 : g 0 = 0) :
integrable (g ∘ f) μ ↔ integrable f μ :=
by simp [← mem_ℒp_one_iff_integrable, hg.mem_ℒp_comp_iff_of_antilipschitz hg' g0]
lemma integrable.real_to_nnreal {f : α → ℝ} (hf : integrable f μ) :
integrable (λ x, ((f x).to_nnreal : ℝ)) μ :=
begin
refine ⟨hf.ae_measurable.real_to_nnreal.coe_nnreal_real, _⟩,
rw has_finite_integral_iff_norm,
refine lt_of_le_of_lt _ ((has_finite_integral_iff_norm _).1 hf.has_finite_integral),
apply lintegral_mono,
assume x,
simp [real.norm_eq_abs, ennreal.of_real_le_of_real, abs_le, abs_nonneg, le_abs_self],
end
section pos_part
/-! ### Lemmas used for defining the positive part of a `L¹` function -/
lemma integrable.max_zero {f : α → ℝ} (hf : integrable f μ) : integrable (λa, max (f a) 0) μ :=
⟨hf.ae_measurable.max measurable_const.ae_measurable, hf.has_finite_integral.max_zero⟩
lemma integrable.min_zero {f : α → ℝ} (hf : integrable f μ) : integrable (λa, min (f a) 0) μ :=
⟨hf.ae_measurable.min measurable_const.ae_measurable, hf.has_finite_integral.min_zero⟩
end pos_part
section normed_space
variables {𝕜 : Type*} [normed_field 𝕜] [normed_space 𝕜 β] [measurable_space 𝕜]
[opens_measurable_space 𝕜]
lemma integrable.smul [borel_space β] (c : 𝕜) {f : α → β}
(hf : integrable f μ) : integrable (c • f) μ :=
⟨hf.ae_measurable.const_smul c, hf.has_finite_integral.smul c⟩
lemma integrable_smul_iff [borel_space β] {c : 𝕜} (hc : c ≠ 0) (f : α → β) :
integrable (c • f) μ ↔ integrable f μ :=
and_congr (ae_measurable_const_smul_iff' hc) (has_finite_integral_smul_iff hc f)
lemma integrable.const_mul {f : α → ℝ} (h : integrable f μ) (c : ℝ) :
integrable (λ x, c * f x) μ :=
integrable.smul c h
lemma integrable.mul_const {f : α → ℝ} (h : integrable f μ) (c : ℝ) :
integrable (λ x, f x * c) μ :=
by simp_rw [mul_comm, h.const_mul _]
end normed_space
section normed_space_over_complete_field
variables {𝕜 : Type*} [nondiscrete_normed_field 𝕜] [complete_space 𝕜] [measurable_space 𝕜]
variables [borel_space 𝕜]
variables {E : Type*} [normed_group E] [normed_space 𝕜 E] [measurable_space E] [borel_space E]
lemma integrable_smul_const {f : α → 𝕜} {c : E} (hc : c ≠ 0) :
integrable (λ x, f x • c) μ ↔ integrable f μ :=
begin
simp_rw [integrable, ae_measurable_smul_const hc, and.congr_right_iff, has_finite_integral,
nnnorm_smul, ennreal.coe_mul],
intro hf, rw [lintegral_mul_const' _ _ ennreal.coe_ne_top, ennreal.mul_lt_top_iff],
have : ∀ x : ℝ≥0∞, x = 0 → x < ∞ := by simp,
simp [hc, or_iff_left_of_imp (this _)]
end
end normed_space_over_complete_field
section is_R_or_C
variables {𝕜 : Type*} [is_R_or_C 𝕜] [measurable_space 𝕜] [opens_measurable_space 𝕜] {f : α → 𝕜}
lemma integrable.re (hf : integrable f μ) : integrable (λ x, is_R_or_C.re (f x)) μ :=
by { rw ← mem_ℒp_one_iff_integrable at hf ⊢, exact hf.re, }
lemma integrable.im (hf : integrable f μ) : integrable (λ x, is_R_or_C.im (f x)) μ :=
by { rw ← mem_ℒp_one_iff_integrable at hf ⊢, exact hf.im, }
end is_R_or_C
section inner_product
variables {𝕜 E : Type*} [is_R_or_C 𝕜] [measurable_space 𝕜] [borel_space 𝕜]
[inner_product_space 𝕜 E]
[measurable_space E] [opens_measurable_space E] [second_countable_topology E]
{f : α → E}
local notation `⟪`x`, `y`⟫` := @inner 𝕜 E _ x y
lemma integrable.const_inner (c : E) (hf : integrable f μ) : integrable (λ x, ⟪c, f x⟫) μ :=
by { rw ← mem_ℒp_one_iff_integrable at hf ⊢, exact hf.const_inner c, }
lemma integrable.inner_const (hf : integrable f μ) (c : E) : integrable (λ x, ⟪f x, c⟫) μ :=
by { rw ← mem_ℒp_one_iff_integrable at hf ⊢, exact hf.inner_const c, }
end inner_product
section trim
variables {H : Type*} [normed_group H] [measurable_space H] [opens_measurable_space H]
{m0 : measurable_space α} {μ' : measure α} {f : α → H}
lemma integrable.trim (hm : m ≤ m0) (hf_int : integrable f μ') (hf : @measurable _ _ m _ f) :
integrable f (μ'.trim hm) :=
begin
refine ⟨measurable.ae_measurable hf, _⟩,
rw [has_finite_integral, lintegral_trim hm _],
{ exact hf_int.2, },
{ exact @measurable.coe_nnreal_ennreal α m _ (@measurable.nnnorm _ α _ _ _ m _ hf), },
end
lemma integrable_of_integrable_trim (hm : m ≤ m0) (hf_int : integrable f (μ'.trim hm)) :
integrable f μ' :=
begin
obtain ⟨hf_meas_ae, hf⟩ := hf_int,
refine ⟨ae_measurable_of_ae_measurable_trim hm hf_meas_ae, _⟩,
rw has_finite_integral at hf ⊢,
rwa lintegral_trim_ae hm _ at hf,
exact @ae_measurable.coe_nnreal_ennreal α m _ _
(@ae_measurable.nnnorm H α _ _ _ m _ _ hf_meas_ae),
end
end trim
section sigma_finite
variables {E : Type*} {m0 : measurable_space α} [normed_group E] [measurable_space E]
[opens_measurable_space E]
lemma integrable_of_forall_fin_meas_le' {μ : measure α} (hm : m ≤ m0)
[sigma_finite (μ.trim hm)] (C : ℝ≥0∞) (hC : C < ∞) {f : α → E} (hf_meas : ae_measurable f μ)
(hf : ∀ s, measurable_set[m] s → μ s ≠ ∞ → ∫⁻ x in s, nnnorm (f x) ∂μ ≤ C) :
integrable f μ :=
⟨hf_meas,
(lintegral_le_of_forall_fin_meas_le' hm C hf_meas.nnnorm.coe_nnreal_ennreal hf).trans_lt hC⟩
lemma integrable_of_forall_fin_meas_le [sigma_finite μ]
(C : ℝ≥0∞) (hC : C < ∞) {f : α → E} (hf_meas : ae_measurable f μ)
(hf : ∀ s : set α, measurable_set s → μ s ≠ ∞ → ∫⁻ x in s, nnnorm (f x) ∂μ ≤ C) :
integrable f μ :=
@integrable_of_forall_fin_meas_le' _ _ _ _ _ _ _ _ le_rfl (by rwa trim_eq_self) C hC _ hf_meas hf
end sigma_finite
/-! ### The predicate `integrable` on measurable functions modulo a.e.-equality -/
namespace ae_eq_fun
section
/-- A class of almost everywhere equal functions is `integrable` if its function representative
is integrable. -/
def integrable (f : α →ₘ[μ] β) : Prop := integrable f μ
lemma integrable_mk {f : α → β} (hf : ae_measurable f μ ) :
(integrable (mk f hf : α →ₘ[μ] β)) ↔ measure_theory.integrable f μ :=
begin
simp [integrable],
apply integrable_congr,
exact coe_fn_mk f hf
end
lemma integrable_coe_fn {f : α →ₘ[μ] β} : (measure_theory.integrable f μ) ↔ integrable f :=
by rw [← integrable_mk, mk_coe_fn]
lemma integrable_zero : integrable (0 : α →ₘ[μ] β) :=
(integrable_zero α β μ).congr (coe_fn_mk _ _).symm
end
section
variables [borel_space β]
lemma integrable.neg {f : α →ₘ[μ] β} : integrable f → integrable (-f) :=
induction_on f $ λ f hfm hfi, (integrable_mk _).2 ((integrable_mk hfm).1 hfi).neg
section
variable [second_countable_topology β]
lemma integrable_iff_mem_L1 {f : α →ₘ[μ] β} : integrable f ↔ f ∈ (α →₁[μ] β) :=
by rw [← integrable_coe_fn, ← mem_ℒp_one_iff_integrable, Lp.mem_Lp_iff_mem_ℒp]
lemma integrable.add {f g : α →ₘ[μ] β} : integrable f → integrable g → integrable (f + g) :=
begin
refine induction_on₂ f g (λ f hf g hg hfi hgi, _),
simp only [integrable_mk, mk_add_mk] at hfi hgi ⊢,
exact hfi.add hgi
end
lemma integrable.sub {f g : α →ₘ[μ] β} (hf : integrable f) (hg : integrable g) :
integrable (f - g) :=
(sub_eq_add_neg f g).symm ▸ hf.add hg.neg
end
section normed_space
variables {𝕜 : Type*} [normed_field 𝕜] [normed_space 𝕜 β] [measurable_space 𝕜]
[opens_measurable_space 𝕜]
lemma integrable.smul {c : 𝕜} {f : α →ₘ[μ] β} : integrable f → integrable (c • f) :=
induction_on f $ λ f hfm hfi, (integrable_mk _).2 $ ((integrable_mk hfm).1 hfi).smul _
end normed_space
end
end ae_eq_fun
namespace L1
variables [second_countable_topology β] [borel_space β]
lemma integrable_coe_fn (f : α →₁[μ] β) :
integrable f μ :=
by { rw ← mem_ℒp_one_iff_integrable, exact Lp.mem_ℒp f }
lemma has_finite_integral_coe_fn (f : α →₁[μ] β) :
has_finite_integral f μ :=
(integrable_coe_fn f).has_finite_integral
lemma measurable_coe_fn (f : α →₁[μ] β) :
measurable f := Lp.measurable f
lemma ae_measurable_coe_fn (f : α →₁[μ] β) :
ae_measurable f μ := Lp.ae_measurable f
lemma edist_def (f g : α →₁[μ] β) :
edist f g = ∫⁻ a, edist (f a) (g a) ∂μ :=
by { simp [Lp.edist_def, snorm, snorm'], simp [edist_eq_coe_nnnorm_sub] }
lemma dist_def (f g : α →₁[μ] β) :
dist f g = (∫⁻ a, edist (f a) (g a) ∂μ).to_real :=
by { simp [Lp.dist_def, snorm, snorm'], simp [edist_eq_coe_nnnorm_sub] }
lemma norm_def (f : α →₁[μ] β) :
∥f∥ = (∫⁻ a, nnnorm (f a) ∂μ).to_real :=
by { simp [Lp.norm_def, snorm, snorm'] }
/-- Computing the norm of a difference between two L¹-functions. Note that this is not a
special case of `norm_def` since `(f - g) x` and `f x - g x` are not equal
(but only a.e.-equal). -/
lemma norm_sub_eq_lintegral (f g : α →₁[μ] β) :
∥f - g∥ = (∫⁻ x, (nnnorm (f x - g x) : ℝ≥0∞) ∂μ).to_real :=
begin
rw [norm_def],
congr' 1,
rw lintegral_congr_ae,
filter_upwards [Lp.coe_fn_sub f g],
assume a ha,
simp only [ha, pi.sub_apply],
end
lemma of_real_norm_eq_lintegral (f : α →₁[μ] β) :
ennreal.of_real ∥f∥ = ∫⁻ x, (nnnorm (f x) : ℝ≥0∞) ∂μ :=
by { rw [norm_def, ennreal.of_real_to_real], rw [← ennreal.lt_top_iff_ne_top],
exact has_finite_integral_coe_fn f }
/-- Computing the norm of a difference between two L¹-functions. Note that this is not a
special case of `of_real_norm_eq_lintegral` since `(f - g) x` and `f x - g x` are not equal
(but only a.e.-equal). -/
lemma of_real_norm_sub_eq_lintegral (f g : α →₁[μ] β) :
ennreal.of_real ∥f - g∥ = ∫⁻ x, (nnnorm (f x - g x) : ℝ≥0∞) ∂μ :=
begin
simp_rw [of_real_norm_eq_lintegral, ← edist_eq_coe_nnnorm],
apply lintegral_congr_ae,
filter_upwards [Lp.coe_fn_sub f g],
assume a ha,
simp only [ha, pi.sub_apply],
end
end L1
namespace integrable
variables [second_countable_topology β] [borel_space β]
/-- Construct the equivalence class `[f]` of an integrable function `f`, as a member of the
space `α →₁[μ] β`. -/
def to_L1 (f : α → β) (hf : integrable f μ) : α →₁[μ] β :=
(mem_ℒp_one_iff_integrable.2 hf).to_Lp f
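/- Usage sketch: given `hf : integrable f μ`, the element `hf.to_L1 f : α →₁[μ] β` is the class of
`f` in `L¹`; by `coe_fn_to_L1` below its coercion back to a function is a.e. equal to `f`, so the
two can be used interchangeably in a.e. statements. -/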
@[simp] lemma to_L1_coe_fn (f : α →₁[μ] β) (hf : integrable f μ) : hf.to_L1 f = f :=
by simp [integrable.to_L1]
lemma coe_fn_to_L1 {f : α → β} (hf : integrable f μ) : hf.to_L1 f =ᵐ[μ] f :=
ae_eq_fun.coe_fn_mk _ _
@[simp] lemma to_L1_zero (h : integrable (0 : α → β) μ) : h.to_L1 0 = 0 := rfl
@[simp] lemma to_L1_eq_mk (f : α → β) (hf : integrable f μ) :
(hf.to_L1 f : α →ₘ[μ] β) = ae_eq_fun.mk f hf.ae_measurable :=
rfl
@[simp] lemma to_L1_eq_to_L1_iff (f g : α → β) (hf : integrable f μ) (hg : integrable g μ) :
to_L1 f hf = to_L1 g hg ↔ f =ᵐ[μ] g :=
mem_ℒp.to_Lp_eq_to_Lp_iff _ _
lemma to_L1_add (f g : α → β) (hf : integrable f μ) (hg : integrable g μ) :
to_L1 (f + g) (hf.add hg) = to_L1 f hf + to_L1 g hg := rfl
lemma to_L1_neg (f : α → β) (hf : integrable f μ) :
to_L1 (- f) (integrable.neg hf) = - to_L1 f hf := rfl
lemma to_L1_sub (f g : α → β) (hf : integrable f μ) (hg : integrable g μ) :
to_L1 (f - g) (hf.sub hg) = to_L1 f hf - to_L1 g hg := rfl
lemma norm_to_L1 (f : α → β) (hf : integrable f μ) :
∥hf.to_L1 f∥ = ennreal.to_real (∫⁻ a, edist (f a) 0 ∂μ) :=
by { simp [to_L1, snorm, snorm'], simp [edist_eq_coe_nnnorm] }
lemma norm_to_L1_eq_lintegral_norm (f : α → β) (hf : integrable f μ) :
∥hf.to_L1 f∥ = ennreal.to_real (∫⁻ a, (ennreal.of_real ∥f a∥) ∂μ) :=
by { rw [norm_to_L1, lintegral_norm_eq_lintegral_edist] }
@[simp] lemma edist_to_L1_to_L1 (f g : α → β) (hf : integrable f μ) (hg : integrable g μ) :
edist (hf.to_L1 f) (hg.to_L1 g) = ∫⁻ a, edist (f a) (g a) ∂μ :=
by { simp [integrable.to_L1, snorm, snorm'], simp [edist_eq_coe_nnnorm_sub] }
@[simp] lemma edist_to_L1_zero (f : α → β) (hf : integrable f μ) :
edist (hf.to_L1 f) 0 = ∫⁻ a, edist (f a) 0 ∂μ :=
by { simp [integrable.to_L1, snorm, snorm'], simp [edist_eq_coe_nnnorm] }
variables {𝕜 : Type*} [normed_field 𝕜] [normed_space 𝕜 β] [measurable_space 𝕜]
[opens_measurable_space 𝕜]
lemma to_L1_smul (f : α → β) (hf : integrable f μ) (k : 𝕜) :
to_L1 (λ a, k • f a) (hf.smul k) = k • to_L1 f hf := rfl
lemma to_L1_smul' (f : α → β) (hf : integrable f μ) (k : 𝕜) :
to_L1 (k • f) (hf.smul k) = k • to_L1 f hf := rfl
end integrable
end measure_theory
open measure_theory
lemma integrable_zero_measure {m : measurable_space α} [measurable_space β] {f : α → β} :
integrable f (0 : measure α) :=
begin
apply (integrable_zero _ _ _).congr,
change (0 : measure α) {x | 0 ≠ f x} = 0,
refl,
end
variables {E : Type*} [normed_group E] [measurable_space E] [borel_space E] [normed_space ℝ E]
{H : Type*} [normed_group H] [normed_space ℝ H]
lemma measure_theory.integrable.apply_continuous_linear_map {φ : α → H →L[ℝ] E}
(φ_int : integrable φ μ) (v : H) : integrable (λ a, φ a v) μ :=
(φ_int.norm.mul_const ∥v∥).mono' (φ_int.ae_measurable.apply_continuous_linear_map v)
(eventually_of_forall $ λ a, (φ a).le_op_norm v)
variables {𝕜 : Type*} [is_R_or_C 𝕜]
{G : Type*} [normed_group G] [normed_space 𝕜 G] [measurable_space G] [borel_space G]
{F : Type*} [normed_group F] [normed_space 𝕜 F] [measurable_space F] [opens_measurable_space F]
lemma continuous_linear_map.integrable_comp {φ : α → F} (L : F →L[𝕜] G)
(φ_int : integrable φ μ) : integrable (λ (a : α), L (φ a)) μ :=
((integrable.norm φ_int).const_mul ∥L∥).mono' (L.measurable.comp_ae_measurable φ_int.ae_measurable)
(eventually_of_forall $ λ a, L.le_op_norm (φ a))
/- ---- file: /stage0/src/Init/Lean/Meta/Tactic.lean (repo: mhuisi/lean4, Apache-2.0) ---- -/
/-
Copyright (c) 2019 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
prelude
import Init.Lean.Meta.Tactic.Intro
import Init.Lean.Meta.Tactic.Assumption
import Init.Lean.Meta.Tactic.Apply
import Init.Lean.Meta.Tactic.Revert
import Init.Lean.Meta.Tactic.Clear
import Init.Lean.Meta.Tactic.Assert
import Init.Lean.Meta.Tactic.Target
import Init.Lean.Meta.Tactic.Rewrite
import Init.Lean.Meta.Tactic.Generalize
import Init.Lean.Meta.Tactic.LocalDecl
import Init.Lean.Meta.Tactic.Induction
import Init.Lean.Meta.Tactic.Cases
/- ---- file: /ring_theory/ideal_operations.lean (repo: kckennylau/mathlib, Apache-2.0) ---- -/
/-
Copyright (c) 2018 Kenny Lau. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kenny Lau
More operations on modules and ideals.
-/
import ring_theory.ideals data.nat.choose order.zorn
import linear_algebra.tensor_product
universes u v w x
open lattice
namespace submodule
variables {R : Type u} {M : Type v}
variables [comm_ring R] [add_comm_group M] [module R M]
instance has_scalar' : has_scalar (ideal R) (submodule R M) :=
⟨λ I N, ⨆ r : I, N.map (r.1 • linear_map.id)⟩
def annihilator (N : submodule R M) : ideal R :=
(linear_map.lsmul R N).ker
def colon (N P : submodule R M) : ideal R :=
annihilator (P.map N.mkq)
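/- Illustration (informal, over `R = M = ℤ`): the annihilator of the submodule `6ℤ` is `⊥`, since
no nonzero integer kills a nonzero element, whereas the annihilator of the `ℤ`-module `ℤ/6` would
be the ideal `(6)`. The colon ideal `N.colon P` collects the scalars carrying `P` into `N`; for
instance `(6ℤ).colon (2ℤ) = (3)`, because `r • 2 ∈ 6ℤ` iff `3 ∣ r`. -/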
variables {I J : ideal R} {N N₁ N₂ P P₁ P₂ : submodule R M}
theorem mem_annihilator {r} : r ∈ N.annihilator ↔ ∀ n ∈ N, r • n = (0:M) :=
⟨λ hr n hn, congr_arg subtype.val (linear_map.ext_iff.1 (linear_map.mem_ker.1 hr) ⟨n, hn⟩),
λ h, linear_map.mem_ker.2 $ linear_map.ext $ λ n, subtype.eq $ h n.1 n.2⟩
theorem mem_annihilator' {r} : r ∈ N.annihilator ↔ N ≤ comap (r • linear_map.id) ⊥ :=
mem_annihilator.trans ⟨λ H n hn, mem_bot.2 $ H n hn, λ H n hn, mem_bot.1 $ H hn⟩
theorem annihilator_bot : (⊥ : submodule R M).annihilator = ⊤ :=
(ideal.eq_top_iff_one _).2 $ mem_annihilator'.2 bot_le
theorem annihilator_eq_top_iff : N.annihilator = ⊤ ↔ N = ⊥ :=
⟨λ H, eq_bot_iff.2 $ λ n hn, mem_bot.2 $ one_smul n ▸ mem_annihilator.1 ((ideal.eq_top_iff_one _).1 H) n hn,
λ H, H.symm ▸ annihilator_bot⟩
theorem annihilator_mono (h : N ≤ P) : P.annihilator ≤ N.annihilator :=
λ r hrp, mem_annihilator.2 $ λ n hn, mem_annihilator.1 hrp n $ h hn
theorem annihilator_supr (ι : Type w) (f : ι → submodule R M) :
(annihilator ⨆ i, f i) = ⨅ i, annihilator (f i) :=
le_antisymm (le_infi $ λ i, annihilator_mono $ le_supr _ _)
(λ r H, mem_annihilator'.2 $ supr_le $ λ i,
have _ := (mem_infi _).1 H i, mem_annihilator'.1 this)
theorem mem_colon {r} : r ∈ N.colon P ↔ ∀ p ∈ P, r • p ∈ N :=
mem_annihilator.trans ⟨λ H p hp, (quotient.mk_eq_zero N).1 (H (quotient.mk p) (mem_map_of_mem hp)),
λ H m ⟨p, hp, hpm⟩, hpm ▸ (N.mkq).map_smul r p ▸ (quotient.mk_eq_zero N).2 $ H p hp⟩
theorem mem_colon' {r} : r ∈ N.colon P ↔ P ≤ comap (r • linear_map.id) N :=
mem_colon
theorem colon_mono (hn : N₁ ≤ N₂) (hp : P₁ ≤ P₂) : N₁.colon P₂ ≤ N₂.colon P₁ :=
λ r hrnp, mem_colon.2 $ λ p₁ hp₁, hn $ mem_colon.1 hrnp p₁ $ hp hp₁
theorem infi_colon_supr (ι₁ : Type w) (f : ι₁ → submodule R M)
(ι₂ : Type x) (g : ι₂ → submodule R M) :
(⨅ i, f i).colon (⨆ j, g j) = ⨅ i j, (f i).colon (g j) :=
le_antisymm (le_infi $ λ i, le_infi $ λ j, colon_mono (infi_le _ _) (le_supr _ _))
(λ r H, mem_colon'.2 $ supr_le $ λ j, map_le_iff_le_comap.1 $ le_infi $ λ i,
map_le_iff_le_comap.2 $ mem_colon'.1 $ have _ := ((mem_infi _).1 H i),
have _ := ((mem_infi _).1 this j), this)
theorem smul_mem_smul {r} {n} (hr : r ∈ I) (hn : n ∈ N) : r • n ∈ I • N :=
(le_supr _ ⟨r, hr⟩ : _ ≤ I • N) ⟨n, hn, rfl⟩
theorem smul_le {P : submodule R M} : I • N ≤ P ↔ ∀ (r ∈ I) (n ∈ N), r • n ∈ P :=
⟨λ H r hr n hn, H $ smul_mem_smul hr hn,
λ H, supr_le $ λ r, map_le_iff_le_comap.2 $ λ n hn, H r.1 r.2 n hn⟩
@[elab_as_eliminator]
theorem smul_induction_on {p : M → Prop} {x} (H : x ∈ I • N)
(Hb : ∀ (r ∈ I) (n ∈ N), p (r • n)) (H0 : p 0)
(H1 : ∀ x y, p x → p y → p (x + y))
(H2 : ∀ c n, p n → p (c • n)) : p x :=
(@smul_le _ _ _ _ _ _ _ ⟨p, H0, H1, H2⟩).2 Hb H
theorem smul_le_right : I • N ≤ N :=
smul_le.2 $ λ r hr n, N.smul_mem r
theorem smul_mono (hij : I ≤ J) (hnp : N ≤ P) : I • N ≤ J • P :=
smul_le.2 $ λ r hr n hn, smul_mem_smul (hij hr) (hnp hn)
theorem smul_mono_left (h : I ≤ J) : I • N ≤ J • N :=
smul_mono h (le_refl N)
theorem smul_mono_right (h : N ≤ P) : I • N ≤ I • P :=
smul_mono (le_refl I) h
variables (I J N P)
theorem smul_bot : I • (⊥ : submodule R M) = ⊥ :=
eq_bot_iff.2 $ smul_le.2 $ λ r hri s hsb,
submodule.mem_bot.2 $ (submodule.mem_bot.1 hsb).symm ▸ smul_zero r
theorem bot_smul : ⊥ • N = ⊥ :=
eq_bot_iff.2 $ smul_le.2 $ λ r hrb s hsi,
submodule.mem_bot.2 $ (submodule.mem_bot.1 hrb).symm ▸ zero_smul s
theorem top_smul : ⊤ • N = N :=
le_antisymm smul_le_right $ λ r hri, one_smul r ▸ smul_mem_smul mem_top hri
theorem smul_sup : I • (N ⊔ P) = I • N ⊔ I • P :=
le_antisymm (smul_le.2 $ λ r hri m hmnp, let ⟨n, hn, p, hp, hnpm⟩ := mem_sup.1 hmnp in
mem_sup.2 ⟨_, smul_mem_smul hri hn, _, smul_mem_smul hri hp, hnpm ▸ (smul_add _ _ _).symm⟩)
(sup_le (smul_mono_right le_sup_left)
(smul_mono_right le_sup_right))
theorem sup_smul : (I ⊔ J) • N = I • N ⊔ J • N :=
le_antisymm (smul_le.2 $ λ r hrij n hn, let ⟨ri, hri, rj, hrj, hrijr⟩ := mem_sup.1 hrij in
mem_sup.2 ⟨_, smul_mem_smul hri hn, _, smul_mem_smul hrj hn, hrijr ▸ (add_smul _ _ _).symm⟩)
(sup_le (smul_mono_left le_sup_left)
(smul_mono_left le_sup_right))
theorem smul_assoc : (I • J) • N = I • (J • N) :=
le_antisymm (smul_le.2 $ λ rs hrsij t htn,
smul_induction_on hrsij
(λ r hr s hs, (@smul_eq_mul R _ r s).symm ▸ smul_smul r s t ▸ smul_mem_smul hr (smul_mem_smul hs htn))
((zero_smul t).symm ▸ submodule.zero_mem _)
(λ x y, (add_smul x y t).symm ▸ submodule.add_mem _)
(λ r s h, (@smul_eq_mul R _ r s).symm ▸ smul_smul r s t ▸ submodule.smul_mem _ _ h))
(smul_le.2 $ λ r hr sn hsn, suffices J • N ≤ submodule.comap (r • linear_map.id) ((I • J) • N), from this hsn,
smul_le.2 $ λ s hs n hn, show r • (s • n) ∈ (I • J) • N, from mul_smul r s n ▸ smul_mem_smul (smul_mem_smul hr hs) hn)
variables (S : set R) (T : set M)
theorem span_smul_span : (ideal.span S) • (span T) =
span (⋃ (s ∈ S) (t ∈ T), {s • t}) :=
le_antisymm (smul_le.2 $ λ r hrS n hnT, span_induction hrS
(λ r hrS, span_induction hnT
(λ n hnT, subset_span $ set.mem_bUnion hrS $
set.mem_bUnion hnT $ set.mem_singleton _)
((smul_zero r : r • 0 = (0:M)).symm ▸ submodule.zero_mem _)
(λ x y, (smul_add r x y).symm ▸ submodule.add_mem _)
(λ c m, by rw [smul_smul, mul_comm, mul_smul]; exact submodule.smul_mem _ _))
((zero_smul n).symm ▸ submodule.zero_mem _)
(λ r s, (add_smul r s n).symm ▸ submodule.add_mem _)
(λ c r, by rw [smul_eq_mul, mul_smul]; exact submodule.smul_mem _ _)) $
span_le.2 $ set.bUnion_subset $ λ r hrS, set.bUnion_subset $ λ n hnT, set.singleton_subset_iff.2 $
smul_mem_smul (subset_span hrS) (subset_span hnT)
end submodule
namespace ideal
section mul_and_radical
variables {R : Type u} [comm_ring R]
variables {I J K L: ideal R}
instance : has_mul (ideal R) := ⟨(•)⟩
theorem mul_mem_mul {r s} (hr : r ∈ I) (hs : s ∈ J) : r * s ∈ I * J :=
submodule.smul_mem_smul hr hs
theorem mul_mem_mul_rev {r s} (hr : r ∈ I) (hs : s ∈ J) : s * r ∈ I * J :=
mul_comm r s ▸ mul_mem_mul hr hs
theorem mul_le : I * J ≤ K ↔ ∀ (r ∈ I) (s ∈ J), r * s ∈ K :=
submodule.smul_le
variables (I J K)
protected theorem mul_comm : I * J = J * I :=
le_antisymm (mul_le.2 $ λ r hrI s hsJ, mul_mem_mul_rev hsJ hrI)
(mul_le.2 $ λ r hrJ s hsI, mul_mem_mul_rev hsI hrJ)
protected theorem mul_assoc : (I * J) * K = I * (J * K) :=
submodule.smul_assoc I J K
theorem span_mul_span (S T : set R) : span S * span T =
span ⋃ (s ∈ S) (t ∈ T), {s * t} :=
submodule.span_smul_span S T
variables {I J K}
theorem mul_le_inf : I * J ≤ I ⊓ J :=
mul_le.2 $ λ r hri s hsj, ⟨I.mul_mem_right hri, J.mul_mem_left hsj⟩
theorem mul_eq_inf_of_coprime (h : I ⊔ J = ⊤) : I * J = I ⊓ J :=
le_antisymm mul_le_inf $ λ r ⟨hri, hrj⟩,
let ⟨s, hsi, t, htj, hst⟩ := submodule.mem_sup.1 ((eq_top_iff_one _).1 h) in
mul_one r ▸ hst ▸ (mul_add r s t).symm ▸ ideal.add_mem (I * J) (mul_mem_mul_rev hsi hrj) (mul_mem_mul hri htj)
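/- For illustration: over `ℤ`, the ideals `span {2}` and `span {3}` are coprime (their sup contains
`3 - 2 = 1`), so `mul_eq_inf_of_coprime` specialises to `span {2} * span {3} = span {2} ⊓ span {3}`,
both sides being `span {6}`. -/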
variables (I)
theorem mul_bot : I * ⊥ = ⊥ :=
submodule.smul_bot I
theorem bot_mul : ⊥ * I = ⊥ :=
submodule.bot_smul I
theorem mul_top : I * ⊤ = I :=
ideal.mul_comm ⊤ I ▸ submodule.top_smul I
theorem top_mul : ⊤ * I = I :=
submodule.top_smul I
variables {I}
theorem mul_mono (hik : I ≤ K) (hjl : J ≤ L) : I * J ≤ K * L :=
submodule.smul_mono hik hjl
theorem mul_mono_left (h : I ≤ J) : I * K ≤ J * K :=
submodule.smul_mono_left h
theorem mul_mono_right (h : J ≤ K) : I * J ≤ I * K :=
submodule.smul_mono_right h
variables (I J K)
theorem mul_sup : I * (J ⊔ K) = I * J ⊔ I * K :=
submodule.smul_sup I J K
theorem sup_mul : (I ⊔ J) * K = I * K ⊔ J * K :=
submodule.sup_smul I J K
variables {I J K}
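/-- The radical of an ideal `I` consists of all elements `r` such that some power of `r` lies
in `I`. For instance, over `ℤ` the radical of the ideal generated by `8` is the ideal generated
by `2`, since `2 ^ 3 = 8`. -/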
def radical (I : ideal R) : ideal R :=
{ carrier := { r | ∃ n : ℕ, r ^ n ∈ I },
zero := ⟨1, (pow_one (0:R)).symm ▸ I.zero_mem⟩,
add := λ x y ⟨m, hxmi⟩ ⟨n, hyni⟩, ⟨m + n,
(add_pow x y (m + n)).symm ▸ I.sum_mem $
show ∀ c ∈ finset.range (nat.succ (m + n)), x ^ c * y ^ (m + n - c) * (choose (m + n) c) ∈ I,
from λ c hc, or.cases_on (le_total c m)
(λ hcm, I.mul_mem_right $ I.mul_mem_left $ nat.add_comm n m ▸ (nat.add_sub_assoc hcm n).symm ▸
(pow_add y n (m-c)).symm ▸ I.mul_mem_right hyni)
(λ hmc, I.mul_mem_right $ I.mul_mem_right $ nat.add_sub_cancel' hmc ▸
(pow_add x m (c-m)).symm ▸ I.mul_mem_right hxmi)⟩,
smul := λ r s ⟨n, hsni⟩, ⟨n, show (r * s)^n ∈ I,
from (mul_pow r s n).symm ▸ I.mul_mem_left hsni⟩ }
theorem le_radical : I ≤ radical I :=
λ r hri, ⟨1, (pow_one r).symm ▸ hri⟩
variables (R)
theorem radical_top : (radical ⊤ : ideal R) = ⊤ :=
(eq_top_iff_one _).2 ⟨0, submodule.mem_top⟩
variables {R}
theorem radical_mono (H : I ≤ J) : radical I ≤ radical J :=
λ r ⟨n, hrni⟩, ⟨n, H hrni⟩
variables (I)
theorem radical_idem : radical (radical I) = radical I :=
le_antisymm (λ r ⟨n, k, hrnki⟩, ⟨n * k, (pow_mul r n k).symm ▸ hrnki⟩) le_radical
variables {I}
theorem radical_eq_top : radical I = ⊤ ↔ I = ⊤ :=
⟨λ h, (eq_top_iff_one _).2 $ let ⟨n, hn⟩ := (eq_top_iff_one _).1 h in
@one_pow R _ n ▸ hn, λ h, h.symm ▸ radical_top R⟩
theorem is_prime.radical (H : is_prime I) : radical I = I :=
le_antisymm (λ r ⟨n, hrni⟩, H.mem_of_pow_mem n hrni) le_radical
variables (I J)
theorem radical_sup : radical (I ⊔ J) = radical (radical I ⊔ radical J) :=
le_antisymm (radical_mono $ sup_le_sup le_radical le_radical) $
λ r ⟨n, hrnij⟩, let ⟨s, hs, t, ht, hst⟩ := submodule.mem_sup.1 hrnij in
@radical_idem _ _ (I ⊔ J) ▸ ⟨n, hst ▸ ideal.add_mem _
(radical_mono le_sup_left hs) (radical_mono le_sup_right ht)⟩
theorem radical_inf : radical (I ⊓ J) = radical I ⊓ radical J :=
le_antisymm (le_inf (radical_mono inf_le_left) (radical_mono inf_le_right))
(λ r ⟨⟨m, hrm⟩, ⟨n, hrn⟩⟩, ⟨m + n, (pow_add r m n).symm ▸ I.mul_mem_right hrm,
(pow_add r m n).symm ▸ J.mul_mem_left hrn⟩)
theorem radical_mul : radical (I * J) = radical I ⊓ radical J :=
le_antisymm (radical_inf I J ▸ radical_mono $ @mul_le_inf _ _ I J)
(λ r ⟨⟨m, hrm⟩, ⟨n, hrn⟩⟩, ⟨m + n, (pow_add r m n).symm ▸ mul_mem_mul hrm hrn⟩)
variables {I J}
theorem is_prime.radical_le_iff (hj : is_prime J) :
radical I ≤ J ↔ I ≤ J :=
⟨le_trans le_radical, λ hij r ⟨n, hrni⟩, hj.mem_of_pow_mem n $ hij hrni⟩
theorem radical_eq_Inf (I : ideal R) :
radical I = Inf { J : ideal R | I ≤ J ∧ is_prime J } :=
le_antisymm (le_Inf $ λ J hJ, hJ.2.radical_le_iff.2 hJ.1) $
λ r hr, classical.by_contradiction $ λ hri,
let ⟨m, (hrm : r ∉ radical m), him, hm⟩ := zorn.zorn_partial_order₀ {K : ideal R | r ∉ radical K}
(λ c hc hcc y hyc, ⟨Sup c, λ ⟨n, hrnc⟩, let ⟨y, hyc, hrny⟩ :=
submodule.mem_Sup_of_directed hrnc y hyc hcc.directed_on in hc hyc ⟨n, hrny⟩,
λ z, le_Sup⟩) I hri in
have ∀ x ∉ m, r ∈ radical (m ⊔ span {x}) := λ x hxm, classical.by_contradiction $ λ hrmx, hxm $
hm (m ⊔ span {x}) hrmx le_sup_left ▸ (le_sup_right : _ ≤ m ⊔ span {x}) (subset_span $ set.mem_singleton _),
have is_prime m, from ⟨by rintro rfl; rw radical_top at hrm; exact hrm trivial,
λ x y hxym, classical.or_iff_not_imp_left.2 $ λ hxm, classical.by_contradiction $ λ hym,
let ⟨n, hrn⟩ := this _ hxm, ⟨p, hpm, q, hq, hpqrn⟩ := submodule.mem_sup.1 hrn, ⟨c, hcxq⟩ := mem_span_singleton'.1 hq in
let ⟨k, hrk⟩ := this _ hym, ⟨f, hfm, g, hg, hfgrk⟩ := submodule.mem_sup.1 hrk, ⟨d, hdyg⟩ := mem_span_singleton'.1 hg in
hrm ⟨n + k, by rw [pow_add, ← hpqrn, ← hcxq, ← hfgrk, ← hdyg, add_mul, mul_add (c*x), mul_assoc c x (d*y), mul_left_comm x, ← mul_assoc];
refine m.add_mem (m.mul_mem_right hpm) (m.add_mem (m.mul_mem_left hfm) (m.mul_mem_left hxym))⟩⟩,
hrm $ this.radical.symm ▸ (Inf_le ⟨him, this⟩ : Inf {J : ideal R | I ≤ J ∧ is_prime J} ≤ m) hr
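/- `radical_eq_Inf` is the classical description of the radical as the intersection of all prime
ideals containing `I`. For illustration: over `ℤ`, the primes containing the ideal generated by
`12` are those generated by `2` and by `3`, and their intersection is the ideal generated by `6`,
which is indeed the radical of the ideal generated by `12` (as `6 ^ 2 = 36` is a multiple of `12`). -/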
instance : semiring (ideal R) :=
{ add := (⊔),
add_assoc := λ _ _ _, sup_assoc,
zero := ⊥,
zero_add := λ _, bot_sup_eq,
add_zero := λ _, sup_bot_eq,
add_comm := λ _ _, sup_comm,
mul := (*),
mul_assoc := ideal.mul_assoc,
zero_mul := bot_mul,
mul_zero := mul_bot,
one := ⊤,
one_mul := top_mul,
mul_one := mul_top,
left_distrib := mul_sup,
right_distrib := sup_mul }
@[simp] lemma add_eq_sup : I + J = I ⊔ J := rfl
@[simp] lemma zero_eq_bot : (0 : ideal R) = ⊥ := rfl
@[simp] lemma one_eq_top : (1 : ideal R) = ⊤ := rfl
variables (I)
theorem radical_pow (n : ℕ) (H : n > 0) : radical (I^n) = radical I :=
nat.rec_on n (not.elim dec_trivial) (λ n ih H,
or.cases_on (lt_or_eq_of_le $ nat.le_of_lt_succ H)
(λ H, calc radical (I^(n+1))
= radical I ⊓ radical (I^n) : radical_mul _ _
... = radical I ⊓ radical I : by rw ih H
... = radical I : inf_idem)
(λ H, H ▸ (pow_one I).symm ▸ rfl)) H
end mul_and_radical
section map_and_comap
variables {R : Type u} {S : Type v} [comm_ring R] [comm_ring S]
variables (f : R → S) [is_ring_hom f]
variables {I J : ideal R} {K L : ideal S}
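/-- `map f I` is the ideal of `S` spanned by the image of the ideal `I` under the ring
homomorphism `f`; the image itself need not be an ideal, which is why we take its span. -/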
def map (I : ideal R) : ideal S :=
span (f '' I)
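/-- `comap f I` is the preimage of the ideal `I` of `S` under the ring homomorphism `f`;
the preimage of an ideal is already an ideal, so no span is needed here. -/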
def comap (I : ideal S) : ideal R :=
{ carrier := f ⁻¹' I,
zero := show f 0 ∈ I, by rw is_ring_hom.map_zero f; exact I.zero_mem,
add := λ x y hx hy, show f (x + y) ∈ I, by rw is_ring_hom.map_add f; exact I.add_mem hx hy,
smul := λ c x hx, show f (c * x) ∈ I, by rw is_ring_hom.map_mul f; exact I.mul_mem_left hx }
variables {f}
theorem map_mono (h : I ≤ J) : map f I ≤ map f J :=
span_mono $ set.image_subset _ h
theorem mem_map_of_mem {x} (h : x ∈ I) : f x ∈ map f I :=
subset_span ⟨x, h, rfl⟩
theorem map_le_iff_le_comap :
map f I ≤ K ↔ I ≤ comap f K :=
span_le.trans set.image_subset_iff
@[simp] theorem mem_comap {x} : x ∈ comap f K ↔ f x ∈ K := iff.rfl
theorem comap_mono (h : K ≤ L) : comap f K ≤ comap f L :=
set.preimage_mono h
variables (f)
theorem comap_ne_top (hK : K ≠ ⊤) : comap f K ≠ ⊤ :=
(ne_top_iff_one _).2 $ by rw [mem_comap, is_ring_hom.map_one f];
exact (ne_top_iff_one _).1 hK
instance is_prime.comap {hK : K.is_prime} : (comap f K).is_prime :=
⟨comap_ne_top _ hK.1, λ x y,
by simp only [mem_comap, is_ring_hom.map_mul f]; apply hK.2⟩
variables (I J K L)
theorem map_bot : map f ⊥ = ⊥ :=
le_antisymm (map_le_iff_le_comap.2 bot_le) bot_le
theorem map_top : map f ⊤ = ⊤ :=
(eq_top_iff_one _).2 $ subset_span ⟨1, trivial, is_ring_hom.map_one f⟩
theorem comap_top : comap f ⊤ = ⊤ :=
(eq_top_iff_one _).2 trivial
theorem map_sup : map f (I ⊔ J) = map f I ⊔ map f J :=
le_antisymm (map_le_iff_le_comap.2 $ sup_le
(map_le_iff_le_comap.1 le_sup_left)
(map_le_iff_le_comap.1 le_sup_right))
(sup_le (map_mono le_sup_left) (map_mono le_sup_right))
theorem map_mul : map f (I * J) = map f I * map f J :=
le_antisymm (map_le_iff_le_comap.2 $ mul_le.2 $ λ r hri s hsj,
show f (r * s) ∈ _, by rw is_ring_hom.map_mul f;
exact mul_mem_mul (mem_map_of_mem hri) (mem_map_of_mem hsj))
(trans_rel_right _ (span_mul_span _ _) $ span_le.2 $
set.bUnion_subset $ λ i ⟨r, hri, hfri⟩,
set.bUnion_subset $ λ j ⟨s, hsj, hfsj⟩,
set.singleton_subset_iff.2 $ hfri ▸ hfsj ▸
by rw [← is_ring_hom.map_mul f];
exact mem_map_of_mem (mul_mem_mul hri hsj))
theorem comap_inf : comap f (K ⊓ L) = comap f K ⊓ comap f L := rfl
theorem comap_radical : comap f (radical K) = radical (comap f K) :=
le_antisymm (λ r ⟨n, hfrnk⟩, ⟨n, show f (r ^ n) ∈ K,
from (is_semiring_hom.map_pow f r n).symm ▸ hfrnk⟩)
(λ r ⟨n, hfrnk⟩, ⟨n, is_semiring_hom.map_pow f r n ▸ hfrnk⟩)
variables {I J K L}
theorem map_inf_le : map f (I ⊓ J) ≤ map f I ⊓ map f J :=
map_le_iff_le_comap.2 $ (comap_inf f (map f I) (map f J)).symm ▸
inf_le_inf (map_le_iff_le_comap.1 $ le_refl _) (map_le_iff_le_comap.1 $ le_refl _)
theorem map_radical_le : map f (radical I) ≤ radical (map f I) :=
map_le_iff_le_comap.2 $ λ r ⟨n, hrni⟩, ⟨n, is_semiring_hom.map_pow f r n ▸ mem_map_of_mem hrni⟩
theorem le_comap_sup : comap f K ⊔ comap f L ≤ comap f (K ⊔ L) :=
map_le_iff_le_comap.1 $ (map_sup f (comap f K) (comap f L)).symm ▸
sup_le_sup (map_le_iff_le_comap.2 $ le_refl _) (map_le_iff_le_comap.2 $ le_refl _)
theorem le_comap_mul : comap f K * comap f L ≤ comap f (K * L) :=
map_le_iff_le_comap.1 $ (map_mul f (comap f K) (comap f L)).symm ▸
mul_mono (map_le_iff_le_comap.2 $ le_refl _) (map_le_iff_le_comap.2 $ le_refl _)
end map_and_comap
end ideal
namespace submodule
variables {R : Type u} {M : Type v}
variables [comm_ring R] [add_comm_group M] [module R M]
variables (I J : ideal R) (N P : submodule R M)
instance : add_comm_monoid (submodule R M) :=
{ add := (⊔),
add_assoc := λ _ _ _, sup_assoc,
zero := ⊥,
zero_add := λ _, bot_sup_eq,
add_zero := λ _, sup_bot_eq,
add_comm := λ _ _, sup_comm }
@[simp] lemma add_eq_sup : N + P = N ⊔ P := rfl
@[simp] lemma zero_eq_bot : (0 : submodule R M) = ⊥ := rfl
instance : semimodule (ideal R) (submodule R M) :=
{ smul_add := smul_sup,
add_smul := sup_smul,
mul_smul := smul_assoc,
one_smul := top_smul,
zero_smul := bot_smul,
smul_zero := smul_bot }
end submodule
df7c338ba85c3579fd0b8c902035b873aa2444c8 | 8cae430f0a71442d02dbb1cbb14073b31048e4b0 | /src/analysis/analytic/composition.lean | 83396029efcdcd5c4493998b4f6202075d7d427e | ["Apache-2.0"] | permissive | leanprover-community/mathlib | 56a2cadd17ac88caf4ece0a775932fa26327ba0e | 442a83d738cb208d3600056c489be16900ba701d | refs/heads/master | 1,693,584,102,358 | 1,693,471,902,000 | 1,693,471,902,000 | 97,922,418 | 1,595 | 352 | Apache-2.0 | 1,694,693,445,000 | 1,500,624,130,000 | Lean | UTF-8 | Lean | false | false | 58,927 | lean |
/-
Copyright (c) 2020 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel, Johan Commelin
-/
import analysis.analytic.basic
import combinatorics.composition
/-!
# Composition of analytic functions
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
In this file we prove that the composition of analytic functions is analytic.
The argument is the following. Assume `g z = ∑' qₙ (z, ..., z)` and `f y = ∑' pₖ (y, ..., y)`. Then
`g (f y) = ∑' qₙ (∑' pₖ (y, ..., y), ..., ∑' pₖ (y, ..., y))
= ∑' qₙ (p_{i₁} (y, ..., y), ..., p_{iₙ} (y, ..., y))`.
For each `n` and `i₁, ..., iₙ`, define a `i₁ + ... + iₙ` multilinear function mapping
`(y₀, ..., y_{i₁ + ... + iₙ - 1})` to
`qₙ (p_{i₁} (y₀, ..., y_{i₁-1}), p_{i₂} (y_{i₁}, ..., y_{i₁ + i₂ - 1}), ..., p_{iₙ} (....)))`.
Then `g ∘ f` is obtained by summing all these multilinear functions.
To formalize this, we use compositions of an integer `N`, i.e., its decompositions into
a sum `i₁ + ... + iₙ` of positive integers. Given such a composition `c` and two formal
multilinear series `q` and `p`, let `q.comp_along_composition p c` be the above multilinear
function. Then the `N`-th coefficient in the power series expansion of `g ∘ f` is the sum of these
terms over all `c : composition N`.
To complete the proof, we need to show that this power series has a positive radius of convergence.
This follows from the fact that `composition N` has cardinality `2^(N-1)` and estimates on
the norm of `qₙ` and `pₖ`, which give summability. We also need to show that it indeed converges to
`g ∘ f`. For this, we note that the composition of partial sums converges to `g ∘ f`, and that it
corresponds to a part of the whole sum, on a subset that increases to the whole space. By
summability of the norms, this implies the overall convergence.
## Main results
* `q.comp p` is the formal composition of the formal multilinear series `q` and `p`.
* `has_fpower_series_at.comp` states that if two functions `g` and `f` admit power series expansions
`q` and `p`, then `g ∘ f` admits a power series expansion given by `q.comp p`.
* `analytic_at.comp` states that the composition of analytic functions is analytic.
* `formal_multilinear_series.comp_assoc` states that composition is associative on formal
multilinear series.
## Implementation details
The main technical difficulty is to write things down. In particular, we need to define precisely
`q.comp_along_composition p c` and to show that it is indeed a continuous multilinear
function. This requires a whole interface built on the class `composition`. Once this is set,
the main difficulty is to reorder the sums, writing the composition of the partial sums as a sum
over some subset of `Σ n, composition n`. We need to check that the reordering is a bijection,
running into difficulties due to the dependent nature of the types under consideration, which are
controlled thanks to the interface for `composition`.
The associativity of composition on formal multilinear series is a nontrivial result: it does not
follow from the associativity of composition of analytic functions, as there is no uniqueness for
the formal multilinear series representing a function (and also, it holds even when the radius of
convergence of the series is `0`). Instead, we give a direct proof, which amounts to reordering
double sums in a careful way. The change of variables is a canonical (combinatorial) bijection
`composition.sigma_equiv_sigma_pi` between `(Σ (a : composition n), composition a.length)` and
`(Σ (c : composition n), Π (i : fin c.length), composition (c.blocks_fun i))`, and is described
in more detail below in the paragraph on associativity.
-/
noncomputable theory
variables {𝕜 : Type*} {E F G H : Type*}
open filter list
open_locale topology big_operators classical nnreal ennreal
section topological
variables [comm_ring 𝕜] [add_comm_group E] [add_comm_group F] [add_comm_group G]
variables [module 𝕜 E] [module 𝕜 F] [module 𝕜 G]
variables [topological_space E] [topological_space F] [topological_space G]
/-! ### Composing formal multilinear series -/
namespace formal_multilinear_series
variables [topological_add_group E] [has_continuous_const_smul 𝕜 E]
variables [topological_add_group F] [has_continuous_const_smul 𝕜 F]
variables [topological_add_group G] [has_continuous_const_smul 𝕜 G]
/-!
In this paragraph, we define the composition of formal multilinear series, by summing over all
possible compositions of `n`.
-/
/-- Given a formal multilinear series `p`, a composition `c` of `n` and the index `i` of a
block of `c`, we may define a function on `fin n → E` by picking the variables in the `i`-th block
of `n`, and applying the corresponding coefficient of `p` to these variables. This function is
called `p.apply_composition c v i` for `v : fin n → E` and `i : fin c.length`. -/
def apply_composition
(p : formal_multilinear_series 𝕜 E F) {n : ℕ} (c : composition n) :
(fin n → E) → (fin (c.length) → F) :=
λ v i, p (c.blocks_fun i) (v ∘ (c.embedding i))
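/- For illustration (informally): if `c : composition 3` has blocks `[2, 1]`, then `c.length = 2`,
and for `v : fin 3 → E` one gets
`p.apply_composition c v = ![p 2 ![v 0, v 1], p 1 ![v 2]]`,
i.e., the first block feeds `v 0, v 1` to `p 2` and the second block feeds `v 2` to `p 1`. -/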
lemma apply_composition_ones (p : formal_multilinear_series 𝕜 E F) (n : ℕ) :
p.apply_composition (composition.ones n) =
λ v i, p 1 (λ _, v (fin.cast_le (composition.length_le _) i)) :=
begin
funext v i,
apply p.congr (composition.ones_blocks_fun _ _),
intros j hjn hj1,
obtain rfl : j = 0, { linarith },
refine congr_arg v _,
rw [fin.ext_iff, fin.coe_cast_le, composition.ones_embedding, fin.coe_mk],
end
lemma apply_composition_single (p : formal_multilinear_series 𝕜 E F) {n : ℕ} (hn : 0 < n)
(v : fin n → E) : p.apply_composition (composition.single n hn) v = λ j, p n v :=
begin
ext j,
refine p.congr (by simp) (λ i hi1 hi2, _),
dsimp,
congr' 1,
convert composition.single_embedding hn ⟨i, hi2⟩,
cases j,
have : j_val = 0 := le_bot_iff.1 (nat.lt_succ_iff.1 j_property),
unfold_coes,
congr; try { assumption <|> simp },
end
@[simp] lemma remove_zero_apply_composition
(p : formal_multilinear_series 𝕜 E F) {n : ℕ} (c : composition n) :
p.remove_zero.apply_composition c = p.apply_composition c :=
begin
ext v i,
simp [apply_composition, zero_lt_one.trans_le (c.one_le_blocks_fun i), remove_zero_of_pos],
end
/-- Technical lemma stating how `p.apply_composition` commutes with updating variables. This
will be the key point to show that functions constructed from `apply_composition` retain
multilinearity. -/
lemma apply_composition_update
(p : formal_multilinear_series 𝕜 E F) {n : ℕ} (c : composition n)
(j : fin n) (v : fin n → E) (z : E) :
p.apply_composition c (function.update v j z) =
function.update (p.apply_composition c v) (c.index j)
(p (c.blocks_fun (c.index j))
(function.update (v ∘ (c.embedding (c.index j))) (c.inv_embedding j) z)) :=
begin
ext k,
by_cases h : k = c.index j,
{ rw h,
let r : fin (c.blocks_fun (c.index j)) → fin n := c.embedding (c.index j),
simp only [function.update_same],
change p (c.blocks_fun (c.index j)) ((function.update v j z) ∘ r) = _,
let j' := c.inv_embedding j,
suffices B : (function.update v j z) ∘ r = function.update (v ∘ r) j' z,
by rw B,
suffices C : (function.update v (r j') z) ∘ r = function.update (v ∘ r) j' z,
by { convert C, exact (c.embedding_comp_inv j).symm },
exact function.update_comp_eq_of_injective _ (c.embedding _).injective _ _ },
{ simp only [h, function.update_eq_self, function.update_noteq, ne.def, not_false_iff],
let r : fin (c.blocks_fun k) → fin n := c.embedding k,
change p (c.blocks_fun k) ((function.update v j z) ∘ r) = p (c.blocks_fun k) (v ∘ r),
suffices B : (function.update v j z) ∘ r = v ∘ r, by rw B,
apply function.update_comp_eq_of_not_mem_range,
rwa c.mem_range_embedding_iff' }
end
@[simp] lemma comp_continuous_linear_map_apply_composition {n : ℕ}
(p : formal_multilinear_series 𝕜 F G) (f : E →L[𝕜] F) (c : composition n) (v : fin n → E) :
(p.comp_continuous_linear_map f).apply_composition c v = p.apply_composition c (f ∘ v) :=
by simp [apply_composition]
end formal_multilinear_series
namespace continuous_multilinear_map
open formal_multilinear_series
variables [topological_add_group E] [has_continuous_const_smul 𝕜 E]
variables [topological_add_group F] [has_continuous_const_smul 𝕜 F]
/-- Given a formal multilinear series `p`, a composition `c` of `n` and a continuous multilinear
map `f` in `c.length` variables, one may form a continuous multilinear map in `n` variables by
applying the right coefficient of `p` to each block of the composition, and then applying `f` to
the resulting vector. It is called `f.comp_along_composition p c`. -/
def comp_along_composition {n : ℕ}
(p : formal_multilinear_series 𝕜 E F) (c : composition n)
(f : continuous_multilinear_map 𝕜 (λ (i : fin c.length), F) G) :
continuous_multilinear_map 𝕜 (λ i : fin n, E) G :=
{ to_fun := λ v, f (p.apply_composition c v),
map_add' := λ _ v i x y, by
{ cases subsingleton.elim ‹_› (fin.decidable_eq _),
simp only [apply_composition_update, continuous_multilinear_map.map_add] },
map_smul' := λ _ v i c x, by
{ cases subsingleton.elim ‹_› (fin.decidable_eq _),
simp only [apply_composition_update, continuous_multilinear_map.map_smul] },
cont := f.cont.comp $ continuous_pi $ λ i, (coe_continuous _).comp $ continuous_pi $ λ j,
continuous_apply _, }
@[simp] lemma comp_along_composition_apply {n : ℕ}
(p : formal_multilinear_series 𝕜 E F) (c : composition n)
(f : continuous_multilinear_map 𝕜 (λ (i : fin c.length), F) G) (v : fin n → E) :
(f.comp_along_composition p c) v = f (p.apply_composition c v) := rfl
end continuous_multilinear_map
namespace formal_multilinear_series
variables [topological_add_group E] [has_continuous_const_smul 𝕜 E]
variables [topological_add_group F] [has_continuous_const_smul 𝕜 F]
variables [topological_add_group G] [has_continuous_const_smul 𝕜 G]
/-- Given two formal multilinear series `q` and `p` and a composition `c` of `n`, one may
form a continuous multilinear map in `n` variables by applying the right coefficient of `p` to each
block of the composition, and then applying `q c.length` to the resulting vector. It is
called `q.comp_along_composition p c`. -/
def comp_along_composition {n : ℕ}
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F)
(c : composition n) : continuous_multilinear_map 𝕜 (λ i : fin n, E) G :=
(q c.length).comp_along_composition p c
@[simp] lemma comp_along_composition_apply {n : ℕ}
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F)
(c : composition n) (v : fin n → E) :
(q.comp_along_composition p c) v = q c.length (p.apply_composition c v) := rfl
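/- For illustration (informally): if `c : composition 3` has blocks `[2, 1]`, then
`q.comp_along_composition p c v = q 2 ![p 2 ![v 0, v 1], p 1 ![v 2]]`,
i.e., the two inner applications of `p` are fed to the two-variable coefficient of `q`. -/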
/-- Formal composition of two formal multilinear series. The `n`-th coefficient in the composition
is defined to be the sum of `q.comp_along_composition p c` over all compositions of
`n`. In other words, this term (as a multilinear function applied to `v_0, ..., v_{n-1}`) is
`∑'_{k} ∑'_{i₁ + ... + iₖ = n} qₖ (p_{i_1} (...), ..., p_{i_k} (...))`, where one puts all variables
`v_0, ..., v_{n-1}` in increasing order in the dots.
In general, the composition `q ∘ p` only makes sense when the constant coefficient of `p` vanishes.
We give a general formula anyway; it simply ignores the value of `p 0`.
-/
protected def comp (q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F) :
formal_multilinear_series 𝕜 E G :=
λ n, ∑ c : composition n, q.comp_along_composition p c
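/- Unfolding the definition in low degree (informally): the compositions of `2` are `[2]` and
`[1, 1]`, so
`(q.comp p) 2 ![v 0, v 1] = q 1 ![p 2 ![v 0, v 1]] + q 2 ![p 1 ![v 0], p 1 ![v 1]]`,
matching the sum `∑ qₖ (p_{i₁} (...), ..., p_{iₖ} (...))` over `i₁ + ... + iₖ = 2`. -/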
/-- The `0`-th coefficient of `q.comp p` is `q 0`. Since these maps are multilinear maps in zero
variables, but on different spaces, we cannot state this directly, so we state it when applied to
arbitrary vectors (which have to be the zero vector). -/
lemma comp_coeff_zero (q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F)
(v : fin 0 → E) (v' : fin 0 → F) :
(q.comp p) 0 v = q 0 v' :=
begin
let c : composition 0 := composition.ones 0,
dsimp [formal_multilinear_series.comp],
have : {c} = (finset.univ : finset (composition 0)),
{ apply finset.eq_of_subset_of_card_le; simp [finset.card_univ, composition_card 0] },
rw [← this, finset.sum_singleton, comp_along_composition_apply],
symmetry, congr'
end
@[simp] lemma comp_coeff_zero'
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F) (v : fin 0 → E) :
(q.comp p) 0 v = q 0 (λ i, 0) :=
q.comp_coeff_zero p v _
/-- The `0`-th coefficient of `q.comp p` is `q 0`. When `p` goes from `E` to `E`, this can be
expressed as a direct equality -/
lemma comp_coeff_zero'' (q : formal_multilinear_series 𝕜 E F)
(p : formal_multilinear_series 𝕜 E E) :
(q.comp p) 0 = q 0 :=
by { ext v, exact q.comp_coeff_zero p _ _ }
/-- The first coefficient of a composition of formal multilinear series is the composition of the
first coefficients seen as continuous linear maps. -/
lemma comp_coeff_one (q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F)
(v : fin 1 → E) : (q.comp p) 1 v = q 1 (λ i, p 1 v) :=
begin
have : {composition.ones 1} = (finset.univ : finset (composition 1)) :=
finset.eq_univ_of_card _ (by simp [composition_card]),
simp only [formal_multilinear_series.comp, comp_along_composition_apply, ← this,
finset.sum_singleton],
refine q.congr (by simp) (λ i hi1 hi2, _),
simp only [apply_composition_ones],
exact p.congr rfl (λ j hj1 hj2, by congr)
end
/-- Only `0`-th coefficient of `q.comp p` depends on `q 0`. -/
lemma remove_zero_comp_of_pos (q : formal_multilinear_series 𝕜 F G)
(p : formal_multilinear_series 𝕜 E F) {n : ℕ} (hn : 0 < n) :
q.remove_zero.comp p n = q.comp p n :=
begin
ext v,
simp only [formal_multilinear_series.comp, comp_along_composition,
continuous_multilinear_map.comp_along_composition_apply, continuous_multilinear_map.sum_apply],
apply finset.sum_congr rfl (λ c hc, _),
rw remove_zero_of_pos _ (c.length_pos_of_pos hn)
end
@[simp] lemma comp_remove_zero (q : formal_multilinear_series 𝕜 F G)
(p : formal_multilinear_series 𝕜 E F) :
q.comp p.remove_zero = q.comp p :=
by { ext n, simp [formal_multilinear_series.comp] }
end formal_multilinear_series
end topological
variables [nontrivially_normed_field 𝕜]
[normed_add_comm_group E] [normed_space 𝕜 E]
[normed_add_comm_group F] [normed_space 𝕜 F]
[normed_add_comm_group G] [normed_space 𝕜 G]
[normed_add_comm_group H] [normed_space 𝕜 H]
namespace formal_multilinear_series
/-- The norm of `f.comp_along_composition p c` is controlled by the product of
the norms of the relevant bits of `f` and `p`. -/
lemma comp_along_composition_bound {n : ℕ}
(p : formal_multilinear_series 𝕜 E F) (c : composition n)
(f : continuous_multilinear_map 𝕜 (λ (i : fin c.length), F) G) (v : fin n → E) :
‖f.comp_along_composition p c v‖ ≤
‖f‖ * (∏ i, ‖p (c.blocks_fun i)‖) * (∏ i : fin n, ‖v i‖) :=
calc ‖f.comp_along_composition p c v‖ = ‖f (p.apply_composition c v)‖ : rfl
... ≤ ‖f‖ * ∏ i, ‖p.apply_composition c v i‖ : continuous_multilinear_map.le_op_norm _ _
... ≤ ‖f‖ * ∏ i, ‖p (c.blocks_fun i)‖ *
∏ j : fin (c.blocks_fun i), ‖(v ∘ (c.embedding i)) j‖ :
begin
apply mul_le_mul_of_nonneg_left _ (norm_nonneg _),
refine finset.prod_le_prod (λ i hi, norm_nonneg _) (λ i hi, _),
apply continuous_multilinear_map.le_op_norm,
end
... = ‖f‖ * (∏ i, ‖p (c.blocks_fun i)‖) *
∏ i (j : fin (c.blocks_fun i)), ‖(v ∘ (c.embedding i)) j‖ :
by rw [finset.prod_mul_distrib, mul_assoc]
... = ‖f‖ * (∏ i, ‖p (c.blocks_fun i)‖) * (∏ i : fin n, ‖v i‖) :
by { rw [← c.blocks_fin_equiv.prod_comp, ← finset.univ_sigma_univ, finset.prod_sigma],
congr }
/-- The norm of `q.comp_along_composition p c` is controlled by the product of
the norms of the relevant bits of `q` and `p`. -/
lemma comp_along_composition_norm {n : ℕ}
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F)
(c : composition n) :
‖q.comp_along_composition p c‖ ≤ ‖q c.length‖ * ∏ i, ‖p (c.blocks_fun i)‖ :=
continuous_multilinear_map.op_norm_le_bound _
(mul_nonneg (norm_nonneg _) (finset.prod_nonneg (λ i hi, norm_nonneg _)))
(comp_along_composition_bound _ _ _)
lemma comp_along_composition_nnnorm {n : ℕ}
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F)
(c : composition n) :
‖q.comp_along_composition p c‖₊ ≤ ‖q c.length‖₊ * ∏ i, ‖p (c.blocks_fun i)‖₊ :=
by { rw ← nnreal.coe_le_coe, push_cast, exact q.comp_along_composition_norm p c }
/-!
### The identity formal power series
We will now define the identity power series, and show that it is a neutral element for left and
right composition.
-/
section
variables (𝕜 E)
/-- The identity formal multilinear series, with all coefficients equal to `0` except for `n = 1`
where it is (the continuous multilinear version of) the identity. -/
def id : formal_multilinear_series 𝕜 E E
| 0 := 0
| 1 := (continuous_multilinear_curry_fin1 𝕜 E E).symm (continuous_linear_map.id 𝕜 E)
| _ := 0
/-- The first coefficient of `id 𝕜 E` is the identity. -/
@[simp] lemma id_apply_one (v : fin 1 → E) : (formal_multilinear_series.id 𝕜 E) 1 v = v 0 := rfl
/-- The `n`th coefficient of `id 𝕜 E` is the identity when `n = 1`. We state this in a dependent
way, as it will often appear in this form. -/
lemma id_apply_one' {n : ℕ} (h : n = 1) (v : fin n → E) :
(id 𝕜 E) n v = v ⟨0, h.symm ▸ zero_lt_one⟩ :=
begin
subst n,
apply id_apply_one
end
/-- For `n ≠ 1`, the `n`-th coefficient of `id 𝕜 E` is zero, by definition. -/
@[simp] lemma id_apply_ne_one {n : ℕ} (h : n ≠ 1) : (formal_multilinear_series.id 𝕜 E) n = 0 :=
by { cases n, { refl }, cases n, { contradiction }, refl }
end
@[simp] theorem comp_id (p : formal_multilinear_series 𝕜 E F) : p.comp (id 𝕜 E) = p :=
begin
ext1 n,
dsimp [formal_multilinear_series.comp],
rw finset.sum_eq_single (composition.ones n),
show comp_along_composition p (id 𝕜 E) (composition.ones n) = p n,
{ ext v,
rw comp_along_composition_apply,
apply p.congr (composition.ones_length n),
intros,
rw apply_composition_ones,
refine congr_arg v _,
rw [fin.ext_iff, fin.coe_cast_le, fin.coe_mk, fin.coe_mk], },
show ∀ (b : composition n),
b ∈ finset.univ → b ≠ composition.ones n → comp_along_composition p (id 𝕜 E) b = 0,
{ assume b _ hb,
obtain ⟨k, hk, lt_k⟩ : ∃ (k : ℕ) (H : k ∈ composition.blocks b), 1 < k :=
composition.ne_ones_iff.1 hb,
obtain ⟨i, i_lt, hi⟩ : ∃ (i : ℕ) (h : i < b.blocks.length), b.blocks.nth_le i h = k :=
nth_le_of_mem hk,
let j : fin b.length := ⟨i, b.blocks_length ▸ i_lt⟩,
have A : 1 < b.blocks_fun j := by convert lt_k,
ext v,
rw [comp_along_composition_apply, continuous_multilinear_map.zero_apply],
apply continuous_multilinear_map.map_coord_zero _ j,
dsimp [apply_composition],
rw id_apply_ne_one _ _ (ne_of_gt A),
refl },
{ simp }
end
@[simp] theorem id_comp (p : formal_multilinear_series 𝕜 E F) (h : p 0 = 0) : (id 𝕜 F).comp p = p :=
begin
ext1 n,
by_cases hn : n = 0,
{ rw [hn, h],
ext v,
rw [comp_coeff_zero', id_apply_ne_one _ _ zero_ne_one],
refl },
{ dsimp [formal_multilinear_series.comp],
have n_pos : 0 < n := bot_lt_iff_ne_bot.mpr hn,
rw finset.sum_eq_single (composition.single n n_pos),
show comp_along_composition (id 𝕜 F) p (composition.single n n_pos) = p n,
{ ext v,
rw [comp_along_composition_apply, id_apply_one' _ _ (composition.single_length n_pos)],
dsimp [apply_composition],
refine p.congr rfl (λ i him hin, congr_arg v $ _),
ext, simp },
show ∀ (b : composition n),
b ∈ finset.univ → b ≠ composition.single n n_pos → comp_along_composition (id 𝕜 F) p b = 0,
{ assume b _ hb,
have A : b.length ≠ 1, by simpa [composition.eq_single_iff_length] using hb,
ext v,
rw [comp_along_composition_apply, id_apply_ne_one _ _ A],
refl },
{ simp } }
end
/-! ### Summability properties of the composition of formal power series-/
section
/-- If two formal multilinear series have positive radius of convergence, then the terms appearing
in the definition of their composition are also summable (when multiplied by a suitable positive
geometric term). -/
theorem comp_summable_nnreal
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F)
(hq : 0 < q.radius) (hp : 0 < p.radius) :
∃ r > (0 : ℝ≥0),
summable (λ i : Σ n, composition n, ‖q.comp_along_composition p i.2‖₊ * r ^ i.1) :=
begin
/- This follows from the fact that the growth rate of `‖qₙ‖` and `‖pₙ‖` is at most geometric,
giving a geometric bound on each `‖q.comp_along_composition p op‖`, together with the
fact that there are `2^(n-1)` compositions of `n`, giving at most a geometric loss. -/
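-- For instance there are `2 ^ 2 = 4` compositions of `3`, namely `[3]`, `[2, 1]`, `[1, 2]` and
-- `[1, 1, 1]`; see `composition_card`.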
rcases ennreal.lt_iff_exists_nnreal_btwn.1 (lt_min zero_lt_one hq) with ⟨rq, rq_pos, hrq⟩,
rcases ennreal.lt_iff_exists_nnreal_btwn.1 (lt_min zero_lt_one hp) with ⟨rp, rp_pos, hrp⟩,
simp only [lt_min_iff, ennreal.coe_lt_one_iff, ennreal.coe_pos] at hrp hrq rp_pos rq_pos,
obtain ⟨Cq, hCq0, hCq⟩ : ∃ Cq > 0, ∀ n, ‖q n‖₊ * rq^n ≤ Cq :=
q.nnnorm_mul_pow_le_of_lt_radius hrq.2,
obtain ⟨Cp, hCp1, hCp⟩ : ∃ Cp ≥ 1, ∀ n, ‖p n‖₊ * rp^n ≤ Cp,
{ rcases p.nnnorm_mul_pow_le_of_lt_radius hrp.2 with ⟨Cp, -, hCp⟩,
exact ⟨max Cp 1, le_max_right _ _, λ n, (hCp n).trans (le_max_left _ _)⟩ },
let r0 : ℝ≥0 := (4 * Cp)⁻¹,
have r0_pos : 0 < r0 := inv_pos.2 (mul_pos zero_lt_four (zero_lt_one.trans_le hCp1)),
set r : ℝ≥0 := rp * rq * r0,
have r_pos : 0 < r := mul_pos (mul_pos rp_pos rq_pos) r0_pos,
have I : ∀ (i : Σ (n : ℕ), composition n),
‖q.comp_along_composition p i.2‖₊ * r ^ i.1 ≤ Cq / 4 ^ i.1,
{ rintros ⟨n, c⟩,
have A,
calc ‖q c.length‖₊ * rq ^ n ≤ ‖q c.length‖₊* rq ^ c.length :
mul_le_mul' le_rfl (pow_le_pow_of_le_one rq.2 hrq.1.le c.length_le)
... ≤ Cq : hCq _,
have B,
calc ((∏ i, ‖p (c.blocks_fun i)‖₊) * rp ^ n)
= ∏ i, ‖p (c.blocks_fun i)‖₊ * rp ^ c.blocks_fun i :
by simp only [finset.prod_mul_distrib, finset.prod_pow_eq_pow_sum, c.sum_blocks_fun]
... ≤ ∏ i : fin c.length, Cp : finset.prod_le_prod' (λ i _, hCp _)
... = Cp ^ c.length : by simp
... ≤ Cp ^ n : pow_le_pow hCp1 c.length_le,
calc ‖q.comp_along_composition p c‖₊ * r ^ n
≤ (‖q c.length‖₊ * ∏ i, ‖p (c.blocks_fun i)‖₊) * r ^ n :
mul_le_mul' (q.comp_along_composition_nnnorm p c) le_rfl
... = (‖q c.length‖₊ * rq ^ n) * ((∏ i, ‖p (c.blocks_fun i)‖₊) * rp ^ n) * r0 ^ n :
by { simp only [r, mul_pow], ring }
... ≤ Cq * Cp ^ n * r0 ^ n : mul_le_mul' (mul_le_mul' A B) le_rfl
... = Cq / 4 ^ n :
begin
simp only [r0],
field_simp [mul_pow, (zero_lt_one.trans_le hCp1).ne'],
ring
end },
refine ⟨r, r_pos, nnreal.summable_of_le I _⟩,
simp_rw div_eq_mul_inv,
refine summable.mul_left _ _,
have : ∀ n : ℕ, has_sum (λ c : composition n, (4 ^ n : ℝ≥0)⁻¹) (2 ^ (n - 1) / 4 ^ n),
{ intro n,
convert has_sum_fintype (λ c : composition n, (4 ^ n : ℝ≥0)⁻¹),
simp [finset.card_univ, composition_card, div_eq_mul_inv] },
refine nnreal.summable_sigma.2 ⟨λ n, (this n).summable, (nnreal.summable_nat_add_iff 1).1 _⟩,
convert (nnreal.summable_geometric (nnreal.div_lt_one_of_lt one_lt_two)).mul_left (1 / 4),
ext1 n,
rw [(this _).tsum_eq, add_tsub_cancel_right],
field_simp [← mul_assoc, pow_succ', mul_pow, show (4 : ℝ≥0) = 2 * 2, from (two_mul 2).symm,
mul_right_comm]
end
end
/-- Bounding below the radius of the composition of two formal multilinear series assuming
summability over all compositions. -/
theorem le_comp_radius_of_summable
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F) (r : ℝ≥0)
(hr : summable (λ i : (Σ n, composition n), ‖q.comp_along_composition p i.2‖₊ * r ^ i.1)) :
(r : ℝ≥0∞) ≤ (q.comp p).radius :=
begin
refine le_radius_of_bound_nnreal _
(∑' i : (Σ n, composition n), ‖comp_along_composition q p i.snd‖₊ * r ^ i.fst) (λ n, _),
calc ‖formal_multilinear_series.comp q p n‖₊ * r ^ n ≤
∑' (c : composition n), ‖comp_along_composition q p c‖₊ * r ^ n :
begin
rw [tsum_fintype, ← finset.sum_mul],
exact mul_le_mul' (nnnorm_sum_le _ _) le_rfl
end
... ≤ ∑' (i : Σ (n : ℕ), composition n), ‖comp_along_composition q p i.snd‖₊ * r ^ i.fst :
nnreal.tsum_comp_le_tsum_of_inj hr sigma_mk_injective
end
/-!
### Composing analytic functions
Now, we will prove that the composition of the partial sums of `q` and `p` up to order `N` is
given by a sum over some large subset of `Σ n, composition n` of `q.comp_along_composition p`, to
deduce that the series for `q.comp p` indeed converges to `g ∘ f` when `q` is a power series for
`g` and `p` is a power series for `f`.
This proof is a big reindexing argument of a sum. Since it is a bit involved, we define first
the source of the change of variables (`comp_partial_source`), its target
(`comp_partial_target`) and the change of variables itself (`comp_change_of_variables`) before
giving the main statement in `comp_partial_sum`. -/
/-- Source set in the change of variables to compute the composition of partial sums of formal
power series.
See also `comp_partial_sum`. -/
def comp_partial_sum_source (m M N : ℕ) : finset (Σ n, (fin n) → ℕ) :=
finset.sigma (finset.Ico m M) (λ (n : ℕ), fintype.pi_finset (λ (i : fin n), finset.Ico 1 N) : _)
@[simp] lemma mem_comp_partial_sum_source_iff (m M N : ℕ) (i : Σ n, (fin n) → ℕ) :
i ∈ comp_partial_sum_source m M N ↔
(m ≤ i.1 ∧ i.1 < M) ∧ ∀ (a : fin i.1), 1 ≤ i.2 a ∧ i.2 a < N :=
by simp only [comp_partial_sum_source, finset.mem_Ico, fintype.mem_pi_finset, finset.mem_sigma,
iff_self]
/-- Change of variables appearing to compute the composition of partial sums of formal
power series -/
def comp_change_of_variables (m M N : ℕ) (i : Σ n, (fin n) → ℕ)
(hi : i ∈ comp_partial_sum_source m M N) : (Σ n, composition n) :=
begin
rcases i with ⟨n, f⟩,
rw mem_comp_partial_sum_source_iff at hi,
refine ⟨∑ j, f j, of_fn (λ a, f a), λ i hi', _, by simp [sum_of_fn]⟩,
obtain ⟨j, rfl⟩ : ∃ (j : fin n), f j = i, by rwa [mem_of_fn, set.mem_range] at hi',
exact (hi.2 j).1
end
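/- Informally, `comp_change_of_variables m M N` sends a pair `⟨k, f⟩` (an outer length together
with the sizes of the inner blocks) to the composition of `∑ j, f j` whose blocks are
`[f 0, ..., f (k - 1)]`; for instance `⟨2, ![2, 3]⟩` is sent to the composition `[2, 3]` of `5`. -/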
@[simp] lemma comp_change_of_variables_length
(m M N : ℕ) {i : Σ n, (fin n) → ℕ} (hi : i ∈ comp_partial_sum_source m M N) :
composition.length (comp_change_of_variables m M N i hi).2 = i.1 :=
begin
rcases i with ⟨k, blocks_fun⟩,
dsimp [comp_change_of_variables],
simp only [composition.length, map_of_fn, length_of_fn]
end
lemma comp_change_of_variables_blocks_fun
(m M N : ℕ) {i : Σ n, (fin n) → ℕ} (hi : i ∈ comp_partial_sum_source m M N) (j : fin i.1) :
(comp_change_of_variables m M N i hi).2.blocks_fun
⟨j, (comp_change_of_variables_length m M N hi).symm ▸ j.2⟩ = i.2 j :=
begin
rcases i with ⟨n, f⟩,
dsimp [composition.blocks_fun, composition.blocks, comp_change_of_variables],
simp only [map_of_fn, nth_le_of_fn', function.comp_app],
apply congr_arg,
exact fin.eta _ _
end
/-- Target set in the change of variables to compute the composition of partial sums of formal
power series, here given as a set. -/
def comp_partial_sum_target_set (m M N : ℕ) : set (Σ n, composition n) :=
{i | (m ≤ i.2.length) ∧ (i.2.length < M) ∧ (∀ (j : fin i.2.length), i.2.blocks_fun j < N)}
lemma comp_partial_sum_target_subset_image_comp_partial_sum_source
(m M N : ℕ) (i : Σ n, composition n) (hi : i ∈ comp_partial_sum_target_set m M N) :
∃ j (hj : j ∈ comp_partial_sum_source m M N), i = comp_change_of_variables m M N j hj :=
begin
rcases i with ⟨n, c⟩,
refine ⟨⟨c.length, c.blocks_fun⟩, _, _⟩,
{ simp only [comp_partial_sum_target_set, set.mem_set_of_eq] at hi,
simp only [mem_comp_partial_sum_source_iff, hi.left, hi.right, true_and, and_true],
exact λ a, c.one_le_blocks' _ },
{ dsimp [comp_change_of_variables],
rw composition.sigma_eq_iff_blocks_eq,
simp only [composition.blocks_fun, composition.blocks, subtype.coe_eta, nth_le_map'],
conv_lhs { rw ← of_fn_nth_le c.blocks } }
end
/-- Target set in the change of variables to compute the composition of partial sums of formal
power series, here given as a finset.
See also `comp_partial_sum`. -/
def comp_partial_sum_target (m M N : ℕ) : finset (Σ n, composition n) :=
set.finite.to_finset $ ((finset.finite_to_set _).dependent_image _).subset $
comp_partial_sum_target_subset_image_comp_partial_sum_source m M N
@[simp] lemma mem_comp_partial_sum_target_iff {m M N : ℕ} {a : Σ n, composition n} :
a ∈ comp_partial_sum_target m M N ↔
m ≤ a.2.length ∧ a.2.length < M ∧ (∀ (j : fin a.2.length), a.2.blocks_fun j < N) :=
by simp [comp_partial_sum_target, comp_partial_sum_target_set]
/-- `comp_change_of_variables m M N` is a bijection between `comp_partial_sum_source m M N`
and `comp_partial_sum_target m M N`, yielding equal sums for functions that correspond to each
other under the bijection. As `comp_change_of_variables m M N` is a dependent function, stating
that it is a bijection is not directly possible, but the consequence on sums can be stated
more easily. -/
lemma comp_change_of_variables_sum {α : Type*} [add_comm_monoid α] (m M N : ℕ)
(f : (Σ (n : ℕ), fin n → ℕ) → α) (g : (Σ n, composition n) → α)
(h : ∀ e (he : e ∈ comp_partial_sum_source m M N),
f e = g (comp_change_of_variables m M N e he)) :
∑ e in comp_partial_sum_source m M N, f e = ∑ e in comp_partial_sum_target m M N, g e :=
begin
apply finset.sum_bij (comp_change_of_variables m M N),
-- We should show that the correspondence we have set up is indeed a bijection
-- between the index sets of the two sums.
-- 1 - show that the image belongs to `comp_partial_sum_target m M N`
{ rintros ⟨k, blocks_fun⟩ H,
rw mem_comp_partial_sum_source_iff at H,
simp only [mem_comp_partial_sum_target_iff, composition.length, composition.blocks, H.left,
map_of_fn, length_of_fn, true_and, comp_change_of_variables],
assume j,
simp only [composition.blocks_fun, (H.right _).right, nth_le_of_fn'] },
-- 2 - show that the composition gives the `comp_along_composition` application
{ rintros ⟨k, blocks_fun⟩ H,
rw h },
-- 3 - show that the map is injective
{ rintros ⟨k, blocks_fun⟩ ⟨k', blocks_fun'⟩ H H' heq,
obtain rfl : k = k',
{ have := (comp_change_of_variables_length m M N H).symm,
rwa [heq, comp_change_of_variables_length] at this, },
congr,
funext i,
calc blocks_fun i = (comp_change_of_variables m M N _ H).2.blocks_fun _ :
(comp_change_of_variables_blocks_fun m M N H i).symm
... = (comp_change_of_variables m M N _ H').2.blocks_fun _ :
begin
apply composition.blocks_fun_congr; try { rw heq },
refl
end
... = blocks_fun' i : comp_change_of_variables_blocks_fun m M N H' i },
-- 4 - show that the map is surjective
{ assume i hi,
apply comp_partial_sum_target_subset_image_comp_partial_sum_source m M N i,
simpa [comp_partial_sum_target] using hi }
end
/-- The auxiliary set corresponding to the composition of partial sums asymptotically contains
all possible compositions. -/
lemma comp_partial_sum_target_tendsto_at_top :
tendsto (λ N, comp_partial_sum_target 0 N N) at_top at_top :=
begin
apply monotone.tendsto_at_top_finset,
{ assume m n hmn a ha,
have : ∀ i, i < m → i < n := λ i hi, lt_of_lt_of_le hi hmn,
tidy },
{ rintros ⟨n, c⟩,
simp only [mem_comp_partial_sum_target_iff],
obtain ⟨n, hn⟩ : bdd_above ↑(finset.univ.image (λ (i : fin c.length), c.blocks_fun i)) :=
finset.bdd_above _,
refine ⟨max n c.length + 1, bot_le, lt_of_le_of_lt (le_max_right n c.length) (lt_add_one _),
λ j, lt_of_le_of_lt (le_trans _ (le_max_left _ _)) (lt_add_one _)⟩,
apply hn,
simp only [finset.mem_image_of_mem, finset.mem_coe, finset.mem_univ] }
end
/-- Composing the partial sums of two multilinear series coincides with the sum over all
compositions in `comp_partial_sum_target 0 N N`. This is precisely the motivation for the
definition of `comp_partial_sum_target`. -/
lemma comp_partial_sum
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F) (N : ℕ) (z : E) :
q.partial_sum N (∑ i in finset.Ico 1 N, p i (λ j, z)) =
∑ i in comp_partial_sum_target 0 N N, q.comp_along_composition p i.2 (λ j, z) :=
begin
-- we expand the composition, using the multilinearity of `q` to expand along each coordinate.
suffices H : ∑ n in finset.range N, ∑ r in fintype.pi_finset (λ (i : fin n), finset.Ico 1 N),
q n (λ (i : fin n), p (r i) (λ j, z)) =
∑ i in comp_partial_sum_target 0 N N, q.comp_along_composition p i.2 (λ j, z),
by simpa only [formal_multilinear_series.partial_sum,
continuous_multilinear_map.map_sum_finset] using H,
-- rewrite the first sum as a big sum over a sigma type, in the finset
-- `comp_partial_sum_target 0 N N`
rw [finset.range_eq_Ico, finset.sum_sigma'],
-- use `comp_change_of_variables_sum`, saying that this change of variables respects sums
apply comp_change_of_variables_sum 0 N N,
rintros ⟨k, blocks_fun⟩ H,
apply congr _ (comp_change_of_variables_length 0 N N H).symm,
intros,
rw ← comp_change_of_variables_blocks_fun 0 N N H,
refl
end
end formal_multilinear_series
open formal_multilinear_series
/-- If two functions `g` and `f` have power series `q` and `p` respectively at `f x` and `x`, then
`g ∘ f` admits the power series `q.comp p` at `x`. -/
theorem has_fpower_series_at.comp {g : F → G} {f : E → F}
{q : formal_multilinear_series 𝕜 F G} {p : formal_multilinear_series 𝕜 E F} {x : E}
(hg : has_fpower_series_at g q (f x)) (hf : has_fpower_series_at f p x) :
has_fpower_series_at (g ∘ f) (q.comp p) x :=
begin
/- Consider `rf` and `rg` such that `f` and `g` have power series expansion on the disks
of radius `rf` and `rg`. -/
rcases hg with ⟨rg, Hg⟩,
rcases hf with ⟨rf, Hf⟩,
/- The terms defining `q.comp p` are geometrically summable in a disk of some radius `r`. -/
rcases q.comp_summable_nnreal p Hg.radius_pos Hf.radius_pos with ⟨r, r_pos : 0 < r, hr⟩,
/- We will consider `y` which is smaller than `r` and `rf`, and also small enough that
`f (x + y)` is close enough to `f x` to be in the disk where `g` is well behaved. Let
`min (r, rf, δ)` be this new radius. -/
have : continuous_at f x := Hf.analytic_at.continuous_at,
obtain ⟨δ, δpos, hδ⟩ : ∃ (δ : ℝ≥0∞) (H : 0 < δ),
∀ {z : E}, z ∈ emetric.ball x δ → f z ∈ emetric.ball (f x) rg,
{ have : emetric.ball (f x) rg ∈ 𝓝 (f x) := emetric.ball_mem_nhds _ Hg.r_pos,
rcases emetric.mem_nhds_iff.1 (Hf.analytic_at.continuous_at this) with ⟨δ, δpos, Hδ⟩,
exact ⟨δ, δpos, λ z hz, Hδ hz⟩ },
let rf' := min rf δ,
have min_pos : 0 < min rf' r,
by simp only [r_pos, Hf.r_pos, δpos, lt_min_iff, ennreal.coe_pos, and_self],
/- We will show that `g ∘ f` admits the power series `q.comp p` in the disk of
radius `min (r, rf', δ)`. -/
refine ⟨min rf' r, _⟩,
refine ⟨le_trans (min_le_right rf' r)
(formal_multilinear_series.le_comp_radius_of_summable q p r hr), min_pos, λ y hy, _⟩,
/- Let `y` satisfy `‖y‖ < min (r, rf', δ)`. We want to show that `g (f (x + y))` is the sum of
`q.comp p` applied to `y`. -/
-- First, check that `y` is small enough so that estimates for `f` and `g` apply.
have y_mem : y ∈ emetric.ball (0 : E) rf :=
(emetric.ball_subset_ball (le_trans (min_le_left _ _) (min_le_left _ _))) hy,
have fy_mem : f (x + y) ∈ emetric.ball (f x) rg,
{ apply hδ,
have : y ∈ emetric.ball (0 : E) δ :=
(emetric.ball_subset_ball (le_trans (min_le_left _ _) (min_le_right _ _))) hy,
simpa [edist_eq_coe_nnnorm_sub, edist_eq_coe_nnnorm] },
/- Now the proof starts. To show that the sum of `q.comp p` at `y` is `g (f (x + y))`, we will
write `q.comp p` applied to `y` as a big sum over all compositions. Since the sum is
summable, to get its convergence it suffices to get the convergence along some increasing sequence
of sets. We will use the sequence of sets `comp_partial_sum_target 0 n n`, along which the sum is
exactly the composition of the partial sums of `q` and `p`, by design. To show that it converges
to `g (f (x + y))`, pointwise convergence would not be enough, but we have uniform convergence
to save the day. -/
-- First step: the partial sum of `p` converges to `f (x + y)`.
have A : tendsto (λ n, ∑ a in finset.Ico 1 n, p a (λ b, y)) at_top (𝓝 (f (x + y) - f x)),
{ have L : ∀ᶠ n in at_top, ∑ a in finset.range n, p a (λ b, y) - f x =
∑ a in finset.Ico 1 n, p a (λ b, y),
{ rw eventually_at_top,
refine ⟨1, λ n hn, _⟩,
symmetry,
rw [eq_sub_iff_add_eq', finset.range_eq_Ico, ← Hf.coeff_zero (λi, y),
finset.sum_eq_sum_Ico_succ_bot hn] },
have : tendsto (λ n, ∑ a in finset.range n, p a (λ b, y) - f x) at_top (𝓝 (f (x + y) - f x)) :=
(Hf.has_sum y_mem).tendsto_sum_nat.sub tendsto_const_nhds,
exact tendsto.congr' L this },
-- Second step: the composition of the partial sums of `q` and `p` converges to `g (f (x + y))`.
have B : tendsto (λ n, q.partial_sum n (∑ a in finset.Ico 1 n, p a (λ b, y)))
at_top (𝓝 (g (f (x + y)))),
{ -- we use the fact that the partial sums of `q` converge locally uniformly to `g`, and that
-- composition passes to the limit under locally uniform convergence.
have B₁ : continuous_at (λ (z : F), g (f x + z)) (f (x + y) - f x),
{ refine continuous_at.comp _ (continuous_const.add continuous_id).continuous_at,
simp only [add_sub_cancel'_right, id.def],
exact Hg.continuous_on.continuous_at (is_open.mem_nhds (emetric.is_open_ball) fy_mem) },
have B₂ : f (x + y) - f x ∈ emetric.ball (0 : F) rg,
by simpa [edist_eq_coe_nnnorm, edist_eq_coe_nnnorm_sub] using fy_mem,
rw [← emetric.is_open_ball.nhds_within_eq B₂] at A,
convert Hg.tendsto_locally_uniformly_on.tendsto_comp B₁.continuous_within_at B₂ A,
simp only [add_sub_cancel'_right] },
-- Third step: the sum over all compositions in `comp_partial_sum_target 0 n n` converges to
-- `g (f (x + y))`. As this sum is exactly the composition of the partial sum, this is a direct
-- consequence of the second step
have C : tendsto (λ n,
∑ i in comp_partial_sum_target 0 n n, q.comp_along_composition p i.2 (λ j, y))
at_top (𝓝 (g (f (x + y)))),
by simpa [comp_partial_sum] using B,
-- Fourth step: the sum over all compositions is `g (f (x + y))`. This follows from the
-- convergence along a subsequence proved in the third step, and the fact that the sum is Cauchy
-- thanks to the summability properties.
have D : has_sum (λ i : (Σ n, composition n),
q.comp_along_composition p i.2 (λ j, y)) (g (f (x + y))),
{ have cau : cauchy_seq (λ (s : finset (Σ n, composition n)),
∑ i in s, q.comp_along_composition p i.2 (λ j, y)),
{ apply cauchy_seq_finset_of_norm_bounded _ (nnreal.summable_coe.2 hr) _,
simp only [coe_nnnorm, nnreal.coe_mul, nnreal.coe_pow],
rintros ⟨n, c⟩,
calc ‖(comp_along_composition q p c) (λ (j : fin n), y)‖
≤ ‖comp_along_composition q p c‖ * ∏ j : fin n, ‖y‖ :
by apply continuous_multilinear_map.le_op_norm
... ≤ ‖comp_along_composition q p c‖ * (r : ℝ) ^ n :
begin
apply mul_le_mul_of_nonneg_left _ (norm_nonneg _),
rw [finset.prod_const, finset.card_fin],
apply pow_le_pow_of_le_left (norm_nonneg _),
rw [emetric.mem_ball, edist_eq_coe_nnnorm] at hy,
have := (le_trans (le_of_lt hy) (min_le_right _ _)),
rwa [ennreal.coe_le_coe, ← nnreal.coe_le_coe, coe_nnnorm] at this
end },
exact tendsto_nhds_of_cauchy_seq_of_subseq cau
comp_partial_sum_target_tendsto_at_top C },
-- Fifth step: the sum over `n` of `q.comp p n` can be expressed as a particular resummation of
-- the sum over all compositions, by grouping together the compositions of the same
-- integer `n`. The convergence of the whole sum therefore implies the convergence of the sum
-- of `q.comp p n`
have E : has_sum (λ n, (q.comp p) n (λ j, y)) (g (f (x + y))),
{ apply D.sigma,
assume n,
dsimp [formal_multilinear_series.comp],
convert has_sum_fintype _,
simp only [continuous_multilinear_map.sum_apply],
refl },
exact E
end
/-- If two functions `g` and `f` are analytic respectively at `f x` and `x`, then `g ∘ f` is
analytic at `x`. -/
theorem analytic_at.comp {g : F → G} {f : E → F} {x : E}
(hg : analytic_at 𝕜 g (f x)) (hf : analytic_at 𝕜 f x) : analytic_at 𝕜 (g ∘ f) x :=
let ⟨q, hq⟩ := hg, ⟨p, hp⟩ := hf in (hq.comp hp).analytic_at
/-!
### Associativity of the composition of formal multilinear series
In this paragraph, we prove the associativity of the composition of formal power series.
By definition,
```
(r.comp q).comp p n v
= ∑_{i₁ + ... + iₖ = n} (r.comp q)ₖ (p_{i₁} (v₀, ..., v_{i₁ -1}), p_{i₂} (...), ..., p_{iₖ}(...))
= ∑_{a : composition n} (r.comp q) a.length (apply_composition p a v)
```
decomposing `r.comp q` in the same way, we get
```
(r.comp q).comp p n v
= ∑_{a : composition n} ∑_{b : composition a.length}
r b.length (apply_composition q b (apply_composition p a v))
```
On the other hand,
```
r.comp (q.comp p) n v = ∑_{c : composition n} r c.length (apply_composition (q.comp p) c v)
```
Here, `apply_composition (q.comp p) c v` is a vector of length `c.length`, whose `i`-th term is
given by `(q.comp p) (c.blocks_fun i) (v_l, v_{l+1}, ..., v_{m-1})` where `{l, ..., m-1}` is the
`i`-th block in the composition `c`, of length `c.blocks_fun i` by definition. To compute this term,
we expand it as `∑_{dᵢ : composition (c.blocks_fun i)} q dᵢ.length (apply_composition p dᵢ v')`,
where `v' = (v_l, v_{l+1}, ..., v_{m-1})`. Therefore, we get
```
r.comp (q.comp p) n v =
∑_{c : composition n} ∑_{d₀ : composition (c.blocks_fun 0),
..., d_{c.length - 1} : composition (c.blocks_fun (c.length - 1))}
r c.length (λ i, q dᵢ.length (apply_composition p dᵢ v'ᵢ))
```
To show that these terms coincide, we need to explain how to reindex the sums to put them in
bijection (and then the terms we are summing will correspond to each other). Suppose we have a
composition `a` of `n`, and a composition `b` of `a.length`. Then `b` indicates how to group
together some blocks of `a`, giving altogether `b.length` blocks of blocks. These blocks of blocks
can be called `d₀, ..., d_{a.length - 1}`, and one obtains a composition `c` of `n` by saying that
each `dᵢ` is one single block. Conversely, if one starts from `c` and the `dᵢ`s, one can concatenate
the `dᵢ`s to obtain a composition `a` of `n`, and register the lengths of the `dᵢ`s in a composition
`b` of `a.length`.
An example might be enlightening. Suppose `a = [2, 2, 3, 4, 2]`. It is a composition of
length 5 of 13. The content of the blocks may be represented as `0011222333344`.
Now take `b = [2, 3]` as a composition of `a.length = 5`. It says that the first 2 blocks of `a`
should be merged, and the last 3 blocks of `a` should be merged, giving a new composition of `13`
made of two blocks of length `4` and `9`, i.e., `c = [4, 9]`. But one can also remember that
the new first block was initially made of two blocks of size `2`, so `d₀ = [2, 2]`, and the new
second block was initially made of three blocks of size `3`, `4` and `2`, so `d₁ = [3, 4, 2]`.
This equivalence is called `composition.sigma_equiv_sigma_pi n` below.
We start with preliminary results on compositions, of a very specialized nature, then define the
equivalence `composition.sigma_equiv_sigma_pi n`, and we deduce finally the associativity of
composition of formal multilinear series in `formal_multilinear_series.comp_assoc`.
-/
namespace composition
variable {n : ℕ}
/-- Rewriting equality in the dependent type `Σ (a : composition n), composition a.length)` in
non-dependent terms with lists, requiring that the blocks coincide. -/
lemma sigma_composition_eq_iff (i j : Σ (a : composition n), composition a.length) :
i = j ↔ i.1.blocks = j.1.blocks ∧ i.2.blocks = j.2.blocks :=
begin
refine ⟨by rintro rfl; exact ⟨rfl, rfl⟩, _⟩,
rcases i with ⟨a, b⟩,
rcases j with ⟨a', b'⟩,
rintros ⟨h, h'⟩,
have H : a = a', by { ext1, exact h },
induction H, congr, ext1, exact h'
end
/-- Rewriting equality in the dependent type
`Σ (c : composition n), Π (i : fin c.length), composition (c.blocks_fun i)` in
non-dependent terms with lists, requiring that the lists of blocks coincide. -/
lemma sigma_pi_composition_eq_iff
(u v : Σ (c : composition n), Π (i : fin c.length), composition (c.blocks_fun i)) :
u = v ↔ of_fn (λ i, (u.2 i).blocks) = of_fn (λ i, (v.2 i).blocks) :=
begin
refine ⟨λ H, by rw H, λ H, _⟩,
rcases u with ⟨a, b⟩,
rcases v with ⟨a', b'⟩,
dsimp at H,
have h : a = a',
{ ext1,
have : map list.sum (of_fn (λ (i : fin (composition.length a)), (b i).blocks)) =
map list.sum (of_fn (λ (i : fin (composition.length a')), (b' i).blocks)), by rw H,
simp only [map_of_fn] at this,
change of_fn (λ (i : fin (composition.length a)), (b i).blocks.sum) =
of_fn (λ (i : fin (composition.length a')), (b' i).blocks.sum) at this,
simpa [composition.blocks_sum, composition.of_fn_blocks_fun] using this },
induction h,
simp only [true_and, eq_self_iff_true, heq_iff_eq],
ext i : 2,
have : nth_le (of_fn (λ (i : fin (composition.length a)), (b i).blocks)) i (by simp [i.is_lt]) =
nth_le (of_fn (λ (i : fin (composition.length a)), (b' i).blocks)) i (by simp [i.is_lt]) :=
nth_le_of_eq H _,
rwa [nth_le_of_fn, nth_le_of_fn] at this
end
/-- When `a` is a composition of `n` and `b` is a composition of `a.length`, `a.gather b` is the
composition of `n` obtained by gathering all the blocks of `a` corresponding to a block of `b`.
For instance, if `a = [6, 5, 3, 5, 2]` and `b = [2, 3]`, one should gather together
the first two blocks of `a` and its last three blocks, giving `a.gather b = [11, 10]`. -/
def gather (a : composition n) (b : composition a.length) : composition n :=
{ blocks := (a.blocks.split_wrt_composition b).map sum,
blocks_pos :=
begin
rw forall_mem_map_iff,
intros j hj,
suffices H : ∀ i ∈ j, 1 ≤ i, from
calc 0 < j.length : length_pos_of_mem_split_wrt_composition hj
... ≤ j.sum : length_le_sum_of_one_le _ H,
intros i hi,
apply a.one_le_blocks,
rw ← a.blocks.join_split_wrt_composition b,
exact mem_join_of_mem hj hi,
end,
blocks_sum := by { rw [← sum_join, join_split_wrt_composition, a.blocks_sum] } }
lemma length_gather (a : composition n) (b : composition a.length) :
length (a.gather b) = b.length :=
show (map list.sum (a.blocks.split_wrt_composition b)).length = b.blocks.length,
by rw [length_map, length_split_wrt_composition]
/-- An auxiliary function used in the definition of `sigma_equiv_sigma_pi` below, associating to
two compositions `a` of `n` and `b` of `a.length`, and an index `i` bounded by the length of
`a.gather b`, the subcomposition of `a` made of those blocks belonging to the `i`-th block of
`a.gather b`. -/
def sigma_composition_aux (a : composition n) (b : composition a.length)
(i : fin (a.gather b).length) :
composition ((a.gather b).blocks_fun i) :=
{ blocks := nth_le (a.blocks.split_wrt_composition b) i
(by { rw [length_split_wrt_composition, ← length_gather], exact i.2 }),
blocks_pos := assume i hi, a.blocks_pos
(by { rw ← a.blocks.join_split_wrt_composition b,
exact mem_join_of_mem (nth_le_mem _ _ _) hi }),
blocks_sum := by simp only [composition.blocks_fun, nth_le_map', composition.gather] }
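-- Illustrative check, added for exposition (hypothetical data, phrased with `take`/`drop` instead
-- of `split_wrt_composition`): on the running example `a.blocks = [2, 2, 3, 4, 2]`,
-- `b.blocks = [2, 3]`, the two subcompositions above have blocks `[2, 2]` and `[3, 4, 2]`.
example : ([2, 2, 3, 4, 2] : list ℕ).take 2 = [2, 2] ∧
  ([2, 2, 3, 4, 2] : list ℕ).drop 2 = [3, 4, 2] :=
⟨rfl, rfl⟩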
lemma length_sigma_composition_aux (a : composition n) (b : composition a.length)
(i : fin b.length) :
composition.length (composition.sigma_composition_aux a b ⟨i, (length_gather a b).symm ▸ i.2⟩) =
composition.blocks_fun b i :=
show list.length (nth_le (split_wrt_composition a.blocks b) i _) = blocks_fun b i,
by { rw [nth_le_map_rev list.length, nth_le_of_eq (map_length_split_wrt_composition _ _)], refl }
lemma blocks_fun_sigma_composition_aux (a : composition n) (b : composition a.length)
(i : fin b.length) (j : fin (blocks_fun b i)) :
blocks_fun (sigma_composition_aux a b ⟨i, (length_gather a b).symm ▸ i.2⟩)
⟨j, (length_sigma_composition_aux a b i).symm ▸ j.2⟩ = blocks_fun a (embedding b i j) :=
show nth_le (nth_le _ _ _) _ _ = nth_le a.blocks _ _,
by { rw [nth_le_of_eq (nth_le_split_wrt_composition _ _ _), nth_le_drop', nth_le_take'], refl }
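-- Illustrative check, added for exposition (hypothetical data, phrased with `list.nth`): on the
-- running example, the block of index `j = 1` in the second subcomposition `[3, 4, 2]` is the
-- block of `a` sitting at position `size_up_to b 1 + 1 = 3`.
example : ([3, 4, 2] : list ℕ).nth 1 = ([2, 2, 3, 4, 2] : list ℕ).nth 3 := rfl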
/-- Auxiliary lemma to prove that the composition of formal multilinear series is associative.
Consider a composition `a` of `n` and a composition `b` of `a.length`. Grouping together some
blocks of `a` according to `b` as in `a.gather b`, one can compute the total size of the blocks
of `a` up to an index `size_up_to b i + j` (where the `j` corresponds to a set of blocks of `a`
that do not fill a whole block of `a.gather b`). The first part corresponds to a sum of blocks
in `a.gather b`, and the second one to a sum of blocks in the next block of
`sigma_composition_aux a b`. This is the content of this lemma. -/
lemma size_up_to_size_up_to_add (a : composition n) (b : composition a.length)
{i j : ℕ} (hi : i < b.length) (hj : j < blocks_fun b ⟨i, hi⟩) :
size_up_to a (size_up_to b i + j) = size_up_to (a.gather b) i +
(size_up_to (sigma_composition_aux a b ⟨i, (length_gather a b).symm ▸ hi⟩) j) :=
begin
induction j with j IHj,
{ show sum (take ((b.blocks.take i).sum) a.blocks) =
sum (take i (map sum (split_wrt_composition a.blocks b))),
induction i with i IH,
{ refl },
{ have A : i < b.length := nat.lt_of_succ_lt hi,
have B : i < list.length (map list.sum (split_wrt_composition a.blocks b)), by simp [A],
have C : 0 < blocks_fun b ⟨i, A⟩ := composition.blocks_pos' _ _ _,
rw [sum_take_succ _ _ B, ← IH A C],
have : take (sum (take i b.blocks)) a.blocks =
take (sum (take i b.blocks)) (take (sum (take (i+1) b.blocks)) a.blocks),
{ rw [take_take, min_eq_left],
apply monotone_sum_take _ (nat.le_succ _) },
rw [this, nth_le_map', nth_le_split_wrt_composition,
← take_append_drop (sum (take i b.blocks))
((take (sum (take (nat.succ i) b.blocks)) a.blocks)), sum_append],
congr,
rw [take_append_drop] } },
{ have A : j < blocks_fun b ⟨i, hi⟩ := lt_trans (lt_add_one j) hj,
have B : j < length (sigma_composition_aux a b ⟨i, (length_gather a b).symm ▸ hi⟩),
by { convert A, rw [← length_sigma_composition_aux], refl },
have C : size_up_to b i + j < size_up_to b (i + 1),
{ simp only [size_up_to_succ b hi, add_lt_add_iff_left],
exact A },
have D : size_up_to b i + j < length a := lt_of_lt_of_le C (b.size_up_to_le _),
have : size_up_to b i + nat.succ j = (size_up_to b i + j).succ := rfl,
rw [this, size_up_to_succ _ D, IHj A, size_up_to_succ _ B],
simp only [sigma_composition_aux, add_assoc, add_left_inj, fin.coe_mk],
rw [nth_le_of_eq (nth_le_split_wrt_composition _ _ _), nth_le_drop', nth_le_take _ _ C] }
end
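-- Worked instance of the previous lemma, added for exposition (hypothetical data, phrased with
-- `list.take`/`list.sum` instead of `size_up_to`): with `a.blocks = [2, 2, 3, 4, 2]`,
-- `b.blocks = [2, 3]`, `i = 1` and `j = 1`, both sides of the lemma compute to `7 = 4 + 3`.
example : (([2, 2, 3, 4, 2] : list ℕ).take 3).sum
  = (([4, 9] : list ℕ).take 1).sum + (([3, 4, 2] : list ℕ).take 1).sum := rfl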
/--
Natural equivalence between `(Σ (a : composition n), composition a.length)` and
`(Σ (c : composition n), Π (i : fin c.length), composition (c.blocks_fun i))`, that shows up as a
change of variables in the proof that composition of formal multilinear series is associative.
Consider a composition `a` of `n` and a composition `b` of `a.length`. Then `b` indicates how to
group together some blocks of `a`, giving altogether `b.length` blocks of blocks. These blocks of
blocks can be called `d₀, ..., d_{b.length - 1}`, and one obtains a composition `c` of `n` by
saying that each `dᵢ` is one single block. The map `⟨a, b⟩ → ⟨c, (d₀, ..., d_{b.length - 1})⟩` is
the forward map of the equivalence.
Conversely, if one starts from `c` and the `dᵢ`s, one can join the `dᵢ`s to obtain a composition
`a` of `n`, and register the lengths of the `dᵢ`s in a composition `b` of `a.length`. This is the
inverse map of the equivalence.
-/
def sigma_equiv_sigma_pi (n : ℕ) :
(Σ (a : composition n), composition a.length) ≃
(Σ (c : composition n), Π (i : fin c.length), composition (c.blocks_fun i)) :=
{ to_fun := λ i, ⟨i.1.gather i.2, i.1.sigma_composition_aux i.2⟩,
inv_fun := λ i, ⟨
{ blocks := (of_fn (λ j, (i.2 j).blocks)).join,
blocks_pos :=
begin
simp only [and_imp, list.mem_join, exists_imp_distrib, forall_mem_of_fn_iff],
exact λ i j hj, composition.blocks_pos _ hj
end,
blocks_sum := by simp [sum_of_fn, composition.blocks_sum, composition.sum_blocks_fun] },
{ blocks := of_fn (λ j, (i.2 j).length),
blocks_pos := forall_mem_of_fn_iff.2
(λ j, composition.length_pos_of_pos _ (composition.blocks_pos' _ _ _)),
blocks_sum := by { dsimp only [composition.length], simp [sum_of_fn] } }⟩,
left_inv :=
begin
-- the fact that we have a left inverse is essentially `join_split_wrt_composition`,
-- but we need to massage it to take care of the dependent setting.
rintros ⟨a, b⟩,
rw sigma_composition_eq_iff,
dsimp,
split,
{ have A := length_map list.sum (split_wrt_composition a.blocks b),
conv_rhs { rw [← join_split_wrt_composition a.blocks b,
← of_fn_nth_le (split_wrt_composition a.blocks b)] },
congr,
{ exact A },
{ exact (fin.heq_fun_iff A).2 (λ i, rfl) } },
{ have B : composition.length (composition.gather a b) = list.length b.blocks :=
composition.length_gather _ _,
conv_rhs { rw [← of_fn_nth_le b.blocks] },
congr' 1,
apply (fin.heq_fun_iff B).2 (λ i, _),
rw [sigma_composition_aux, composition.length, nth_le_map_rev list.length,
nth_le_of_eq (map_length_split_wrt_composition _ _)], refl }
end,
right_inv :=
begin
-- the fact that we have a right inverse is essentially `split_wrt_composition_join`,
-- but we need to massage it to take care of the dependent setting.
rintros ⟨c, d⟩,
have : map list.sum (of_fn (λ (i : fin (composition.length c)), (d i).blocks)) = c.blocks,
by simp [map_of_fn, (∘), composition.blocks_sum, composition.of_fn_blocks_fun],
rw sigma_pi_composition_eq_iff,
dsimp,
congr,
{ ext1,
dsimp [composition.gather],
rwa split_wrt_composition_join,
simp only [map_of_fn] },
{ rw fin.heq_fun_iff,
{ assume i,
dsimp [composition.sigma_composition_aux],
rw [nth_le_of_eq (split_wrt_composition_join _ _ _)],
{ simp only [nth_le_of_fn'] },
{ simp only [map_of_fn] } },
{ congr,
ext1,
dsimp [composition.gather],
rwa split_wrt_composition_join,
simp only [map_of_fn] } }
end }
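-- Illustrative check of the inverse map, added for exposition (hypothetical data, using plain
-- list operations): joining the fibers `[2, 2]` and `[3, 4, 2]` recovers the blocks of `a`,
-- while their lengths recover the blocks of `b`.
example : ([[2, 2], [3, 4, 2]] : list (list ℕ)).join = [2, 2, 3, 4, 2] ∧
  ([[2, 2], [3, 4, 2]] : list (list ℕ)).map list.length = [2, 3] :=
⟨rfl, rfl⟩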
end composition
namespace formal_multilinear_series
open composition
theorem comp_assoc (r : formal_multilinear_series 𝕜 G H) (q : formal_multilinear_series 𝕜 F G)
(p : formal_multilinear_series 𝕜 E F) :
(r.comp q).comp p = r.comp (q.comp p) :=
begin
ext n v,
/- First, rewrite the two compositions appearing in the theorem as two sums over complicated
sigma types, as in the description of the proof above. -/
let f : (Σ (a : composition n), composition a.length) → H :=
λ c, r c.2.length (apply_composition q c.2 (apply_composition p c.1 v)),
let g : (Σ (c : composition n), Π (i : fin c.length), composition (c.blocks_fun i)) → H :=
λ c, r c.1.length (λ (i : fin c.1.length),
q (c.2 i).length (apply_composition p (c.2 i) (v ∘ c.1.embedding i))),
suffices : ∑ c, f c = ∑ c, g c,
by simpa only [formal_multilinear_series.comp, continuous_multilinear_map.sum_apply,
comp_along_composition_apply, continuous_multilinear_map.map_sum, finset.sum_sigma',
apply_composition],
/- Now, we use `composition.sigma_equiv_sigma_pi n` to change
variables in the second sum, and check that we get exactly the same sums. -/
rw ← (sigma_equiv_sigma_pi n).sum_comp,
/- To check that we have the same terms, we should check that we apply the same component of
`r`, and the same component of `q`, and the same component of `p`, to the same coordinate of
`v`. This is true by definition, but at each step one needs to convince Lean that the types
one considers are the same, using a suitable congruence lemma to avoid dependent type issues.
This dance has to be done three times, once for `r`, once for `q` and once for `p`. -/
apply finset.sum_congr rfl,
rintros ⟨a, b⟩ _,
dsimp [f, g, sigma_equiv_sigma_pi],
-- check that the `r` components are the same. Based on `composition.length_gather`
apply r.congr (composition.length_gather a b).symm,
intros i hi1 hi2,
-- check that the `q` components are the same. Based on `length_sigma_composition_aux`
apply q.congr (length_sigma_composition_aux a b _).symm,
intros j hj1 hj2,
-- check that the `p` components are the same. Based on `blocks_fun_sigma_composition_aux`
apply p.congr (blocks_fun_sigma_composition_aux a b _ _).symm,
intros k hk1 hk2,
-- finally, check that the coordinates of `v` one is using are the same. Based on
-- `size_up_to_size_up_to_add`.
refine congr_arg v (fin.eq_of_veq _),
dsimp [composition.embedding],
rw [size_up_to_size_up_to_add _ _ hi1 hj1, add_assoc],
end
end formal_multilinear_series
|
37a85383b9bc5f0f084ffb1f1667953df07e42f7
|
74addaa0e41490cbaf2abd313a764c96df57b05d
|
/Mathlib/control/fix.lean
|
d0a1bea6fada355bcbad56a617fecbc53c91dce8
|
[] |
no_license
|
AurelienSaue/Mathlib4_auto
|
f538cfd0980f65a6361eadea39e6fc639e9dae14
|
590df64109b08190abe22358fabc3eae000943f2
|
refs/heads/master
| 1,683,906,849,776
| 1,622,564,669,000
| 1,622,564,669,000
| 371,723,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 2,945
|
lean
|
/-
Copyright (c) 2020 Simon Hudon. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Simon Hudon
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.data.nat.upto
import Mathlib.data.stream.basic
import Mathlib.data.pfun
import Mathlib.PostPort
universes u_3 l u_1 u_2
namespace Mathlib
/-!
# Fixed point
This module defines a generic `fix` operator for defining recursive
computations that are not necessarily well-founded or productive.
An instance is defined for `roption`.
## Main definition
* class `has_fix`
* `roption.fix`
-/
/-- `has_fix α` gives us a way to calculate the fixed point
of a function of type `α → α`. -/
class has_fix (α : Type u_3)
where
fix : (α → α) → α
namespace roption
/-- A series of successive, finite approximations of the fixed point of `f`, defined by
`approx f n = f^[n] ⊥`. The limit of this chain is the fixed point of `f`. -/
def fix.approx {α : Type u_1} {β : α → Type u_2} (f : ((a : α) → roption (β a)) → (a : α) → roption (β a)) : stream ((a : α) → roption (β a)) :=
sorry
/-- loop body for finding the fixed point of `f` -/
def fix_aux {α : Type u_1} {β : α → Type u_2} (f : ((a : α) → roption (β a)) → (a : α) → roption (β a)) {p : ℕ → Prop} (i : nat.upto p) (g : (j : nat.upto p) → i < j → (a : α) → roption (β a)) (a : α) : roption (β a) :=
f fun (x : α) => assert (¬p (subtype.val i)) fun (h : ¬p (subtype.val i)) => g (nat.upto.succ i h) sorry x
/-- The least fixed point of `f`.
If `f` is a continuous function (according to complete partial orders),
it satisfies the equations:
1. `fix f = f (fix f)` (is a fixed point)
2. `∀ X, f X ≤ X → fix f ≤ X` (least fixed point)
-/
protected def fix {α : Type u_1} {β : α → Type u_2} (f : ((a : α) → roption (β a)) → (a : α) → roption (β a)) (x : α) : roption (β x) :=
assert (∃ (i : ℕ), dom sorry) fun (h : ∃ (i : ℕ), dom sorry) => well_founded.fix sorry (fix_aux f) nat.upto.zero x
protected theorem fix_def {α : Type u_1} {β : α → Type u_2} (f : ((a : α) → roption (β a)) → (a : α) → roption (β a)) {x : α} (h' : ∃ (i : ℕ), dom (fix.approx f i x)) : roption.fix f x = fix.approx f (Nat.succ (nat.find h')) x := sorry
theorem fix_def' {α : Type u_1} {β : α → Type u_2} (f : ((a : α) → roption (β a)) → (a : α) → roption (β a)) {x : α} (h' : ¬∃ (i : ℕ), dom (fix.approx f i x)) : roption.fix f x = none := sorry
end roption
namespace roption
protected instance has_fix {α : Type u_1} : has_fix (roption α) :=
has_fix.mk fun (f : roption α → roption α) => roption.fix (fun (x : Unit → roption α) (u : Unit) => f (x u)) Unit.unit
end roption
namespace pi
protected instance roption.has_fix {α : Type u_1} {β : Type u_2} : has_fix (α → roption β) :=
has_fix.mk roption.fix
|
c3e39f2c3dcf9a7b561a8d82c2ba42c4867524ef
|
b7f22e51856f4989b970961f794f1c435f9b8f78
|
/tests/lean/run/section5.lean
|
be38ea3f7551d375ce236be2f060be8c148e8d62
|
[
"Apache-2.0"
] |
permissive
|
soonhokong/lean
|
cb8aa01055ffe2af0fb99a16b4cda8463b882cd1
|
38607e3eb57f57f77c0ac114ad169e9e4262e24f
|
refs/heads/master
| 1,611,187,284,081
| 1,450,766,737,000
| 1,476,122,547,000
| 11,513,992
| 2
| 0
| null | 1,401,763,102,000
| 1,374,182,235,000
|
C++
|
UTF-8
|
Lean
| false
| false
| 254
|
lean
|
section foo
parameter A : Type
variable a : A
definition foo := a
check foo
structure point [class] :=
(x : A) (y : A)
end foo
check foo
definition point_nat [instance] : point nat :=
point.mk nat.zero nat.zero
print classes
check point
|
fed4f4d5e159067f195e1615f9e9d72a50962d80
|
d406927ab5617694ec9ea7001f101b7c9e3d9702
|
/src/algebra/category/Ring/basic.lean
|
fe15d3d8ab34be950174abd1f382f9c9ea7ae278
|
[
"Apache-2.0"
] |
permissive
|
alreadydone/mathlib
|
dc0be621c6c8208c581f5170a8216c5ba6721927
|
c982179ec21091d3e102d8a5d9f5fe06c8fafb73
|
refs/heads/master
| 1,685,523,275,196
| 1,670,184,141,000
| 1,670,184,141,000
| 287,574,545
| 0
| 0
|
Apache-2.0
| 1,670,290,714,000
| 1,597,421,623,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 10,108
|
lean
|
/-
Copyright (c) 2018 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Johannes Hölzl, Yury Kudryashov
-/
import algebra.category.Group.basic
import category_theory.concrete_category.reflects_isomorphisms
import category_theory.elementwise
import algebra.ring.equiv
/-!
# Category instances for semiring, ring, comm_semiring, and comm_ring.
We introduce the bundled categories:
* `SemiRing`
* `Ring`
* `CommSemiRing`
* `CommRing`
along with the relevant forgetful functors between them.
-/
universes u v
open category_theory
/-- The category of semirings. -/
def SemiRing : Type (u+1) := bundled semiring
namespace SemiRing
/-- `ring_hom` doesn't actually assume associativity. This alias is needed to make the category
theory machinery work. We use the same trick in `category_theory.Mon.assoc_monoid_hom`. -/
abbreviation assoc_ring_hom (M N : Type*) [semiring M] [semiring N] := ring_hom M N
instance bundled_hom : bundled_hom assoc_ring_hom :=
⟨λ M N [semiring M] [semiring N], by exactI @ring_hom.to_fun M N _ _,
λ M [semiring M], by exactI @ring_hom.id M _,
λ M N P [semiring M] [semiring N] [semiring P], by exactI @ring_hom.comp M N P _ _ _,
λ M N [semiring M] [semiring N], by exactI @ring_hom.coe_inj M N _ _⟩
attribute [derive [large_category, concrete_category]] SemiRing
instance : has_coe_to_sort SemiRing Type* := bundled.has_coe_to_sort
/-- Construct a bundled SemiRing from the underlying type and typeclass. -/
def of (R : Type u) [semiring R] : SemiRing := bundled.of R
/-- Typecheck a `ring_hom` as a morphism in `SemiRing`. -/
def of_hom {R S : Type u} [semiring R] [semiring S] (f : R →+* S) : of R ⟶ of S := f
@[simp] lemma of_hom_apply {R S : Type u} [semiring R] [semiring S] (f : R →+* S) (x : R) :
of_hom f x = f x := rfl
instance : inhabited SemiRing := ⟨of punit⟩
instance (R : SemiRing) : semiring R := R.str
@[simp] lemma coe_of (R : Type u) [semiring R] : (SemiRing.of R : Type u) = R := rfl
instance has_forget_to_Mon : has_forget₂ SemiRing Mon :=
bundled_hom.mk_has_forget₂
(λ R hR, @monoid_with_zero.to_monoid R (@semiring.to_monoid_with_zero R hR))
(λ R₁ R₂, ring_hom.to_monoid_hom) (λ _ _ _, rfl)
instance has_forget_to_AddCommMon : has_forget₂ SemiRing AddCommMon :=
-- can't use bundled_hom.mk_has_forget₂, since AddCommMon is an induced category
{ forget₂ :=
{ obj := λ R, AddCommMon.of R,
map := λ R₁ R₂ f, ring_hom.to_add_monoid_hom f } }
end SemiRing
/-- The category of rings. -/
def Ring : Type (u+1) := bundled ring
namespace Ring
instance : bundled_hom.parent_projection @ring.to_semiring := ⟨⟩
attribute [derive [(λ Ring, has_coe_to_sort Ring Type*), large_category, concrete_category]] Ring
/-- Construct a bundled Ring from the underlying type and typeclass. -/
def of (R : Type u) [ring R] : Ring := bundled.of R
/-- Typecheck a `ring_hom` as a morphism in `Ring`. -/
def of_hom {R S : Type u} [ring R] [ring S] (f : R →+* S) : of R ⟶ of S := f
@[simp] lemma of_hom_apply {R S : Type u} [ring R] [ring S] (f : R →+* S) (x : R) :
of_hom f x = f x := rfl
instance : inhabited Ring := ⟨of punit⟩
instance (R : Ring) : ring R := R.str
@[simp] lemma coe_of (R : Type u) [ring R] : (Ring.of R : Type u) = R := rfl
instance has_forget_to_SemiRing : has_forget₂ Ring SemiRing := bundled_hom.forget₂ _ _
instance has_forget_to_AddCommGroup : has_forget₂ Ring AddCommGroup :=
-- can't use bundled_hom.mk_has_forget₂, since AddCommGroup is an induced category
{ forget₂ :=
{ obj := λ R, AddCommGroup.of R,
map := λ R₁ R₂ f, ring_hom.to_add_monoid_hom f } }
end Ring
/-- The category of commutative semirings. -/
def CommSemiRing : Type (u+1) := bundled comm_semiring
namespace CommSemiRing
instance : bundled_hom.parent_projection @comm_semiring.to_semiring := ⟨⟩
attribute [derive [large_category, concrete_category]] CommSemiRing
instance : has_coe_to_sort CommSemiRing Type* := bundled.has_coe_to_sort
/-- Construct a bundled CommSemiRing from the underlying type and typeclass. -/
def of (R : Type u) [comm_semiring R] : CommSemiRing := bundled.of R
/-- Typecheck a `ring_hom` as a morphism in `CommSemiRing`. -/
def of_hom {R S : Type u} [comm_semiring R] [comm_semiring S] (f : R →+* S) : of R ⟶ of S := f
@[simp]
lemma of_hom_apply {R S : Type u} [comm_semiring R] [comm_semiring S] (f : R →+* S) (x : R) :
of_hom f x = f x := rfl
instance : inhabited CommSemiRing := ⟨of punit⟩
instance (R : CommSemiRing) : comm_semiring R := R.str
@[simp] lemma coe_of (R : Type u) [comm_semiring R] : (CommSemiRing.of R : Type u) = R := rfl
instance has_forget_to_SemiRing : has_forget₂ CommSemiRing SemiRing := bundled_hom.forget₂ _ _
/-- The forgetful functor from commutative rings to (multiplicative) commutative monoids. -/
instance has_forget_to_CommMon : has_forget₂ CommSemiRing CommMon :=
has_forget₂.mk'
(λ R : CommSemiRing, CommMon.of R) (λ R, rfl)
(λ R₁ R₂ f, f.to_monoid_hom) (by tidy)
end CommSemiRing
/-- The category of commutative rings. -/
def CommRing : Type (u+1) := bundled comm_ring
namespace CommRing
instance : bundled_hom.parent_projection @comm_ring.to_ring := ⟨⟩
attribute [derive [large_category, concrete_category]] CommRing
instance : has_coe_to_sort CommRing Type* := bundled.has_coe_to_sort
/-- Construct a bundled CommRing from the underlying type and typeclass. -/
def of (R : Type u) [comm_ring R] : CommRing := bundled.of R
/-- Typecheck a `ring_hom` as a morphism in `CommRing`. -/
def of_hom {R S : Type u} [comm_ring R] [comm_ring S] (f : R →+* S) : of R ⟶ of S := f
@[simp] lemma of_hom_apply {R S : Type u} [comm_ring R] [comm_ring S] (f : R →+* S) (x : R) :
of_hom f x = f x := rfl
instance : inhabited CommRing := ⟨of punit⟩
instance (R : CommRing) : comm_ring R := R.str
@[simp] lemma coe_of (R : Type u) [comm_ring R] : (CommRing.of R : Type u) = R := rfl
instance has_forget_to_Ring : has_forget₂ CommRing Ring := bundled_hom.forget₂ _ _
/-- The forgetful functor from commutative rings to (multiplicative) commutative monoids. -/
instance has_forget_to_CommSemiRing : has_forget₂ CommRing CommSemiRing :=
has_forget₂.mk' (λ R : CommRing, CommSemiRing.of R) (λ R, rfl) (λ R₁ R₂ f, f) (by tidy)
instance : full (forget₂ CommRing CommSemiRing) :=
{ preimage := λ X Y f, f, }
end CommRing
-- This example verifies an improvement possible in Lean 3.8.
-- Before that, to have `add_ring_hom.map_zero` usable by `simp` here,
-- we had to mark all the concrete category `has_coe_to_sort` instances reducible.
-- Now, it just works.
example {R S : CommRing} (i : R ⟶ S) (r : R) (h : r = 0) : i r = 0 :=
by simp [h]
namespace ring_equiv
variables {X Y : Type u}
/-- Build an isomorphism in the category `Ring` from a `ring_equiv` between `ring`s. -/
@[simps] def to_Ring_iso [ring X] [ring Y] (e : X ≃+* Y) : Ring.of X ≅ Ring.of Y :=
{ hom := e.to_ring_hom,
inv := e.symm.to_ring_hom }
/-- Build an isomorphism in the category `CommRing` from a `ring_equiv` between `comm_ring`s. -/
@[simps] def to_CommRing_iso [comm_ring X] [comm_ring Y] (e : X ≃+* Y) :
CommRing.of X ≅ CommRing.of Y :=
{ hom := e.to_ring_hom,
inv := e.symm.to_ring_hom }
end ring_equiv
namespace category_theory.iso
/-- Build a `ring_equiv` from an isomorphism in the category `Ring`. -/
def Ring_iso_to_ring_equiv {X Y : Ring} (i : X ≅ Y) : X ≃+* Y :=
{ to_fun := i.hom,
inv_fun := i.inv,
left_inv := by tidy,
right_inv := by tidy,
map_add' := by tidy,
map_mul' := by tidy }.
/-- Build a `ring_equiv` from an isomorphism in the category `CommRing`. -/
def CommRing_iso_to_ring_equiv {X Y : CommRing} (i : X ≅ Y) : X ≃+* Y :=
{ to_fun := i.hom,
inv_fun := i.inv,
left_inv := by tidy,
right_inv := by tidy,
map_add' := by tidy,
map_mul' := by tidy }.
@[simp]
lemma CommRing_iso_to_ring_equiv_to_ring_hom {X Y : CommRing} (i : X ≅ Y) :
i.CommRing_iso_to_ring_equiv.to_ring_hom = i.hom := by { ext, refl }
@[simp]
lemma CommRing_iso_to_ring_equiv_symm_to_ring_hom {X Y : CommRing} (i : X ≅ Y) :
i.CommRing_iso_to_ring_equiv.symm.to_ring_hom = i.inv := by { ext, refl }
end category_theory.iso
/-- Ring equivalences between `ring`s are the same as (isomorphic to) isomorphisms in `Ring`. -/
def ring_equiv_iso_Ring_iso {X Y : Type u} [ring X] [ring Y] :
(X ≃+* Y) ≅ (Ring.of X ≅ Ring.of Y) :=
{ hom := λ e, e.to_Ring_iso,
inv := λ i, i.Ring_iso_to_ring_equiv, }
/-- Ring equivalences between `comm_ring`s are the same as (isomorphic to) isomorphisms
in `CommRing`. -/
def ring_equiv_iso_CommRing_iso {X Y : Type u} [comm_ring X] [comm_ring Y] :
(X ≃+* Y) ≅ (CommRing.of X ≅ CommRing.of Y) :=
{ hom := λ e, e.to_CommRing_iso,
inv := λ i, i.CommRing_iso_to_ring_equiv, }
instance Ring.forget_reflects_isos : reflects_isomorphisms (forget Ring.{u}) :=
{ reflects := λ X Y f _,
begin
resetI,
let i := as_iso ((forget Ring).map f),
let e : X ≃+* Y := { ..f, ..i.to_equiv },
exact ⟨(is_iso.of_iso e.to_Ring_iso).1⟩,
end }
instance CommRing.forget_reflects_isos : reflects_isomorphisms (forget CommRing.{u}) :=
{ reflects := λ X Y f _,
begin
resetI,
let i := as_iso ((forget CommRing).map f),
let e : X ≃+* Y := { ..f, ..i.to_equiv },
exact ⟨(is_iso.of_iso e.to_CommRing_iso).1⟩,
end }
lemma CommRing.comp_eq_ring_hom_comp {R S T : CommRing} (f : R ⟶ S) (g : S ⟶ T) :
f ≫ g = g.comp f := rfl
lemma CommRing.ring_hom_comp_eq_comp {R S T : Type*} [comm_ring R] [comm_ring S]
[comm_ring T] (f : R →+* S) (g : S →+* T) :
g.comp f = CommRing.of_hom f ≫ CommRing.of_hom g := rfl
-- It would be nice if we could have the following,
-- but it requires making `reflects_isomorphisms_forget₂` an instance,
-- which can cause typeclass loops:
local attribute [priority 50,instance] reflects_isomorphisms_forget₂
example : reflects_isomorphisms (forget₂ Ring AddCommGroup) := by apply_instance
|
7ab7151c78d4692c631cd052a330c4c90796af5a
|
618003631150032a5676f229d13a079ac875ff77
|
/src/topology/instances/ennreal.lean
|
63caca6103efab73cd1b277f10192052b8c75d78
|
[
"Apache-2.0"
] |
permissive
|
awainverse/mathlib
|
939b68c8486df66cfda64d327ad3d9165248c777
|
ea76bd8f3ca0a8bf0a166a06a475b10663dec44a
|
refs/heads/master
| 1,659,592,962,036
| 1,590,987,592,000
| 1,590,987,592,000
| 268,436,019
| 1
| 0
|
Apache-2.0
| 1,590,990,500,000
| 1,590,990,500,000
| null |
UTF-8
|
Lean
| false
| false
| 38,526
|
lean
|
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Johannes Hölzl
-/
import topology.instances.nnreal
/-!
# Extended non-negative reals
-/
noncomputable theory
open classical set filter metric
open_locale classical
open_locale topological_space
variables {α : Type*} {β : Type*} {γ : Type*}
open_locale ennreal big_operators
namespace ennreal
variables {a b c d : ennreal} {r p q : nnreal}
variables {x y z : ennreal} {ε ε₁ ε₂ : ennreal} {s : set ennreal}
section topological_space
open topological_space
/-- Topology on `ennreal`.
Note: this is different from the `emetric_space` topology. The `emetric_space` topology has
`is_open {⊤}`, while in this order topology the singleton `{⊤}` is not open. -/
instance : topological_space ennreal := preorder.topology ennreal
instance : order_topology ennreal := ⟨rfl⟩
instance : t2_space ennreal := by apply_instance -- short-circuit type class inference
instance : second_countable_topology ennreal :=
⟨⟨⋃q ≥ (0:ℚ), {{a : ennreal | a < nnreal.of_real q}, {a : ennreal | ↑(nnreal.of_real q) < a}},
(countable_encodable _).bUnion $ assume a ha, (countable_singleton _).insert _,
le_antisymm
(le_generate_from $ by simp [or_imp_distrib, is_open_lt', is_open_gt'] {contextual := tt})
(le_generate_from $ λ s h, begin
rcases h with ⟨a, hs | hs⟩;
[ rw show s = ⋃q∈{q:ℚ | 0 ≤ q ∧ a < nnreal.of_real q}, {b | ↑(nnreal.of_real q) < b},
from set.ext (assume b, by simp [hs, @ennreal.lt_iff_exists_rat_btwn a b, and_assoc]),
rw show s = ⋃q∈{q:ℚ | 0 ≤ q ∧ ↑(nnreal.of_real q) < a}, {b | b < ↑(nnreal.of_real q)},
from set.ext (assume b, by simp [hs, @ennreal.lt_iff_exists_rat_btwn b a, and_comm, and_assoc])];
{ apply is_open_Union, intro q,
apply is_open_Union, intro hq,
exact generate_open.basic _ (mem_bUnion hq.1 $ by simp) }
end)⟩⟩
lemma embedding_coe : embedding (coe : nnreal → ennreal) :=
⟨⟨begin
refine le_antisymm _ _,
{ rw [@order_topology.topology_eq_generate_intervals ennreal _,
← coinduced_le_iff_le_induced],
refine le_generate_from (assume s ha, _),
rcases ha with ⟨a, rfl | rfl⟩,
show is_open {b : nnreal | a < ↑b},
{ cases a; simp [none_eq_top, some_eq_coe, is_open_lt'] },
show is_open {b : nnreal | ↑b < a},
{ cases a; simp [none_eq_top, some_eq_coe, is_open_gt', is_open_const] } },
{ rw [@order_topology.topology_eq_generate_intervals nnreal _],
refine le_generate_from (assume s ha, _),
rcases ha with ⟨a, rfl | rfl⟩,
exact ⟨Ioi a, is_open_Ioi, by simp [Ioi]⟩,
exact ⟨Iio a, is_open_Iio, by simp [Iio]⟩ }
end⟩,
assume a b, coe_eq_coe.1⟩
lemma is_open_ne_top : is_open {a : ennreal | a ≠ ⊤} := is_open_ne
lemma is_open_Ico_zero : is_open (Ico 0 b) := by { rw ennreal.Ico_eq_Iio, exact is_open_Iio}
lemma coe_range_mem_nhds : range (coe : nnreal → ennreal) ∈ 𝓝 (r : ennreal) :=
have {a : ennreal | a ≠ ⊤} = range (coe : nnreal → ennreal),
from set.ext $ assume a, by cases a; simp [none_eq_top, some_eq_coe],
this ▸ mem_nhds_sets is_open_ne_top coe_ne_top
@[norm_cast] lemma tendsto_coe {f : filter α} {m : α → nnreal} {a : nnreal} :
tendsto (λa, (m a : ennreal)) f (𝓝 ↑a) ↔ tendsto m f (𝓝 a) :=
embedding_coe.tendsto_nhds_iff.symm
lemma continuous_coe {α} [topological_space α] {f : α → nnreal} :
continuous (λa, (f a : ennreal)) ↔ continuous f :=
embedding_coe.continuous_iff.symm
lemma nhds_coe {r : nnreal} : 𝓝 (r : ennreal) = (𝓝 r).map coe :=
by rw [embedding_coe.induced, map_nhds_induced_eq coe_range_mem_nhds]
lemma nhds_coe_coe {r p : nnreal} : 𝓝 ((r : ennreal), (p : ennreal)) =
(𝓝 (r, p)).map (λp:nnreal×nnreal, (p.1, p.2)) :=
begin
rw [(embedding_coe.prod_mk embedding_coe).map_nhds_eq],
rw [← prod_range_range_eq],
exact prod_mem_nhds_sets coe_range_mem_nhds coe_range_mem_nhds
end
lemma continuous_of_real : continuous ennreal.of_real :=
(continuous_coe.2 continuous_id).comp nnreal.continuous_of_real
lemma tendsto_of_real {f : filter α} {m : α → ℝ} {a : ℝ} (h : tendsto m f (𝓝 a)) :
tendsto (λa, ennreal.of_real (m a)) f (𝓝 (ennreal.of_real a)) :=
tendsto.comp (continuous.tendsto continuous_of_real _) h
lemma tendsto_to_nnreal {a : ennreal} : a ≠ ⊤ →
tendsto (ennreal.to_nnreal) (𝓝 a) (𝓝 a.to_nnreal) :=
begin
cases a; simp [some_eq_coe, none_eq_top, nhds_coe, tendsto_map'_iff, (∘)],
exact tendsto_id
end
lemma continuous_on_to_nnreal : continuous_on ennreal.to_nnreal {a | a ≠ ∞} :=
continuous_on_iff_continuous_restrict.2 $ continuous_iff_continuous_at.2 $ λ x,
(tendsto_to_nnreal x.2).comp continuous_at_subtype_val
lemma tendsto_to_real {a : ennreal} : a ≠ ⊤ → tendsto (ennreal.to_real) (𝓝 a) (𝓝 a.to_real) :=
λ ha, tendsto.comp ((@nnreal.tendsto_coe _ (𝓝 a.to_nnreal) id (a.to_nnreal)).2 tendsto_id)
(tendsto_to_nnreal ha)
lemma tendsto_nhds_top {m : α → ennreal} {f : filter α}
(h : ∀ n : ℕ, ∀ᶠ a in f, ↑n < m a) : tendsto m f (𝓝 ⊤) :=
tendsto_nhds_generate_from $ assume s hs,
match s, hs with
| _, ⟨none, or.inl rfl⟩, hr := (lt_irrefl ⊤ hr).elim
| _, ⟨some r, or.inl rfl⟩, hr :=
let ⟨n, hrn⟩ := exists_nat_gt r in
mem_sets_of_superset (h n) $ assume a hnma, show ↑r < m a, from
lt_trans (show (r : ennreal) < n, from (coe_nat n) ▸ coe_lt_coe.2 hrn) hnma
| _, ⟨a, or.inr rfl⟩, hr := (not_top_lt $ show ⊤ < a, from hr).elim
end
lemma tendsto_nat_nhds_top : tendsto (λ n : ℕ, ↑n) at_top (𝓝 ∞) :=
tendsto_nhds_top $ λ n, mem_at_top_sets.2
⟨n+1, λ m hm, ennreal.coe_nat_lt_coe_nat.2 $ nat.lt_of_succ_le hm⟩
lemma nhds_top : 𝓝 ∞ = ⨅a ≠ ∞, principal (Ioi a) :=
nhds_top_order.trans $ by simp [lt_top_iff_ne_top, Ioi]
lemma nhds_zero : 𝓝 (0 : ennreal) = ⨅a ≠ 0, principal (Iio a) :=
nhds_bot_order.trans $ by simp [bot_lt_iff_ne_bot, Iio]
/-- The set of finite `ennreal` numbers is homeomorphic to `nnreal`. -/
def ne_top_homeomorph_nnreal : {a | a ≠ ∞} ≃ₜ nnreal :=
{ to_fun := λ x, ennreal.to_nnreal x,
inv_fun := λ x, ⟨x, coe_ne_top⟩,
left_inv := λ ⟨x, hx⟩, subtype.eq $ coe_to_nnreal hx,
right_inv := λ x, to_nnreal_coe,
continuous_to_fun := continuous_on_iff_continuous_restrict.1 continuous_on_to_nnreal,
continuous_inv_fun := continuous_subtype_mk _ (continuous_coe.2 continuous_id) }
/-- The set of finite `ennreal` numbers is homeomorphic to `nnreal`. -/
def lt_top_homeomorph_nnreal : {a | a < ∞} ≃ₜ nnreal :=
by refine (homeomorph.set_congr $ set.ext $ λ x, _).trans ne_top_homeomorph_nnreal;
simp only [mem_set_of_eq, lt_top_iff_ne_top]
-- using Icc because
-- • don't have 'Ioo (x - ε) (x + ε) ∈ 𝓝 x' unless x > 0
-- • (x - y ≤ ε ↔ x ≤ ε + y) is true, while (x - y < ε ↔ x < ε + y) is not
lemma Icc_mem_nhds : x ≠ ⊤ → ε > 0 → Icc (x - ε) (x + ε) ∈ 𝓝 x :=
begin
assume xt ε0, rw mem_nhds_sets_iff,
by_cases x0 : x = 0,
{ use Iio (x + ε),
have : Iio (x + ε) ⊆ Icc (x - ε) (x + ε), assume a, rw x0, simpa using le_of_lt,
use this, exact ⟨is_open_Iio, mem_Iio_self_add xt ε0⟩ },
{ use Ioo (x - ε) (x + ε), use Ioo_subset_Icc_self,
exact ⟨is_open_Ioo, mem_Ioo_self_sub_add xt x0 ε0 ε0 ⟩ }
end
lemma nhds_of_ne_top : x ≠ ⊤ → 𝓝 x = ⨅ε > 0, principal (Icc (x - ε) (x + ε)) :=
begin
assume xt, refine le_antisymm _ _,
-- first direction
simp only [le_infi_iff, le_principal_iff], assume ε ε0, exact Icc_mem_nhds xt ε0,
-- second direction
rw nhds_generate_from, refine le_infi (assume s, le_infi $ assume hs, _),
simp only [mem_set_of_eq] at hs, rcases hs with ⟨xs, ⟨a, ha⟩⟩,
cases ha,
{ rw ha at *,
rcases dense xs with ⟨b, ⟨ab, bx⟩⟩,
have xb_pos : x - b > 0 := zero_lt_sub_iff_lt.2 bx,
have xxb : x - (x - b) = b := sub_sub_cancel (by rwa lt_top_iff_ne_top) (le_of_lt bx),
refine infi_le_of_le (x - b) (infi_le_of_le xb_pos _),
simp only [mem_principal_sets, le_principal_iff],
assume y, rintros ⟨h₁, h₂⟩, rw xxb at h₁, calc a < b : ab ... ≤ y : h₁ },
{ rw ha at *,
rcases dense xs with ⟨b, ⟨xb, ba⟩⟩,
have bx_pos : b - x > 0 := zero_lt_sub_iff_lt.2 xb,
have xbx : x + (b - x) = b := add_sub_cancel_of_le (le_of_lt xb),
refine infi_le_of_le (b - x) (infi_le_of_le bx_pos _),
simp only [mem_principal_sets, le_principal_iff],
assume y, rintros ⟨h₁, h₂⟩, rw xbx at h₂, calc y ≤ b : h₂ ... < a : ba },
end
/-- Characterization of neighborhoods for `ennreal` numbers. See also `tendsto_order`
for a version with strict inequalities. -/
protected theorem tendsto_nhds {f : filter α} {u : α → ennreal} {a : ennreal} (ha : a ≠ ⊤) :
tendsto u f (𝓝 a) ↔ ∀ ε > 0, ∀ᶠ x in f, (u x) ∈ Icc (a - ε) (a + ε) :=
by simp only [nhds_of_ne_top ha, tendsto_infi, tendsto_principal, mem_Icc]
protected lemma tendsto_at_top [nonempty β] [semilattice_sup β] {f : β → ennreal} {a : ennreal}
(ha : a ≠ ⊤) : tendsto f at_top (𝓝 a) ↔ ∀ε>0, ∃N, ∀n≥N, (f n) ∈ Icc (a - ε) (a + ε) :=
by simp only [ennreal.tendsto_nhds ha, mem_at_top_sets, mem_set_of_eq, filter.eventually]
lemma tendsto_coe_nnreal_nhds_top {α} {l : filter α} {f : α → nnreal} (h : tendsto f l at_top) :
tendsto (λa, (f a : ennreal)) l (𝓝 ∞) :=
tendsto_nhds_top $ assume n,
have ∀ᶠ a in l, ↑(n+1) ≤ f a := h $ mem_at_top _,
mem_sets_of_superset this $ assume a (ha : ↑(n+1) ≤ f a),
begin
rw [← coe_nat],
dsimp,
exact coe_lt_coe.2 (lt_of_lt_of_le (nat.cast_lt.2 (nat.lt_succ_self _)) ha)
end
instance : topological_add_monoid ennreal :=
⟨ continuous_iff_continuous_at.2 $
have hl : ∀a:ennreal, tendsto (λ (p : ennreal × ennreal), p.fst + p.snd) (𝓝 (⊤, a)) (𝓝 ⊤), from
assume a, tendsto_nhds_top $ assume n,
have set.prod {a | ↑n < a } univ ∈ 𝓝 ((⊤:ennreal), a), from
prod_mem_nhds_sets (lt_mem_nhds $ coe_nat n ▸ coe_lt_top) univ_mem_sets,
show {a : ennreal × ennreal | ↑n < a.fst + a.snd} ∈ 𝓝 (⊤, a),
begin filter_upwards [this] assume ⟨a₁, a₂⟩ ⟨h₁, h₂⟩, lt_of_lt_of_le h₁ (le_add_right $ le_refl _) end,
begin
rintro ⟨a₁, a₂⟩,
cases a₁, { simp [continuous_at, none_eq_top, hl a₂], },
cases a₂, { simp [continuous_at, none_eq_top, some_eq_coe, nhds_swap (a₁ : ennreal) ⊤,
tendsto_map'_iff, (∘)], convert hl a₁, simp [add_comm] },
simp [continuous_at, some_eq_coe, nhds_coe_coe, tendsto_map'_iff, (∘)],
simp only [coe_add.symm, tendsto_coe, tendsto_add]
end ⟩
protected lemma tendsto_mul (ha : a ≠ 0 ∨ b ≠ ⊤) (hb : b ≠ 0 ∨ a ≠ ⊤) :
tendsto (λp:ennreal×ennreal, p.1 * p.2) (𝓝 (a, b)) (𝓝 (a * b)) :=
have ht : ∀b:ennreal, b ≠ 0 → tendsto (λp:ennreal×ennreal, p.1 * p.2) (𝓝 ((⊤:ennreal), b)) (𝓝 ⊤),
begin
refine assume b hb, tendsto_nhds_top $ assume n, _,
rcases dense (zero_lt_iff_ne_zero.2 hb) with ⟨ε', hε', hεb'⟩,
rcases ennreal.lt_iff_exists_coe.1 hεb' with ⟨ε, rfl, h⟩,
rcases exists_nat_gt (↑n / ε) with ⟨m, hm⟩,
have hε : ε > 0, from coe_lt_coe.1 hε',
refine mem_sets_of_superset (prod_mem_nhds_sets (lt_mem_nhds $ @coe_lt_top m) (lt_mem_nhds $ h)) _,
rintros ⟨a₁, a₂⟩ ⟨h₁, h₂⟩,
dsimp at h₁ h₂ ⊢,
calc (n:ennreal) = ↑(((n:nnreal) / ε) * ε) :
begin
simp [nnreal.div_def],
rw [mul_assoc, ← coe_mul, nnreal.inv_mul_cancel, coe_one, ← coe_nat, mul_one],
exact zero_lt_iff_ne_zero.1 hε
end
... < (↑m * ε : nnreal) : coe_lt_coe.2 $ mul_lt_mul hm (le_refl _) hε (nat.cast_nonneg _)
... ≤ a₁ * a₂ : by rw [coe_mul]; exact canonically_ordered_semiring.mul_le_mul
(le_of_lt h₁)
(le_of_lt h₂)
end,
begin
cases a, {simp [none_eq_top] at hb, simp [none_eq_top, ht b hb, top_mul, hb] },
cases b, {
simp [none_eq_top] at ha,
simp [*, nhds_swap (a : ennreal) ⊤, none_eq_top, some_eq_coe, top_mul, tendsto_map'_iff, (∘), mul_comm] },
simp [some_eq_coe, nhds_coe_coe, tendsto_map'_iff, (∘)],
simp only [coe_mul.symm, tendsto_coe, tendsto_mul]
end
protected lemma tendsto.mul {f : filter α} {ma : α → ennreal} {mb : α → ennreal} {a b : ennreal}
(hma : tendsto ma f (𝓝 a)) (ha : a ≠ 0 ∨ b ≠ ⊤) (hmb : tendsto mb f (𝓝 b)) (hb : b ≠ 0 ∨ a ≠ ⊤) :
tendsto (λa, ma a * mb a) f (𝓝 (a * b)) :=
show tendsto ((λp:ennreal×ennreal, p.1 * p.2) ∘ (λa, (ma a, mb a))) f (𝓝 (a * b)), from
tendsto.comp (ennreal.tendsto_mul ha hb) (hma.prod_mk_nhds hmb)
protected lemma tendsto.const_mul {f : filter α} {m : α → ennreal} {a b : ennreal}
(hm : tendsto m f (𝓝 b)) (hb : b ≠ 0 ∨ a ≠ ⊤) : tendsto (λb, a * m b) f (𝓝 (a * b)) :=
by_cases
(assume : a = 0, by simp [this, tendsto_const_nhds])
(assume ha : a ≠ 0, ennreal.tendsto.mul tendsto_const_nhds (or.inl ha) hm hb)
protected lemma tendsto.mul_const {f : filter α} {m : α → ennreal} {a b : ennreal}
(hm : tendsto m f (𝓝 a)) (ha : a ≠ 0 ∨ b ≠ ⊤) : tendsto (λx, m x * b) f (𝓝 (a * b)) :=
by simpa only [mul_comm] using ennreal.tendsto.const_mul hm ha
protected lemma continuous_const_mul {a : ennreal} (ha : a < ⊤) : continuous ((*) a) :=
continuous_iff_continuous_at.2 $ λ x, tendsto.const_mul tendsto_id $ or.inr $ ne_of_lt ha
protected lemma continuous_mul_const {a : ennreal} (ha : a < ⊤) : continuous (λ x, x * a) :=
by simpa only [mul_comm] using ennreal.continuous_const_mul ha
protected lemma continuous_inv : continuous (has_inv.inv : ennreal → ennreal) :=
continuous_iff_continuous_at.2 $ λ a, tendsto_order.2
⟨begin
assume b hb,
simp only [@ennreal.lt_inv_iff_lt_inv b],
exact gt_mem_nhds (ennreal.lt_inv_iff_lt_inv.1 hb),
end,
begin
assume b hb,
simp only [gt_iff_lt, @ennreal.inv_lt_iff_inv_lt _ b],
exact lt_mem_nhds (ennreal.inv_lt_iff_inv_lt.1 hb)
end⟩
@[simp] protected lemma tendsto_inv_iff {f : filter α} {m : α → ennreal} {a : ennreal} :
tendsto (λ x, (m x)⁻¹) f (𝓝 a⁻¹) ↔ tendsto m f (𝓝 a) :=
⟨λ h, by simpa only [function.comp, ennreal.inv_inv]
using (ennreal.continuous_inv.tendsto a⁻¹).comp h,
(ennreal.continuous_inv.tendsto a).comp⟩
protected lemma tendsto.div {f : filter α} {ma : α → ennreal} {mb : α → ennreal} {a b : ennreal}
(hma : tendsto ma f (𝓝 a)) (ha : a ≠ 0 ∨ b ≠ 0) (hmb : tendsto mb f (𝓝 b)) (hb : b ≠ ⊤ ∨ a ≠ ⊤) :
tendsto (λa, ma a / mb a) f (𝓝 (a / b)) :=
by { apply tendsto.mul hma _ (ennreal.tendsto_inv_iff.2 hmb) _; simp [ha, hb] }
protected lemma tendsto.const_div {f : filter α} {m : α → ennreal} {a b : ennreal}
(hm : tendsto m f (𝓝 b)) (hb : b ≠ ⊤ ∨ a ≠ ⊤) : tendsto (λb, a / m b) f (𝓝 (a / b)) :=
by { apply tendsto.const_mul (ennreal.tendsto_inv_iff.2 hm), simp [hb] }
protected lemma tendsto.div_const {f : filter α} {m : α → ennreal} {a b : ennreal}
(hm : tendsto m f (𝓝 a)) (ha : a ≠ 0 ∨ b ≠ 0) : tendsto (λx, m x / b) f (𝓝 (a / b)) :=
by { apply tendsto.mul_const hm, simp [ha] }
protected lemma tendsto_inv_nat_nhds_zero : tendsto (λ n : ℕ, (n : ennreal)⁻¹) at_top (𝓝 0) :=
ennreal.inv_top ▸ ennreal.tendsto_inv_iff.2 tendsto_nat_nhds_top
lemma Sup_add {s : set ennreal} (hs : s.nonempty) : Sup s + a = ⨆b∈s, b + a :=
have Sup ((λb, b + a) '' s) = Sup s + a,
from is_lub.Sup_eq (is_lub_of_is_lub_of_tendsto
(assume x _ y _ h, add_le_add' h (le_refl _))
(is_lub_Sup s)
hs
(tendsto.add (tendsto_id' inf_le_left) tendsto_const_nhds)),
by simp [Sup_image, -add_comm] at this; exact this.symm
lemma supr_add {ι : Sort*} {s : ι → ennreal} [h : nonempty ι] : supr s + a = ⨆b, s b + a :=
let ⟨x⟩ := h in
calc supr s + a = Sup (range s) + a : by simp [Sup_range]
... = (⨆b∈range s, b + a) : Sup_add ⟨s x, x, rfl⟩
... = _ : supr_range
lemma add_supr {ι : Sort*} {s : ι → ennreal} [h : nonempty ι] : a + supr s = ⨆b, a + s b :=
by rw [add_comm, supr_add]; simp [add_comm]
lemma supr_add_supr {ι : Sort*} {f g : ι → ennreal} (h : ∀i j, ∃k, f i + g j ≤ f k + g k) :
supr f + supr g = (⨆ a, f a + g a) :=
begin
by_cases hι : nonempty ι,
{ letI := hι,
refine le_antisymm _ (supr_le $ λ a, add_le_add' (le_supr _ _) (le_supr _ _)),
simpa [add_supr, supr_add] using
λ i j:ι, show f i + g j ≤ ⨆ a, f a + g a, from
let ⟨k, hk⟩ := h i j in le_supr_of_le k hk },
{ have : ∀f:ι → ennreal, (⨆i, f i) = 0 := assume f, bot_unique (supr_le $ assume i, (hι ⟨i⟩).elim),
rw [this, this, this, zero_add] }
end
lemma supr_add_supr_of_monotone {ι : Sort*} [semilattice_sup ι]
{f g : ι → ennreal} (hf : monotone f) (hg : monotone g) :
supr f + supr g = (⨆ a, f a + g a) :=
supr_add_supr $ assume i j, ⟨i ⊔ j, add_le_add' (hf $ le_sup_left) (hg $ le_sup_right)⟩
lemma finset_sum_supr_nat {α} {ι} [semilattice_sup ι] {s : finset α} {f : α → ι → ennreal}
(hf : ∀a, monotone (f a)) :
s.sum (λa, supr (f a)) = (⨆ n, s.sum (λa, f a n)) :=
begin
refine finset.induction_on s _ _,
{ simp,
exact (bot_unique $ supr_le $ assume i, le_refl ⊥).symm },
{ assume a s has ih,
simp only [finset.sum_insert has],
rw [ih, supr_add_supr_of_monotone (hf a)],
assume i j h,
exact (finset.sum_le_sum $ assume a ha, hf a h) }
end
section priority
-- for some reason the next proof fails without changing the priority of this instance
local attribute [instance, priority 1000] classical.prop_decidable
lemma mul_Sup {s : set ennreal} {a : ennreal} : a * Sup s = ⨆i∈s, a * i :=
begin
by_cases hs : ∀x∈s, x = (0:ennreal),
{ have h₁ : Sup s = 0 := (bot_unique $ Sup_le $ assume a ha, (hs a ha).symm ▸ le_refl 0),
have h₂ : (⨆i ∈ s, a * i) = 0 :=
(bot_unique $ supr_le $ assume a, supr_le $ assume ha, by simp [hs a ha]),
rw [h₁, h₂, mul_zero] },
{ simp only [not_forall] at hs,
rcases hs with ⟨x, hx, hx0⟩,
have s₁ : Sup s ≠ 0 :=
zero_lt_iff_ne_zero.1 (lt_of_lt_of_le (zero_lt_iff_ne_zero.2 hx0) (le_Sup hx)),
have : Sup ((λb, a * b) '' s) = a * Sup s :=
is_lub.Sup_eq (is_lub_of_is_lub_of_tendsto
(assume x _ y _ h, canonically_ordered_semiring.mul_le_mul (le_refl _) h)
(is_lub_Sup _)
⟨x, hx⟩
(ennreal.tendsto.const_mul (tendsto_id' inf_le_left) (or.inl s₁))),
rw [this.symm, Sup_image] }
end
end priority
lemma mul_supr {ι : Sort*} {f : ι → ennreal} {a : ennreal} : a * supr f = ⨆i, a * f i :=
by rw [← Sup_range, mul_Sup, supr_range]
lemma supr_mul {ι : Sort*} {f : ι → ennreal} {a : ennreal} : supr f * a = ⨆i, f i * a :=
by rw [mul_comm, mul_supr]; congr; funext; rw [mul_comm]
protected lemma tendsto_coe_sub : ∀{b:ennreal}, tendsto (λb:ennreal, ↑r - b) (𝓝 b) (𝓝 (↑r - b)) :=
begin
refine (forall_ennreal.2 $ and.intro (assume a, _) _),
{ simp [@nhds_coe a, tendsto_map'_iff, (∘), tendsto_coe, coe_sub.symm],
exact nnreal.tendsto.sub tendsto_const_nhds tendsto_id },
simp,
exact (tendsto.congr' (mem_sets_of_superset (lt_mem_nhds $ @coe_lt_top r) $
by simp [le_of_lt] {contextual := tt})) tendsto_const_nhds
end
lemma sub_supr {ι : Sort*} [hι : nonempty ι] {b : ι → ennreal} (hr : a < ⊤) :
a - (⨆i, b i) = (⨅i, a - b i) :=
let ⟨i⟩ := hι in
let ⟨r, eq, _⟩ := lt_iff_exists_coe.mp hr in
have Inf ((λb, ↑r - b) '' range b) = ↑r - (⨆i, b i),
from is_glb.Inf_eq $ is_glb_of_is_lub_of_tendsto
(assume x _ y _, sub_le_sub (le_refl _))
is_lub_supr
⟨_, i, rfl⟩
(tendsto.comp ennreal.tendsto_coe_sub (tendsto_id' inf_le_left)),
by rw [eq, ←this]; simp [Inf_image, infi_range, -mem_range]; exact le_refl _
end topological_space
section tsum
variables {f g : α → ennreal}
@[norm_cast] protected lemma has_sum_coe {f : α → nnreal} {r : nnreal} :
has_sum (λa, (f a : ennreal)) ↑r ↔ has_sum f r :=
have (λs:finset α, s.sum (coe ∘ f)) = (coe : nnreal → ennreal) ∘ (λs:finset α, s.sum f),
from funext $ assume s, ennreal.coe_finset_sum.symm,
by unfold has_sum; rw [this, tendsto_coe]
protected lemma tsum_coe_eq {f : α → nnreal} (h : has_sum f r) : (∑'a, (f a : ennreal)) = r :=
tsum_eq_has_sum $ ennreal.has_sum_coe.2 $ h
protected lemma coe_tsum {f : α → nnreal} : summable f → ↑(tsum f) = (∑'a, (f a : ennreal))
| ⟨r, hr⟩ := by rw [tsum_eq_has_sum hr, ennreal.tsum_coe_eq hr]
protected lemma has_sum : has_sum f (⨆s:finset α, s.sum f) :=
tendsto_order.2
⟨assume a' ha',
let ⟨s, hs⟩ := lt_supr_iff.mp ha' in
mem_at_top_sets.mpr ⟨s, assume t ht, lt_of_lt_of_le hs $ finset.sum_le_sum_of_subset ht⟩,
assume a' ha',
univ_mem_sets' $ assume s,
have s.sum f ≤ ⨆(s : finset α), s.sum f,
from le_supr (λ(s : finset α), s.sum f) s,
lt_of_le_of_lt this ha'⟩
@[simp] protected lemma summable : summable f := ⟨_, ennreal.has_sum⟩
lemma tsum_coe_ne_top_iff_summable {f : β → nnreal} :
(∑' b, (f b:ennreal)) ≠ ∞ ↔ summable f :=
begin
refine ⟨λ h, _, λ h, ennreal.coe_tsum h ▸ ennreal.coe_ne_top⟩,
lift (∑' b, (f b:ennreal)) to nnreal using h with a ha,
refine ⟨a, ennreal.has_sum_coe.1 _⟩,
rw ha,
exact ennreal.summable.has_sum
end
protected lemma tsum_eq_supr_sum : (∑'a, f a) = (⨆s:finset α, s.sum f) :=
tsum_eq_has_sum ennreal.has_sum
protected lemma tsum_eq_supr_sum' {ι : Type*} (s : ι → finset α) (hs : ∀ t, ∃ i, t ⊆ s i) :
(∑' a, f a) = ⨆ i, (s i).sum f :=
begin
rw [ennreal.tsum_eq_supr_sum],
symmetry,
change (⨆i:ι, (λ t : finset α, t.sum f) (s i)) = ⨆s:finset α, s.sum f,
exact (finset.sum_mono_set f).supr_comp_eq hs
end
protected lemma tsum_sigma {β : α → Type*} (f : Πa, β a → ennreal) :
(∑'p:Σa, β a, f p.1 p.2) = (∑'a b, f a b) :=
tsum_sigma (assume b, ennreal.summable) ennreal.summable
protected lemma tsum_sigma' {β : α → Type*} (f : (Σ a, β a) → ennreal) :
(∑'p:(Σa, β a), f p) = (∑'a b, f ⟨a, b⟩) :=
tsum_sigma (assume b, ennreal.summable) ennreal.summable
protected lemma tsum_prod {f : α → β → ennreal} : (∑'p:α×β, f p.1 p.2) = (∑'a, ∑'b, f a b) :=
let j : α × β → (Σa:α, β) := λp, sigma.mk p.1 p.2 in
let i : (Σa:α, β) → α × β := λp, (p.1, p.2) in
let f' : (Σa:α, β) → ennreal := λp, f p.1 p.2 in
calc (∑'p:α×β, f' (j p)) = (∑'p:Σa:α, β, f p.1 p.2) :
tsum_eq_tsum_of_iso j i (assume ⟨a, b⟩, rfl) (assume ⟨a, b⟩, rfl)
... = (∑'a, ∑'b, f a b) : ennreal.tsum_sigma f
protected lemma tsum_comm {f : α → β → ennreal} : (∑'a, ∑'b, f a b) = (∑'b, ∑'a, f a b) :=
let f' : α×β → ennreal := λp, f p.1 p.2 in
calc (∑'a, ∑'b, f a b) = (∑'p:α×β, f' p) : ennreal.tsum_prod.symm
... = (∑'p:β×α, f' (prod.swap p)) :
(tsum_eq_tsum_of_iso prod.swap (@prod.swap α β) (assume ⟨a, b⟩, rfl) (assume ⟨a, b⟩, rfl)).symm
... = (∑'b, ∑'a, f' (prod.swap (b, a))) : @ennreal.tsum_prod β α (λb a, f' (prod.swap (b, a)))
protected lemma tsum_add : (∑'a, f a + g a) = (∑'a, f a) + (∑'a, g a) :=
tsum_add ennreal.summable ennreal.summable
protected lemma tsum_le_tsum (h : ∀a, f a ≤ g a) : (∑'a, f a) ≤ (∑'a, g a) :=
tsum_le_tsum h ennreal.summable ennreal.summable
protected lemma tsum_eq_supr_nat {f : ℕ → ennreal} :
(∑'i:ℕ, f i) = (⨆i:ℕ, (finset.range i).sum f) :=
ennreal.tsum_eq_supr_sum' _ finset.exists_nat_subset_range
protected lemma le_tsum (a : α) : f a ≤ (∑'a, f a) :=
calc f a = ({a} : finset α).sum f : by simp
... ≤ (⨆s:finset α, s.sum f) : le_supr (λs:finset α, s.sum f) _
... = (∑'a, f a) : by rw [ennreal.tsum_eq_supr_sum]
protected lemma tsum_eq_top_of_eq_top : (∃ a, f a = ∞) → (∑' a, f a) = ∞
| ⟨a, ha⟩ := top_unique $ ha ▸ ennreal.le_tsum a
protected lemma ne_top_of_tsum_ne_top (h : (∑' a, f a) ≠ ∞) (a : α) : f a ≠ ∞ :=
λ ha, h $ ennreal.tsum_eq_top_of_eq_top ⟨a, ha⟩
protected lemma tsum_mul_left : (∑'i, a * f i) = a * (∑'i, f i) :=
if h : ∀i, f i = 0 then by simp [h] else
let ⟨i, (hi : f i ≠ 0)⟩ := classical.not_forall.mp h in
have sum_ne_0 : (∑'i, f i) ≠ 0, from ne_of_gt $
calc 0 < f i : lt_of_le_of_ne (zero_le _) hi.symm
... ≤ (∑'i, f i) : ennreal.le_tsum _,
have tendsto (λs:finset α, s.sum ((*) a ∘ f)) at_top (𝓝 (a * (∑'i, f i))),
by rw [← show (*) a ∘ (λs:finset α, s.sum f) = λs, s.sum ((*) a ∘ f),
from funext $ λ s, finset.mul_sum];
exact ennreal.tendsto.const_mul ennreal.summable.has_sum (or.inl sum_ne_0),
tsum_eq_has_sum this
protected lemma tsum_mul_right : (∑'i, f i * a) = (∑'i, f i) * a :=
by simp [mul_comm, ennreal.tsum_mul_left]
@[simp] lemma tsum_supr_eq {α : Type*} (a : α) {f : α → ennreal} :
(∑'b:α, ⨆ (h : a = b), f b) = f a :=
le_antisymm
(by rw [ennreal.tsum_eq_supr_sum]; exact supr_le (assume s,
calc (∑ b in s, ⨆ (h : a = b), f b) ≤ ∑ b in {a}, ⨆ (h : a = b), f b :
finset.sum_le_sum_of_ne_zero $ assume b _ hb,
suffices a = b, by simpa using this.symm,
classical.by_contradiction $ assume h,
by simpa [h] using hb
... = f a : by simp))
(calc f a ≤ (⨆ (h : a = a), f a) : le_supr (λh:a=a, f a) rfl
... ≤ (∑'b:α, ⨆ (h : a = b), f b) : ennreal.le_tsum _)
lemma has_sum_iff_tendsto_nat {f : ℕ → ennreal} (r : ennreal) :
has_sum f r ↔ tendsto (λn:ℕ, (finset.range n).sum f) at_top (𝓝 r) :=
begin
refine ⟨has_sum.tendsto_sum_nat, assume h, _⟩,
rw [← supr_eq_of_tendsto _ h, ← ennreal.tsum_eq_supr_nat],
{ exact ennreal.summable.has_sum },
{ exact assume s t hst, finset.sum_le_sum_of_subset (finset.range_subset.2 hst) }
end
end tsum
end ennreal
namespace nnreal
lemma exists_le_has_sum_of_le {f g : β → nnreal} {r : nnreal}
(hgf : ∀b, g b ≤ f b) (hfr : has_sum f r) : ∃p≤r, has_sum g p :=
have (∑'b, (g b : ennreal)) ≤ r,
begin
refine has_sum_le (assume b, _) ennreal.summable.has_sum (ennreal.has_sum_coe.2 hfr),
exact ennreal.coe_le_coe.2 (hgf _)
end,
let ⟨p, eq, hpr⟩ := ennreal.le_coe_iff.1 this in
⟨p, hpr, ennreal.has_sum_coe.1 $ eq ▸ ennreal.summable.has_sum⟩
lemma summable_of_le {f g : β → nnreal} (hgf : ∀b, g b ≤ f b) : summable f → summable g
| ⟨r, hfr⟩ := let ⟨p, _, hp⟩ := exists_le_has_sum_of_le hgf hfr in hp.summable
lemma has_sum_iff_tendsto_nat {f : ℕ → nnreal} (r : nnreal) :
has_sum f r ↔ tendsto (λn:ℕ, (finset.range n).sum f) at_top (𝓝 r) :=
begin
rw [← ennreal.has_sum_coe, ennreal.has_sum_iff_tendsto_nat],
simp only [ennreal.coe_finset_sum.symm],
exact ennreal.tendsto_coe
end
lemma tsum_comp_le_tsum_of_inj {β : Type*} {f : α → nnreal} (hf : summable f)
{i : β → α} (hi : function.injective i) : tsum (f ∘ i) ≤ tsum f :=
tsum_le_tsum_of_inj i hi (λ c hc, zero_le _) (λ b, le_refl _) (summable_comp_injective hf hi) hf
end nnreal
lemma tsum_comp_le_tsum_of_inj {β : Type*} {f : α → ℝ} (hf : summable f) (hn : ∀ a, 0 ≤ f a)
{i : β → α} (hi : function.injective i) : tsum (f ∘ i) ≤ tsum f :=
begin
let g : α → nnreal := λ a, ⟨f a, hn a⟩,
have hg : summable g, by rwa ← nnreal.summable_coe,
convert nnreal.coe_le_coe.2 (nnreal.tsum_comp_le_tsum_of_inj hg hi);
{ rw nnreal.coe_tsum, congr }
end
lemma summable_of_nonneg_of_le {f g : β → ℝ}
(hg : ∀b, 0 ≤ g b) (hgf : ∀b, g b ≤ f b) (hf : summable f) : summable g :=
let f' (b : β) : nnreal := ⟨f b, le_trans (hg b) (hgf b)⟩ in
let g' (b : β) : nnreal := ⟨g b, hg b⟩ in
have summable f', from nnreal.summable_coe.1 hf,
have summable g', from
nnreal.summable_of_le (assume b, (@nnreal.coe_le_coe (g' b) (f' b)).2 $ hgf b) this,
show summable (λb, g' b : β → ℝ), from nnreal.summable_coe.2 this
lemma has_sum_iff_tendsto_nat_of_nonneg {f : ℕ → ℝ} (hf : ∀i, 0 ≤ f i) (r : ℝ) :
has_sum f r ↔ tendsto (λn:ℕ, (finset.range n).sum f) at_top (𝓝 r) :=
⟨has_sum.tendsto_sum_nat,
assume hfr,
have 0 ≤ r := ge_of_tendsto at_top_ne_bot hfr $ univ_mem_sets' $ assume i,
show 0 ≤ (finset.range i).sum f, from finset.sum_nonneg $ assume i _, hf i,
let f' (n : ℕ) : nnreal := ⟨f n, hf n⟩, r' : nnreal := ⟨r, this⟩ in
have f_eq : f = (λi:ℕ, (f' i : ℝ)) := rfl,
have r_eq : r = r' := rfl,
begin
rw [f_eq, r_eq, nnreal.has_sum_coe, nnreal.has_sum_iff_tendsto_nat, ← nnreal.tendsto_coe],
simp only [nnreal.coe_sum],
exact hfr
end⟩
lemma infi_real_pos_eq_infi_nnreal_pos {α : Type*} [complete_lattice α] {f : ℝ → α} :
(⨅(n:ℝ) (h : n > 0), f n) = (⨅(n:nnreal) (h : n > 0), f n) :=
le_antisymm
(le_infi $ assume n, le_infi $ assume hn, infi_le_of_le n $ infi_le _ (nnreal.coe_pos.2 hn))
(le_infi $ assume r, le_infi $ assume hr, infi_le_of_le ⟨r, le_of_lt hr⟩ $ infi_le _ hr)
section
variables [emetric_space β]
open ennreal filter emetric
/-- In an emetric ball, the distance between points is everywhere finite -/
lemma edist_ne_top_of_mem_ball {a : β} {r : ennreal} (x y : ball a r) : edist x.1 y.1 ≠ ⊤ :=
lt_top_iff_ne_top.1 $
calc edist x y ≤ edist a x + edist a y : edist_triangle_left x.1 y.1 a
... < r + r : by rw [edist_comm a x, edist_comm a y]; exact add_lt_add x.2 y.2
... ≤ ⊤ : le_top
/-- Each ball in an extended metric space gives us a metric space, as the edist
is everywhere finite. -/
def metric_space_emetric_ball (a : β) (r : ennreal) : metric_space (ball a r) :=
emetric_space.to_metric_space edist_ne_top_of_mem_ball
local attribute [instance] metric_space_emetric_ball
lemma nhds_eq_nhds_emetric_ball (a x : β) (r : ennreal) (h : x ∈ ball a r) :
𝓝 x = map (coe : ball a r → β) (𝓝 ⟨x, h⟩) :=
(map_nhds_subtype_val_eq _ $ mem_nhds_sets emetric.is_open_ball h).symm
end
section
variable [emetric_space α]
open emetric
lemma tendsto_iff_edist_tendsto_0 {l : filter β} {f : β → α} {y : α} :
tendsto f l (𝓝 y) ↔ tendsto (λ x, edist (f x) y) l (𝓝 0) :=
by simp only [emetric.nhds_basis_eball.tendsto_right_iff, emetric.mem_ball,
@tendsto_order ennreal β _ _, forall_prop_of_false ennreal.not_lt_zero, forall_const, true_and]
/-- Yet another metric characterization of Cauchy sequences, stated here for sequences indexed by
a `semilattice_sup`. This one is often the most efficient. -/
lemma emetric.cauchy_seq_iff_le_tendsto_0 [nonempty β] [semilattice_sup β] {s : β → α} :
cauchy_seq s ↔ (∃ (b: β → ennreal), (∀ n m N : β, N ≤ n → N ≤ m → edist (s n) (s m) ≤ b N)
∧ (tendsto b at_top (𝓝 0))) :=
⟨begin
assume hs,
rw emetric.cauchy_seq_iff at hs,
/- `s` is a Cauchy sequence. The sequence `b` will be constructed by taking
the supremum of the distances between `s n` and `s m` for `n, m ≥ N`. -/
let b := λN, Sup ((λ(p : β × β), edist (s p.1) (s p.2))''{p | p.1 ≥ N ∧ p.2 ≥ N}),
--Prove that it bounds the distances of points in the Cauchy sequence
have C : ∀ n m N, N ≤ n → N ≤ m → edist (s n) (s m) ≤ b N,
{ refine λm n N hm hn, le_Sup _,
use (prod.mk m n),
simp only [and_true, eq_self_iff_true, set.mem_set_of_eq],
exact ⟨hm, hn⟩ },
--Prove that it tends to `0`, by using the Cauchy property of `s`
have D : tendsto b at_top (𝓝 0),
{ refine tendsto_order.2 ⟨λa ha, absurd ha (ennreal.not_lt_zero), λε εpos, _⟩,
rcases dense εpos with ⟨δ, δpos, δlt⟩,
rcases hs δ δpos with ⟨N, hN⟩,
refine filter.mem_at_top_sets.2 ⟨N, λn hn, _⟩,
have : b n ≤ δ := Sup_le begin
simp only [and_imp, set.mem_image, set.mem_set_of_eq, exists_imp_distrib, prod.exists],
intros d p q hp hq hd,
rw ← hd,
exact le_of_lt (hN p q (le_trans hn hp) (le_trans hn hq))
end,
simpa using lt_of_le_of_lt this δlt },
-- Conclude
exact ⟨b, ⟨C, D⟩⟩
end,
begin
rintros ⟨b, ⟨b_bound, b_lim⟩⟩,
/- b : β → ennreal, b_bound : ∀ (n m N : β), N ≤ n → N ≤ m → edist (s n) (s m) ≤ b N,
b_lim : tendsto b at_top (𝓝 0) -/
refine emetric.cauchy_seq_iff.2 (λε εpos, _),
have : ∀ᶠ n in at_top, b n < ε := (tendsto_order.1 b_lim ).2 _ εpos,
rcases filter.mem_at_top_sets.1 this with ⟨N, hN⟩,
exact ⟨N, λm n hm hn, calc
edist (s m) (s n) ≤ b N : b_bound m n N hm hn
... < ε : (hN _ (le_refl N)) ⟩
end⟩
lemma continuous_of_le_add_edist {f : α → ennreal} (C : ennreal)
(hC : C ≠ ⊤) (h : ∀x y, f x ≤ f y + C * edist x y) : continuous f :=
begin
refine continuous_iff_continuous_at.2 (λx, tendsto_order.2 ⟨_, _⟩),
show ∀e, e < f x → ∀ᶠ y in 𝓝 x, e < f y,
{ assume e he,
let ε := min (f x - e) 1,
have : ε < ⊤ := lt_of_le_of_lt (min_le_right _ _) (by simp [lt_top_iff_ne_top]),
have : 0 < ε := by simp [ε, hC, he, ennreal.zero_lt_one],
have : 0 < C⁻¹ * (ε/2) := bot_lt_iff_ne_bot.2 (by simp [hC, (ne_of_lt this).symm, ennreal.mul_eq_zero]),
have I : C * (C⁻¹ * (ε/2)) < ε,
{ by_cases C_zero : C = 0,
{ simp [C_zero, ‹0 < ε›] },
{ calc C * (C⁻¹ * (ε/2)) = (C * C⁻¹) * (ε/2) : by simp [mul_assoc]
... = ε/2 : by simp [ennreal.mul_inv_cancel C_zero hC]
... < ε : ennreal.half_lt_self (bot_lt_iff_ne_bot.1 ‹0 < ε›) (lt_top_iff_ne_top.1 ‹ε < ⊤›) }},
have : ball x (C⁻¹ * (ε/2)) ⊆ {y : α | e < f y},
{ rintros y hy,
by_cases htop : f y = ⊤,
{ simp [htop, lt_top_iff_ne_top, ne_top_of_lt he] },
{ simp at hy,
have : e + ε < f y + ε := calc
e + ε ≤ e + (f x - e) : add_le_add_left' (min_le_left _ _)
... = f x : by simp [le_of_lt he]
... ≤ f y + C * edist x y : h x y
... = f y + C * edist y x : by simp [edist_comm]
... ≤ f y + C * (C⁻¹ * (ε/2)) :
add_le_add_left' $ canonically_ordered_semiring.mul_le_mul (le_refl _) (le_of_lt hy)
... < f y + ε : (ennreal.add_lt_add_iff_left (lt_top_iff_ne_top.2 htop)).2 I,
show e < f y, from
(ennreal.add_lt_add_iff_right ‹ε < ⊤›).1 this }},
apply filter.mem_sets_of_superset (ball_mem_nhds _ (‹0 < C⁻¹ * (ε/2)›)) this },
show ∀e, f x < e → ∀ᶠ y in 𝓝 x, f y < e,
{ assume e he,
let ε := min (e - f x) 1,
have : ε < ⊤ := lt_of_le_of_lt (min_le_right _ _) (by simp [lt_top_iff_ne_top]),
have : 0 < ε := by simp [ε, he, ennreal.zero_lt_one],
have : 0 < C⁻¹ * (ε/2) := bot_lt_iff_ne_bot.2 (by simp [hC, (ne_of_lt this).symm, ennreal.mul_eq_zero]),
have I : C * (C⁻¹ * (ε/2)) < ε,
{ by_cases C_zero : C = 0,
simp [C_zero, ‹0 < ε›],
calc C * (C⁻¹ * (ε/2)) = (C * C⁻¹) * (ε/2) : by simp [mul_assoc]
... = ε/2 : by simp [ennreal.mul_inv_cancel C_zero hC]
... < ε : ennreal.half_lt_self (bot_lt_iff_ne_bot.1 ‹0 < ε›) (lt_top_iff_ne_top.1 ‹ε < ⊤›) },
have : ball x (C⁻¹ * (ε/2)) ⊆ {y : α | f y < e},
{ rintros y hy,
have htop : f x ≠ ⊤ := ne_top_of_lt he,
show f y < e, from calc
f y ≤ f x + C * edist y x : h y x
... ≤ f x + C * (C⁻¹ * (ε/2)) :
add_le_add_left' $ canonically_ordered_semiring.mul_le_mul (le_refl _) (le_of_lt hy)
... < f x + ε : (ennreal.add_lt_add_iff_left (lt_top_iff_ne_top.2 htop)).2 I
... ≤ f x + (e - f x) : add_le_add_left' (min_le_left _ _)
... = e : by simp [le_of_lt he] },
apply filter.mem_sets_of_superset (ball_mem_nhds _ (‹0 < C⁻¹ * (ε/2)›)) this },
end
theorem continuous_edist : continuous (λp:α×α, edist p.1 p.2) :=
begin
apply continuous_of_le_add_edist 2 (by simp),
rintros ⟨x, y⟩ ⟨x', y'⟩,
calc edist x y ≤ edist x x' + edist x' y' + edist y' y : edist_triangle4 _ _ _ _
... = edist x' y' + (edist x x' + edist y y') : by simp [edist_comm]; cc
... ≤ edist x' y' + (edist (x, y) (x', y') + edist (x, y) (x', y')) :
add_le_add_left' (add_le_add' (by simp [edist, le_refl]) (by simp [edist, le_refl]))
... = edist x' y' + 2 * edist (x, y) (x', y') : by rw [← mul_two, mul_comm]
end
theorem continuous.edist [topological_space β] {f g : β → α}
(hf : continuous f) (hg : continuous g) : continuous (λb, edist (f b) (g b)) :=
continuous_edist.comp (hf.prod_mk hg)
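/- Illustrative sketch (not part of the original file): `continuous.edist` specializes to
concrete maps, e.g. the distance to a fixed point is continuous. -/
example (a : α) : continuous (λ x, edist x a) :=
continuous_id.edist continuous_const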
theorem filter.tendsto.edist {f g : β → α} {x : filter β} {a b : α}
(hf : tendsto f x (𝓝 a)) (hg : tendsto g x (𝓝 b)) :
tendsto (λx, edist (f x) (g x)) x (𝓝 (edist a b)) :=
(continuous_edist.tendsto (a, b)).comp (hf.prod_mk_nhds hg)
lemma cauchy_seq_of_edist_le_of_tsum_ne_top {f : ℕ → α} (d : ℕ → ennreal)
(hf : ∀ n, edist (f n) (f n.succ) ≤ d n) (hd : tsum d ≠ ∞) :
cauchy_seq f :=
begin
lift d to (ℕ → nnreal) using (λ i, ennreal.ne_top_of_tsum_ne_top hd i),
rw ennreal.tsum_coe_ne_top_iff_summable at hd,
exact cauchy_seq_of_edist_le_of_summable d hf hd
end
/-- If `edist (f n) (f (n+1))` is bounded above by a function `d : ℕ → ennreal`,
then the distance from `f n` to the limit is bounded by `∑'_{k=n}^∞ d k`. -/
lemma edist_le_tsum_of_edist_le_of_tendsto {f : ℕ → α} (d : ℕ → ennreal)
(hf : ∀ n, edist (f n) (f n.succ) ≤ d n)
{a : α} (ha : tendsto f at_top (𝓝 a)) (n : ℕ) :
edist (f n) a ≤ ∑' m, d (n + m) :=
begin
refine le_of_tendsto at_top_ne_bot (tendsto_const_nhds.edist ha)
(mem_at_top_sets.2 ⟨n, λ m hnm, _⟩),
refine le_trans (edist_le_Ico_sum_of_edist_le hnm (λ k _ _, hf k)) _,
rw [finset.sum_Ico_eq_sum_range],
exact sum_le_tsum _ (λ _ _, zero_le _) ennreal.summable
end
/-- If `edist (f n) (f (n+1))` is bounded above by a function `d : ℕ → ennreal`,
then the distance from `f 0` to the limit is bounded by `∑'_{k=0}^∞ d k`. -/
lemma edist_le_tsum_of_edist_le_of_tendsto₀ {f : ℕ → α} (d : ℕ → ennreal)
(hf : ∀ n, edist (f n) (f n.succ) ≤ d n)
{a : α} (ha : tendsto f at_top (𝓝 a)) :
edist (f 0) a ≤ ∑' m, d m :=
by simpa using edist_le_tsum_of_edist_le_of_tendsto d hf ha 0
end --section
|
de3db429f7191ff24bf2a0baaf51ad48b953f5a3
|
ea5678cc400c34ff95b661fa26d15024e27ea8cd
|
/new_structures.lean
|
09547caaab4957bf88ad80a06971597cc47389dc
|
[] |
no_license
|
ChrisHughes24/leanstuff
|
dca0b5349c3ed893e8792ffbd98cbcadaff20411
|
9efa85f72efaccd1d540385952a6acc18fce8687
|
refs/heads/master
| 1,654,883,241,759
| 1,652,873,885,000
| 1,652,873,885,000
| 134,599,537
| 1
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 1,617
|
lean
|
variables
class monoid' (α : Type*) extends semigroup α, has_one α :=
(mul_one : ∀ a : α, a * 1 = a)
(one_mul : ∀ a : α, 1 * a = a)
class comm_monoid' (α : Type*) extends semigroup α, has_one α :=
(mul_one : ∀ a : α, a * 1 = a)
(mul_comm : ∀ a b : α, a * b = b * a)
class group' (α : Type*) extends semigroup α, has_one α, has_inv α :=
(one_mul : ∀ a : α, 1 * a = a)
(mul_left_inv : ∀ a : α, a⁻¹ * a = 1)
(mul_right_inv : ∀ a : α, a * a⁻¹ = 1)
class comm_group' (α : Type*) extends semigroup α, has_one α :=
(mul_comm : ∀ a b : α, a * b = b * a)
variable {α : Type*}
instance group'.to_monoid' [group' α] : monoid' α :=
{ mul := (*),
mul_one := λ a, by rw [← group'.mul_left_inv a, ← mul_assoc, group'.mul_right_inv, group'.one_mul],
..show group' α, by apply_instance }
instance comm_group'.to_comm_monoid' [comm_group' α] : comm_monoid' α :=
{ ..show comm_group' α, by apply_instance }
def pow [monoid' α] (a : α) : ℕ → α
| 0 := 1
| (n+1) := a * pow n
instance I1 [monoid' α] : has_pow α ℕ := ⟨pow⟩
def gpow [group' α] (a : α) : ℤ → α
| (n : ℕ) := a ^ n
| -[1+ n] := (a ^ (n + 1))⁻¹
instance I2 [group' α] : has_pow α ℤ := ⟨gpow⟩
lemma gpow_eq_pow [group' α] (a : α) (n : ℕ) : a ^ (n : ℕ) = a ^ (n : ℤ) := rfl
example {α : Type*} [comm_group' α] : @group'.to_monoid' _ (comm_group'.to_group' α) =
@comm_monoid'.to_monoid' _ comm_group'.to_comm_monoid' := rfl
set_option pp.all true
example [comm_group' α] (a : α) (n : ℕ) : a ^ (n : ℕ) = a ^ (n : ℤ) :=
begin
rw gpow_eq_pow,
end
|
eb4012977e787ef450498695d827edfe26b6d666
|
80cc5bf14c8ea85ff340d1d747a127dcadeb966f
|
/src/data/fin.lean
|
e69c1240ee2399465b0aaddc2bc919a80c8340d9
|
[
"Apache-2.0"
] |
permissive
|
lacker/mathlib
|
f2439c743c4f8eb413ec589430c82d0f73b2d539
|
ddf7563ac69d42cfa4a1bfe41db1fed521bd795f
|
refs/heads/master
| 1,671,948,326,773
| 1,601,479,268,000
| 1,601,479,268,000
| 298,686,743
| 0
| 0
|
Apache-2.0
| 1,601,070,794,000
| 1,601,070,794,000
| null |
UTF-8
|
Lean
| false
| false
| 40,583
|
lean
|
/-
Copyright (c) 2017 Robert Y. Lewis. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Robert Y. Lewis, Keeley Hoek
-/
import data.nat.cast
import tactic.localized
import logic.embedding
/-!
# The finite type with `n` elements
`fin n` is the type whose elements are natural numbers smaller than `n`.
This file expands on the development in the core library.
## Main definitions
### Induction principles
* `fin_zero_elim` : Elimination principle for the empty set `fin 0`, generalizes `fin.elim0`.
* `fin.succ_rec` : Define `C n i` by induction on `i : fin n` interpreted
as `(0 : fin (n - i)).succ.succ…`. This function has two arguments: `H0 n` defines
`0`-th element `C (n+1) 0` of an `(n+1)`-tuple, and `Hs n i` defines `(i+1)`-st element
of `(n+1)`-tuple based on `n`, `i`, and `i`-th element of `n`-tuple.
* `fin.succ_rec_on` : same as `fin.succ_rec` but `i : fin n` is the first argument;
### Casts
* `cast_lt i h` : embed `i` into a `fin` where `h` proves it belongs into;
* `cast_le h` : embed `fin n` into `fin m`, `h : n ≤ m`;
* `cast eq` : embed `fin n` into `fin m`, `eq : n = m`;
* `cast_add m` : embed `fin n` into `fin (n+m)`;
* `cast_succ` : embed `fin n` into `fin (n+1)`;
* `succ_above p` : embed `fin n` into `fin (n + 1)` with a hole around `p`;
* `pred_above p i h` : embed `i : fin (n+1)` into `fin n` by ignoring `p`;
* `sub_nat i h` : subtract `m` from `i ≥ m`, generalizes `fin.pred`;
* `add_nat i h` : add `m` on `i` on the right, generalizes `fin.succ`;
* `nat_add i h` adds `n` on `i` on the left;
* `clamp n m` : `min n m` as an element of `fin (m + 1)`;
### Operation on tuples
We interpret maps `Π i : fin n, α i` as tuples `(α 0, …, α (n-1))`.
If `α i` is a constant map, then tuples are isomorphic (but not definitionally equal)
to `vector`s.
We define the following operations:
* `tail` : the tail of an `n+1` tuple, i.e., its last `n` entries;
* `cons` : adding an element at the beginning of an `n`-tuple, to get an `n+1`-tuple;
* `init` : the beginning of an `n+1` tuple, i.e., its first `n` entries;
* `snoc` : adding an element at the end of an `n`-tuple, to get an `n+1`-tuple. The name `snoc`
comes from `cons` (i.e., adding an element to the left of a tuple) read in reverse order.
* `find p` : returns the first index `n` where `p n` is satisfied, and `none` if it is never
satisfied.
### Misc definitions
* `fin.last n` : The greatest value of `fin (n+1)`.
-/
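/- Illustrative sketch (not part of the original file, arbitrary small values): an element of
`fin n` bundles a natural number with a proof that it is smaller than `n`. -/
example : fin 3 := ⟨2, dec_trivial⟩
example (i : fin 3) : i.val < 3 := i.2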
universe u
open fin nat function
/-- Elimination principle for the empty set `fin 0`, dependent version. -/
def fin_zero_elim {α : fin 0 → Sort u} (x : fin 0) : α x := x.elim0
lemma fact.succ.pos {n} : fact (0 < succ n) := zero_lt_succ _
lemma fact.bit0.pos {n} [h : fact (0 < n)] : fact (0 < bit0 n) :=
nat.zero_lt_bit0 $ ne_of_gt h
lemma fact.bit1.pos {n} : fact (0 < bit1 n) :=
nat.zero_lt_bit1 _
lemma fact.pow.pos {p n : ℕ} [h : fact $ 0 < p] : fact (0 < p ^ n) :=
pow_pos h _
localized "attribute [instance] fact.succ.pos" in fin_fact
localized "attribute [instance] fact.bit0.pos" in fin_fact
localized "attribute [instance] fact.bit1.pos" in fin_fact
localized "attribute [instance] fact.pow.pos" in fin_fact
namespace fin
variables {n m : ℕ} {a b : fin n}
instance fin_to_nat (n : ℕ) : has_coe (fin n) nat := ⟨subtype.val⟩
lemma is_lt (i : fin n) : (i : ℕ) < n := i.2
/-- convert a `ℕ` to `fin n`, provided `n` is positive -/
def of_nat' [h : fact (0 < n)] (i : ℕ) : fin n := ⟨i%n, mod_lt _ h⟩
@[simp] protected lemma eta (a : fin n) (h : (a : ℕ) < n) : (⟨(a : ℕ), h⟩ : fin n) = a :=
by cases a; refl
@[ext]
lemma ext {a b : fin n} (h : (a : ℕ) = b) : a = b := eq_of_veq h
lemma ext_iff (a b : fin n) : a = b ↔ (a : ℕ) = b :=
iff.intro (congr_arg _) fin.eq_of_veq
lemma coe_injective {n : ℕ} : injective (coe : fin n → ℕ) := subtype.coe_injective
lemma eq_iff_veq (a b : fin n) : a = b ↔ a.1 = b.1 :=
⟨veq_of_eq, eq_of_veq⟩
lemma ne_iff_vne (a b : fin n) : a ≠ b ↔ a.1 ≠ b.1 :=
⟨vne_of_ne, ne_of_vne⟩
@[simp] lemma mk_eq_subtype_mk (a : ℕ) (h : a < n) : mk a h = ⟨a, h⟩ := rfl
protected lemma mk.inj_iff {n a b : ℕ} {ha : a < n} {hb : b < n} :
(⟨a, ha⟩ : fin n) = ⟨b, hb⟩ ↔ a = b :=
⟨subtype.mk.inj, λ h, by subst h⟩
lemma mk_val {m n : ℕ} (h : m < n) : (⟨m, h⟩ : fin n).val = m := rfl
lemma eq_mk_iff_coe_eq {k : ℕ} {hk : k < n} : a = ⟨k, hk⟩ ↔ (a : ℕ) = k :=
fin.eq_iff_veq a ⟨k, hk⟩
@[simp, norm_cast] lemma coe_mk {m n : ℕ} (h : m < n) : ((⟨m, h⟩ : fin n) : ℕ) = m := rfl
lemma mk_coe (i : fin n) : (⟨i, i.is_lt⟩ : fin n) = i :=
fin.eta _ _
lemma coe_eq_val (a : fin n) : (a : ℕ) = a.val := rfl
@[simp] lemma val_eq_coe (a : fin n) : a.val = a := rfl
attribute [simp] val_zero
@[simp] lemma val_one {n : ℕ} : (1 : fin (n+2)).val = 1 := rfl
@[simp] lemma val_two {n : ℕ} : (2 : fin (n+3)).val = 2 := rfl
@[simp] lemma coe_zero {n : ℕ} : ((0 : fin (n+1)) : ℕ) = 0 := rfl
@[simp] lemma coe_one {n : ℕ} : ((1 : fin (n+2)) : ℕ) = 1 := rfl
@[simp] lemma coe_two {n : ℕ} : ((2 : fin (n+3)) : ℕ) = 2 := rfl
/-- `a < b` as natural numbers if and only if `a < b` in `fin n`. -/
@[norm_cast, simp] lemma coe_fin_lt {n : ℕ} {a b : fin n} : (a : ℕ) < (b : ℕ) ↔ a < b :=
iff.rfl
/-- `a ≤ b` as natural numbers if and only if `a ≤ b` in `fin n`. -/
@[norm_cast, simp] lemma coe_fin_le {n : ℕ} {a b : fin n} : (a : ℕ) ≤ (b : ℕ) ↔ a ≤ b :=
iff.rfl
lemma val_add {n : ℕ} : ∀ a b : fin n, (a + b).val = (a.val + b.val) % n
| ⟨_, _⟩ ⟨_, _⟩ := rfl
lemma coe_add {n : ℕ} : ∀ a b : fin n, ((a + b : fin n) : ℕ) = (a + b) % n
| ⟨_, _⟩ ⟨_, _⟩ := rfl
lemma val_mul {n : ℕ} : ∀ a b : fin n, (a * b).val = (a.val * b.val) % n
| ⟨_, _⟩ ⟨_, _⟩ := rfl
lemma coe_mul {n : ℕ} : ∀ a b : fin n, ((a * b : fin n) : ℕ) = (a * b) % n
| ⟨_, _⟩ ⟨_, _⟩ := rfl
lemma one_val {n : ℕ} : (1 : fin (n+1)).val = 1 % (n+1) := rfl
lemma coe_one' {n : ℕ} : ((1 : fin (n+1)) : ℕ) = 1 % (n+1) := rfl
@[simp] lemma val_zero' (n) : (0 : fin (n+1)).val = 0 := rfl
@[simp] lemma mk_zero : (⟨0, nat.succ_pos'⟩ : fin (n + 1)) = (0 : fin _) := rfl
@[simp] lemma mk_one : (⟨1, nat.succ_lt_succ (nat.succ_pos n)⟩ : fin (n + 2)) = (1 : fin _) := rfl
section bit
@[simp] lemma mk_bit0 {m n : ℕ} (h : bit0 m < n) :
(⟨bit0 m, h⟩ : fin n) = (bit0 ⟨m, (nat.le_add_right m m).trans_lt h⟩ : fin _) :=
eq_of_veq (nat.mod_eq_of_lt h).symm
@[simp] lemma mk_bit1 {m n : ℕ} (h : bit1 m < n + 1) :
(⟨bit1 m, h⟩ : fin (n + 1)) = (bit1 ⟨m, (nat.le_add_right m m).trans_lt
((m + m).lt_succ_self.trans h)⟩ : fin _) :=
begin
ext,
simp only [bit1, bit0] at h,
simp only [bit1, bit0, coe_add, coe_one', coe_mk, ←nat.add_mod, nat.mod_eq_of_lt h],
end
end bit
@[simp]
lemma of_nat_eq_coe (n : ℕ) (a : ℕ) : (of_nat a : fin (n+1)) = a :=
begin
induction a with a ih, { refl },
ext, show (a+1) % (n+1) = subtype.val (a+1 : fin (n+1)),
{ rw [val_add, ← ih, of_nat],
exact add_mod _ _ _ }
end
/-- Converting an in-range number to `fin (n + 1)` produces a result
whose value is the original number. -/
lemma coe_val_of_lt {n : ℕ} {a : ℕ} (h : a < n + 1) :
((a : fin (n + 1)).val) = a :=
begin
rw ←of_nat_eq_coe,
exact nat.mod_eq_of_lt h
end
/-- Converting the value of a `fin (n + 1)` to `fin (n + 1)` results
in the same value. -/
lemma coe_val_eq_self {n : ℕ} (a : fin (n + 1)) : (a.val : fin (n + 1)) = a :=
begin
rw fin.eq_iff_veq,
exact coe_val_of_lt a.property
end
/-- Coercing an in-range number to `fin (n + 1)`, and converting back
to `ℕ`, results in that number. -/
lemma coe_coe_of_lt {n : ℕ} {a : ℕ} (h : a < n + 1) :
((a : fin (n + 1)) : ℕ) = a :=
coe_val_of_lt h
/-- Converting a `fin (n + 1)` to `ℕ` and back results in the same
value. -/
@[simp] lemma coe_coe_eq_self {n : ℕ} (a : fin (n + 1)) : ((a : ℕ) : fin (n + 1)) = a :=
coe_val_eq_self a
/-- Assume `k = l`. If two functions defined on `fin k` and `fin l` are equal on each element,
then they coincide (in the heq sense). -/
protected lemma heq_fun_iff {α : Type*} {k l : ℕ} (h : k = l) {f : fin k → α} {g : fin l → α} :
f == g ↔ (∀ (i : fin k), f i = g ⟨(i : ℕ), h ▸ i.2⟩) :=
by { induction h, simp [heq_iff_eq, function.funext_iff] }
protected lemma heq_ext_iff {k l : ℕ} (h : k = l) {i : fin k} {j : fin l} :
i == j ↔ (i : ℕ) = (j : ℕ) :=
by { induction h, simp [ext_iff] }
instance {n : ℕ} : nontrivial (fin (n + 2)) := ⟨⟨0, 1, dec_trivial⟩⟩
instance {n : ℕ} : linear_order (fin n) :=
{ le := (≤), lt := (<), ..linear_order.lift (coe : fin n → ℕ) (@fin.eq_of_veq _) }
instance {n : ℕ} : decidable_linear_order (fin n) :=
{ decidable_le := fin.decidable_le,
decidable_lt := fin.decidable_lt,
decidable_eq := fin.decidable_eq _,
..fin.linear_order }
lemma exists_iff {p : fin n → Prop} : (∃ i, p i) ↔ ∃ i h, p ⟨i, h⟩ :=
⟨λ h, exists.elim h (λ ⟨i, hi⟩ hpi, ⟨i, hi, hpi⟩),
λ h, exists.elim h (λ i hi, ⟨⟨i, hi.fst⟩, hi.snd⟩)⟩
lemma forall_iff {p : fin n → Prop} : (∀ i, p i) ↔ ∀ i h, p ⟨i, h⟩ :=
⟨λ h i hi, h ⟨i, hi⟩, λ h ⟨i, hi⟩, h i hi⟩
lemma lt_iff_coe_lt_coe : a < b ↔ (a : ℕ) < b := iff.rfl
lemma le_iff_coe_le_coe : a ≤ b ↔ (a : ℕ) ≤ b := iff.rfl
lemma zero_le (a : fin (n + 1)) : 0 ≤ a := zero_le a.1
@[simp] lemma coe_succ (j : fin n) : (j.succ : ℕ) = j + 1 :=
by cases j; simp [fin.succ]
lemma succ_pos (a : fin n) : (0 : fin (n + 1)) < a.succ := by simp [lt_iff_coe_lt_coe]
protected theorem succ.inj (p : fin.succ a = fin.succ b) : a = b :=
by cases a; cases b; exact eq_of_veq (nat.succ.inj (veq_of_eq p))
@[simp] lemma succ_inj {a b : fin n} : a.succ = b.succ ↔ a = b :=
⟨λh, succ.inj h, λh, by rw h⟩
@[simp] lemma succ_le_succ_iff : a.succ ≤ b.succ ↔ a ≤ b :=
by { simp only [le_iff_coe_le_coe, coe_succ], exact ⟨le_of_succ_le_succ, succ_le_succ⟩ }
@[simp] lemma succ_lt_succ_iff : a.succ < b.succ ↔ a < b :=
by { simp only [lt_iff_coe_lt_coe, coe_succ], exact ⟨lt_of_succ_lt_succ, succ_lt_succ⟩ }
lemma succ_injective (n : ℕ) : injective (@fin.succ n) :=
λa b, succ.inj
lemma succ_ne_zero {n} : ∀ k : fin n, fin.succ k ≠ 0
| ⟨k, hk⟩ heq := nat.succ_ne_zero k $ (ext_iff _ _).1 heq
@[simp] lemma succ_zero_eq_one : fin.succ (0 : fin (n + 1)) = 1 := rfl
lemma mk_succ_pos (i : ℕ) (h : i < n) : (0 : fin (n + 1)) < ⟨i.succ, add_lt_add_right h 1⟩ :=
by { rw [lt_iff_coe_lt_coe, coe_zero], exact nat.succ_pos i }
lemma one_lt_succ_succ (a : fin n) : (1 : fin (n + 2)) < a.succ.succ :=
by { cases n, { exact fin_zero_elim a }, { rw [←succ_zero_eq_one, succ_lt_succ_iff], exact succ_pos a } }
lemma succ_succ_ne_one (a : fin n) : fin.succ (fin.succ a) ≠ 1 := ne_of_gt (one_lt_succ_succ a)
@[simp] lemma coe_pred (j : fin (n+1)) (h : j ≠ 0) : (j.pred h : ℕ) = j - 1 :=
by { cases j, refl }
@[simp] lemma succ_pred : ∀(i : fin (n+1)) (h : i ≠ 0), (i.pred h).succ = i
| ⟨0, h⟩ hi := by contradiction
| ⟨n + 1, h⟩ hi := rfl
@[simp] lemma pred_succ (i : fin n) {h : i.succ ≠ 0} : i.succ.pred h = i :=
by { cases i, refl }
@[simp] lemma pred_mk_succ (i : ℕ) (h : i < n + 1) :
fin.pred ⟨i + 1, add_lt_add_right h 1⟩ (ne_of_vne (ne_of_gt (mk_succ_pos i h))) = ⟨i, h⟩ :=
by simp only [ext_iff, coe_pred, coe_mk, nat.add_sub_cancel]
@[simp] lemma pred_inj :
∀ {a b : fin (n + 1)} {ha : a ≠ 0} {hb : b ≠ 0}, a.pred ha = b.pred hb ↔ a = b
| ⟨0, _⟩ b ha hb := by contradiction
| ⟨i+1, _⟩ ⟨0, _⟩ ha hb := by contradiction
| ⟨i+1, hi⟩ ⟨j+1, hj⟩ ha hb := by simp [fin.eq_iff_veq]
/-- The greatest value of `fin (n+1)` -/
def last (n : ℕ) : fin (n+1) := ⟨_, n.lt_succ_self⟩
/-- `cast_lt i h` embeds `i` into a `fin` where `h` proves it belongs into. -/
def cast_lt (i : fin m) (h : i.1 < n) : fin n := ⟨i.1, h⟩
/-- `cast_le h i` embeds `i` into a larger `fin` type. -/
def cast_le (h : n ≤ m) (a : fin n) : fin m := cast_lt a (lt_of_lt_of_le a.2 h)
/-- `cast eq i` embeds `i` into a equal `fin` type. -/
def cast (eq : n = m) : fin n → fin m := cast_le $ le_of_eq eq
/-- `cast_add m i` embeds `i : fin n` in `fin (n+m)`. -/
def cast_add (m) : fin n → fin (n + m) := cast_le $ le_add_right n m
/-- `cast_succ i` embeds `i : fin n` in `fin (n+1)`. -/
def cast_succ : fin n → fin (n + 1) := cast_add 1
/-- `succ_above p i` embeds `fin n` into `fin (n + 1)` with a hole around `p`. -/
def succ_above (p : fin (n + 1)) (i : fin n) : fin (n + 1) :=
if i.cast_succ < p then i.cast_succ else i.succ
/-- `pred_above p i h` embeds `i : fin (n+1)` into `fin n` by ignoring `p`. -/
def pred_above (p : fin (n+1)) (i : fin (n+1)) (hi : i ≠ p) : fin n :=
if h : i < p
then i.cast_lt (lt_of_lt_of_le h $ nat.le_of_lt_succ p.2)
else i.pred $
have p < i, from lt_of_le_of_ne (le_of_not_gt h) hi.symm,
ne_of_gt (lt_of_le_of_lt (zero_le p) this)
/-- `sub_nat i h` subtracts `m` from `i`, generalizes `fin.pred`. -/
def sub_nat (m) (i : fin (n + m)) (h : m ≤ (i : ℕ)) : fin n :=
⟨(i : ℕ) - m, by { rw [nat.sub_lt_right_iff_lt_add h], exact i.is_lt }⟩
/-- `add_nat i h` adds `m` on `i`, generalizes `fin.succ`. -/
def add_nat (m) (i : fin n) : fin (n + m) :=
⟨(i : ℕ) + m, add_lt_add_right i.2 _⟩
/-- `nat_add i h` adds `n` on `i` -/
def nat_add (n) {m} (i : fin m) : fin (n + m) :=
⟨n + (i : ℕ), add_lt_add_left i.2 _⟩
theorem le_last (i : fin (n+1)) : i ≤ last n :=
le_of_lt_succ i.is_lt
@[simp] lemma coe_cast (k : fin n) (h : n = m) : (fin.cast h k : ℕ) = k := rfl
@[simp] lemma coe_cast_succ (k : fin n) : (k.cast_succ : ℕ) = k := rfl
@[simp] lemma coe_cast_lt (k : fin m) (h : (k : ℕ) < n) : (k.cast_lt h : ℕ) = k := rfl
@[simp] lemma coe_cast_le (k : fin m) (h : m ≤ n) : (k.cast_le h : ℕ) = k := rfl
@[simp] lemma coe_cast_add (k : fin m) : (k.cast_add n : ℕ) = k := rfl
lemma last_val (n : ℕ) : (last n).val = n := rfl
@[simp, norm_cast] lemma coe_last {n : ℕ} : (last n : ℕ) = n := rfl
@[simp] lemma succ_last (n : ℕ) : (last n).succ = last (n.succ) := rfl
@[simp] lemma cast_succ_cast_lt (i : fin (n + 1)) (h : (i : ℕ) < n) : cast_succ (cast_lt i h) = i :=
fin.eq_of_veq rfl
@[simp] lemma cast_lt_cast_succ {n : ℕ} (a : fin n) (h : (a : ℕ) < n) : cast_lt (cast_succ a) h = a :=
by cases a; refl
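/- Illustrative sketch (not part of the original file, arbitrary small values): the casts only
relax the bound, they never change the underlying natural number. -/
example (i : fin 5) : (i.cast_succ : ℕ) = i := coe_cast_succ i
example (i : fin 5) : (i.cast_le (dec_trivial : 5 ≤ 7) : ℕ) = i := rfl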
@[simp] lemma coe_sub_nat (i : fin (n + m)) (h : m ≤ i) : (i.sub_nat m h : ℕ) = i - m :=
rfl
@[simp] lemma coe_add_nat (i : fin (n + m)) : (i.add_nat m : ℕ) = i + m :=
rfl
@[simp] lemma cast_succ_inj {a b : fin n} : a.cast_succ = b.cast_succ ↔ a = b :=
by simp [eq_iff_veq]
lemma cast_succ_lt_last (a : fin n) : cast_succ a < last n := lt_iff_coe_lt_coe.mpr a.is_lt
@[simp] lemma cast_succ_zero : cast_succ (0 : fin (n + 1)) = 0 := rfl
/-- `cast_succ i` is positive when `i` is positive -/
lemma cast_succ_pos (i : fin (n + 1)) (h : 0 < i) : 0 < cast_succ i :=
by simpa [lt_iff_coe_lt_coe] using h
lemma last_pos : (0 : fin (n + 2)) < last (n + 1) :=
by simp [lt_iff_coe_lt_coe]
lemma coe_nat_eq_last (n) : (n : fin (n + 1)) = fin.last n :=
by { rw [←fin.of_nat_eq_coe, fin.of_nat, fin.last], simp only [nat.mod_eq_of_lt n.lt_succ_self] }
lemma le_coe_last (i : fin (n + 1)) : i ≤ n :=
by { rw fin.coe_nat_eq_last, exact fin.le_last i }
lemma eq_last_of_not_lt {i : fin (n+1)} (h : ¬ (i : ℕ) < n) : i = last n :=
le_antisymm (le_last i) (not_lt.1 h)
lemma add_one_pos (i : fin (n + 1)) (h : i < fin.last n) : (0 : fin (n + 1)) < i + 1 :=
begin
cases n,
{ exact absurd h (nat.not_lt_zero _) },
{ rw [lt_iff_coe_lt_coe, coe_last, ←add_lt_add_iff_right 1] at h,
rw [lt_iff_coe_lt_coe, coe_add, coe_zero, coe_one, nat.mod_eq_of_lt h],
exact nat.zero_lt_succ _ }
end
lemma one_pos : (0 : fin (n + 2)) < 1 := succ_pos 0
lemma zero_ne_one : (0 : fin (n + 2)) ≠ 1 := ne_of_lt one_pos
@[simp] lemma zero_eq_one_iff : (0 : fin (n + 1)) = 1 ↔ n = 0 :=
begin
split,
{ cases n; intro h,
{ refl },
{ have := zero_ne_one, contradiction } },
{ rintro rfl, refl }
end
@[simp] lemma one_eq_zero_iff : (1 : fin (n + 1)) = 0 ↔ n = 0 :=
by rw [eq_comm, zero_eq_one_iff]
lemma cast_succ_fin_succ (n : ℕ) (j : fin n) :
cast_succ (fin.succ j) = fin.succ (cast_succ j) :=
by { simp [fin.ext_iff], }
lemma cast_succ_lt_succ (i : fin n) : i.cast_succ < i.succ :=
by { rw [lt_iff_coe_lt_coe, cast_succ, coe_cast_add, coe_succ], exact lt_add_one _ }
@[norm_cast, simp] lemma coe_eq_cast_succ : (a : fin (n + 1)) = a.cast_succ :=
begin
rw [cast_succ, cast_add, cast_le, cast_lt, eq_iff_veq],
exact coe_val_of_lt (nat.lt.step a.is_lt),
end
@[simp] lemma coe_succ_eq_succ : a.cast_succ + 1 = a.succ :=
begin
cases n,
{ exact fin_zero_elim a },
{ simp [a.is_lt, eq_iff_veq, add_def, nat.mod_eq_of_lt] }
end
lemma lt_succ : a.cast_succ < a.succ :=
by { rw [cast_succ, lt_iff_coe_lt_coe, coe_cast_add, coe_succ], exact lt_add_one a.val }
@[simp] lemma pred_one {n : ℕ} : fin.pred (1 : fin (n + 2)) (ne.symm (ne_of_lt one_pos)) = 0 := rfl
lemma pred_add_one (i : fin (n + 2)) (h : (i : ℕ) < n + 1) :
pred (i + 1) (ne_of_gt (add_one_pos _ (lt_iff_coe_lt_coe.mpr h))) = cast_lt i h :=
begin
rw [ext_iff, coe_pred, coe_cast_lt, coe_add, coe_one, mod_eq_of_lt, nat.add_sub_cancel],
exact add_lt_add_right h 1,
end
/-- `min n m` as an element of `fin (m + 1)` -/
def clamp (n m : ℕ) : fin (m + 1) := of_nat $ min n m
@[simp] lemma coe_clamp (n m : ℕ) : (clamp n m : ℕ) = min n m :=
nat.mod_eq_of_lt $ nat.lt_succ_iff.mpr $ min_le_right _ _
lemma cast_le_injective {n₁ n₂ : ℕ} (h : n₁ ≤ n₂) : injective (fin.cast_le h)
| ⟨i₁, h₁⟩ ⟨i₂, h₂⟩ eq := fin.eq_of_veq $ show i₁ = i₂, from fin.veq_of_eq eq
lemma cast_succ_injective (n : ℕ) : injective (@fin.cast_succ n) :=
cast_le_injective (le_add_right n 1)
/-- Embedding `i : fin n` into `fin (n + 1)` with a hole around `p : fin (n + 1)`
embeds `i` by `cast_succ` when the resulting `i.cast_succ < p` -/
lemma succ_above_below (p : fin (n + 1)) (i : fin n) (h : i.cast_succ < p) :
p.succ_above i = i.cast_succ :=
by { rw [succ_above], exact if_pos h }
/-- Embedding `fin n` into `fin (n + 1)` with a hole around zero embeds by `succ` -/
@[simp] lemma succ_above_zero : succ_above (0 : fin (n + 1)) = fin.succ := rfl
/-- Embedding `fin n` into `fin (n + 1)` with a hole around `last n` embeds by `cast_succ` -/
@[simp] lemma succ_above_last : succ_above (fin.last n) = cast_succ :=
by { ext, simp only [succ_above, cast_succ_lt_last, if_true] }
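/- Illustrative sketch (not part of the original file, arbitrary small values): with pivot `0`
the hole is at the bottom, so everything is shifted up by `succ`; with pivot `last n` nothing
needs to move and the embedding is `cast_succ`. -/
example (i : fin 5) : (0 : fin 6).succ_above i = i.succ := congr_fun succ_above_zero i
example (i : fin 5) : (last 5).succ_above i = i.cast_succ := congr_fun succ_above_last i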
/-- Embedding `i : fin n` into `fin (n + 1)` with a hole around `p : fin (n + 1)`
embeds `i` by `succ` when the resulting `p < i.succ` -/
lemma succ_above_above (p : fin (n + 1)) (i : fin n) (h : p ≤ i.cast_succ) :
p.succ_above i = i.succ :=
by { rw [succ_above], exact if_neg (not_lt_of_le h) }
/-- Embedding `i : fin n` into `fin (n + 1)` is always about some hole `p` -/
lemma succ_above_lt_ge (p : fin (n + 1)) (i : fin n) : i.cast_succ < p ∨ p ≤ i.cast_succ :=
lt_or_ge (cast_succ i) p
/-- Embedding `i : fin n` into `fin (n + 1)` is always about some hole `p` -/
lemma succ_above_lt_gt (p : fin (n + 1)) (i : fin n) : i.cast_succ < p ∨ p < i.succ :=
or.cases_on (succ_above_lt_ge p i)
(λ h, or.inl h) (λ h, or.inr (lt_of_le_of_lt h (cast_succ_lt_succ i)))
/-- Embedding `i : fin n` into `fin (n + 1)` using a pivot `p` that is greater
results in a value that is less than `p`. -/
@[simp] lemma succ_above_lt_iff (p : fin (n + 1)) (i : fin n) : p.succ_above i < p ↔ i.cast_succ < p :=
begin
refine iff.intro _ _,
{ intro h,
cases succ_above_lt_ge p i with H H,
{ exact H },
{ rw succ_above_above _ _ H at h,
exact lt_trans (cast_succ_lt_succ i) h } },
{ intro h,
rw succ_above_below _ _ h,
exact h }
end
/-- Embedding `i : fin n` into `fin (n + 1)` using a pivot `p` that is lesser
results in a value that is greater than `p`. -/
lemma lt_succ_above_iff (p : fin (n + 1)) (i : fin n) : p < p.succ_above i ↔ p ≤ i.cast_succ :=
begin
refine iff.intro _ _,
{ intro h,
cases succ_above_lt_ge p i with H H,
{ rw succ_above_below _ _ H at h,
exact le_of_lt h },
{ exact H } },
{ intro h,
rw succ_above_above _ _ h,
exact lt_of_le_of_lt h (cast_succ_lt_succ i) },
end
/-- Embedding `i : fin n` into `fin (n + 1)` with a hole around `p : fin (n + 1)`
never results in `p` itself -/
theorem succ_above_ne (p : fin (n + 1)) (i : fin n) : p.succ_above i ≠ p :=
begin
intro eq,
by_cases H : i.cast_succ < p,
{ simpa [lt_irrefl, ←succ_above_below _ _ H, eq] using H },
{ simpa [←succ_above_above _ _ (le_of_not_lt H), eq] using cast_succ_lt_succ i }
end
/-- Embedding a positive `fin n` results in a positive `fin (n + 1)` -/
lemma succ_above_pos (p : fin (n + 2)) (i : fin (n + 1)) (h : 0 < i) : 0 < p.succ_above i :=
begin
by_cases H : i.cast_succ < p,
{ simpa [succ_above_below _ _ H] using cast_succ_pos _ h },
{ simpa [succ_above_above _ _ (le_of_not_lt H)] using succ_pos _ },
end
/-- Given a fixed pivot `x : fin (n + 1)`, `x.succ_above` is injective -/
lemma succ_above_right_inj {x : fin (n + 1)} :
x.succ_above a = x.succ_above b ↔ a = b :=
begin
refine iff.intro _ (λ h, by rw h),
intro h,
cases succ_above_lt_ge x a with ha ha;
cases succ_above_lt_ge x b with hb hb,
{ simpa only [succ_above_below, ha, hb, cast_succ_inj] using h },
{ simp only [succ_above_below, succ_above_above, ha, hb] at h,
rw h at ha,
exact absurd (lt_of_le_of_lt hb (cast_succ_lt_succ _)) (asymm ha) },
{ simp only [succ_above_below, succ_above_above, ha, hb] at h,
rw ←h at hb,
exact absurd (lt_of_le_of_lt ha (cast_succ_lt_succ _)) (asymm hb) },
{ simpa only [succ_above_above, ha, hb, succ_inj] using h },
end
/-- Given a fixed pivot `x : fin (n + 1)`, `x.succ_above` is injective -/
lemma succ_above_right_injective {x : fin (n + 1)} : injective (succ_above x) :=
λ _ _, succ_above_right_inj.mp
/-- Embedding a `fin (n + 1)` into `fin n` and embedding it back around the same hole
gives the starting `fin (n + 1)` -/
@[simp] lemma succ_above_descend (p i : fin (n + 1)) (h : i ≠ p) :
p.succ_above (p.pred_above i h) = i :=
begin
rw pred_above,
cases lt_or_le i p with H H,
{ simp only [succ_above_below, cast_succ_cast_lt, H, dif_pos]},
{ rw le_iff_coe_le_coe at H,
rw succ_above_above,
{ simp only [le_iff_coe_le_coe, H, not_lt, dif_neg, succ_pred] },
{ simp only [le_iff_coe_le_coe, H, coe_pred, not_lt, dif_neg, coe_cast_succ],
exact le_pred_of_lt (lt_of_le_of_ne H (vne_of_ne h.symm)) } }
end
/-- Embedding a `fin n` into `fin (n + 1)` and embedding it back around the same hole
gives the starting `fin n` -/
@[simp] lemma pred_above_succ_above (p : fin (n + 1)) (i : fin n) :
p.pred_above (p.succ_above i) (succ_above_ne _ _) = i :=
begin
rw pred_above,
by_cases H : i.cast_succ < p,
{ simp [succ_above_below _ _ H, H] },
{ cases succ_above_lt_gt p i with h h,
{ exact absurd h H },
{ simp [succ_above_above _ _ (le_of_not_lt H), dif_neg H] } }
end
/-- `succ_above` is injective at the pivot -/
lemma succ_above_left_inj {x y : fin (n + 1)} :
x.succ_above = y.succ_above ↔ x = y :=
begin
refine iff.intro _ (λ h, by rw h),
contrapose!,
intros H h,
have key := congr_fun h (y.pred_above x H),
rw [succ_above_descend] at key,
exact absurd key (succ_above_ne x _)
end
/-- `succ_above` is injective at the pivot -/
lemma succ_above_left_injective : injective (@succ_above n) :=
λ _ _, succ_above_left_inj.mp
/-- A function `f` on `fin n` is strictly monotone if and only if `f i < f (i+1)` for all `i`. -/
lemma strict_mono_iff_lt_succ {α : Type*} [preorder α] {f : fin n → α} :
strict_mono f ↔ ∀ i (h : i + 1 < n), f ⟨i, lt_of_le_of_lt (nat.le_succ i) h⟩ < f ⟨i+1, h⟩ :=
begin
split,
{ assume H i hi,
apply H,
exact nat.lt_succ_self _ },
{ assume H,
have A : ∀ i j (h : i < j) (h' : j < n), f ⟨i, lt_trans h h'⟩ < f ⟨j, h'⟩,
{ assume i j h h',
induction h with k h IH,
{ exact H _ _ },
{ exact lt_trans (IH (nat.lt_of_succ_lt h')) (H _ _) } },
assume i j hij,
convert A (i : ℕ) (j : ℕ) hij j.2; ext; simp only [subtype.coe_eta] }
end
section rec
/-- Define `C n i` by induction on `i : fin n` interpreted as `(0 : fin (n - i)).succ.succ…`.
This function has two arguments: `H0 n` defines `0`-th element `C (n+1) 0` of an `(n+1)`-tuple,
and `Hs n i` defines `(i+1)`-st element of `(n+1)`-tuple based on `n`, `i`, and `i`-th element
of `n`-tuple. -/
@[elab_as_eliminator] def succ_rec
{C : Π n, fin n → Sort*}
(H0 : Π n, C (succ n) 0)
(Hs : Π n i, C n i → C (succ n) i.succ) : Π {n : ℕ} (i : fin n), C n i
| 0 i := i.elim0
| (succ n) ⟨0, _⟩ := H0 _
| (succ n) ⟨succ i, h⟩ := Hs _ _ (succ_rec ⟨i, lt_of_succ_lt_succ h⟩)
/-- Define `C n i` by induction on `i : fin n` interpreted as `(0 : fin (n - i)).succ.succ…`.
This function has two arguments: `H0 n` defines `0`-th element `C (n+1) 0` of an `(n+1)`-tuple,
and `Hs n i` defines `(i+1)`-st element of `(n+1)`-tuple based on `n`, `i`, and `i`-th element
of `n`-tuple.
A version of `fin.succ_rec` taking `i : fin n` as the first argument. -/
@[elab_as_eliminator] def succ_rec_on {n : ℕ} (i : fin n)
{C : Π n, fin n → Sort*}
(H0 : Π n, C (succ n) 0)
(Hs : Π n i, C n i → C (succ n) i.succ) : C n i :=
i.succ_rec H0 Hs
@[simp] theorem succ_rec_on_zero {C : ∀ n, fin n → Sort*} {H0 Hs} (n) :
@fin.succ_rec_on (succ n) 0 C H0 Hs = H0 n :=
rfl
@[simp] theorem succ_rec_on_succ {C : ∀ n, fin n → Sort*} {H0 Hs} {n} (i : fin n) :
@fin.succ_rec_on (succ n) i.succ C H0 Hs = Hs n i (fin.succ_rec_on i H0 Hs) :=
by cases i; refl
/-- Define `f : Π i : fin n.succ, C i` by separately handling the cases `i = 0` and
`i = j.succ`, `j : fin n`. -/
@[elab_as_eliminator] def cases
{C : fin (succ n) → Sort*} (H0 : C 0) (Hs : Π i : fin n, C (i.succ)) :
Π (i : fin (succ n)), C i
| ⟨0, h⟩ := H0
| ⟨succ i, h⟩ := Hs ⟨i, lt_of_succ_lt_succ h⟩
@[simp] theorem cases_zero {n} {C : fin (succ n) → Sort*} {H0 Hs} : @fin.cases n C H0 Hs 0 = H0 :=
rfl
@[simp] theorem cases_succ {n} {C : fin (succ n) → Sort*} {H0 Hs} (i : fin n) :
@fin.cases n C H0 Hs i.succ = Hs i :=
by cases i; refl
lemma forall_fin_succ {P : fin (n+1) → Prop} :
(∀ i, P i) ↔ P 0 ∧ (∀ i:fin n, P i.succ) :=
⟨λ H, ⟨H 0, λ i, H _⟩, λ ⟨H0, H1⟩ i, fin.cases H0 H1 i⟩
lemma exists_fin_succ {P : fin (n+1) → Prop} :
(∃ i, P i) ↔ P 0 ∨ (∃i:fin n, P i.succ) :=
⟨λ ⟨i, h⟩, fin.cases or.inl (λ i hi, or.inr ⟨i, hi⟩) i h,
λ h, or.elim h (λ h, ⟨0, h⟩) $ λ⟨i, hi⟩, ⟨i.succ, hi⟩⟩
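/- Illustrative sketch (not part of the original file, arbitrary size): `fin.cases` reduces a
statement about all of `fin (n + 1)` to the zero case and the successor case. -/
example (P : fin 4 → Prop) (h0 : P 0) (hs : ∀ i : fin 3, P i.succ) (i : fin 4) : P i :=
fin.cases h0 hs i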
end rec
section tuple
/-!
### Tuples
We can think of the type `Π(i : fin n), α i` as `n`-tuples of elements of possibly varying type
`α i`. A particular case is `fin n → α` of elements with all the same type. Here are some relevant
operations, first about adding or removing elements at the beginning of a tuple.
-/
/-- There is exactly one tuple of size zero. -/
instance tuple0_unique (α : fin 0 → Type u) : unique (Π i : fin 0, α i) :=
{ default := fin_zero_elim, uniq := λ x, funext fin_zero_elim }
variables {α : fin (n+1) → Type u} (x : α 0) (q : Πi, α i) (p : Π(i : fin n), α (i.succ))
(i : fin n) (y : α i.succ) (z : α 0)
/-- The tail of an `n+1` tuple, i.e., its last `n` entries -/
def tail (q : Πi, α i) : (Π(i : fin n), α (i.succ)) := λ i, q i.succ
/-- Adding an element at the beginning of an `n`-tuple, to get an `n+1`-tuple -/
def cons (x : α 0) (p : Π(i : fin n), α (i.succ)) : Πi, α i :=
λ j, fin.cases x p j
@[simp] lemma tail_cons : tail (cons x p) = p :=
by simp [tail, cons]
@[simp] lemma cons_succ : cons x p i.succ = p i :=
by simp [cons]
@[simp] lemma cons_zero : cons x p 0 = x :=
by simp [cons]
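/- Illustrative sketch (not part of the original file, arbitrary values): iterating `cons`
builds a concrete tuple, and the defining equations read off its entries. -/
example : (cons 10 (cons 20 (cons 30 fin_zero_elim)) : fin 3 → ℕ) 0 = 10 := rfl
example : (cons 10 (cons 20 (cons 30 fin_zero_elim)) : fin 3 → ℕ) 1 = 20 := rfl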
/-- Updating a tuple and adding an element at the beginning commute. -/
@[simp] lemma cons_update : cons x (update p i y) = update (cons x p) i.succ y :=
begin
ext j,
by_cases h : j = 0,
{ rw h, simp [ne.symm (succ_ne_zero i)] },
{ let j' := pred j h,
have : j'.succ = j := succ_pred j h,
rw [← this, cons_succ],
by_cases h' : j' = i,
{ rw h', simp },
{ have : j'.succ ≠ i.succ, by rwa [ne.def, succ_inj],
rw [update_noteq h', update_noteq this, cons_succ] } }
end
/-- Adding an element at the beginning of a tuple and then updating it amounts to adding it
directly. -/
lemma update_cons_zero : update (cons x p) 0 z = cons z p :=
begin
ext j,
by_cases h : j = 0,
{ rw h, simp },
{ simp only [h, update_noteq, ne.def, not_false_iff],
let j' := pred j h,
have : j'.succ = j := succ_pred j h,
rw [← this, cons_succ, cons_succ] }
end
/-- Concatenating the first element of a tuple with its tail gives back the original tuple -/
@[simp] lemma cons_self_tail : cons (q 0) (tail q) = q :=
begin
ext j,
by_cases h : j = 0,
{ rw h, simp },
{ let j' := pred j h,
have : j'.succ = j := succ_pred j h,
rw [← this, tail, cons_succ] }
end
/-- Updating the first element of a tuple does not change the tail. -/
@[simp] lemma tail_update_zero : tail (update q 0 z) = tail q :=
by { ext j, simp [tail, fin.succ_ne_zero] }
/-- Updating a nonzero element and taking the tail commute. -/
@[simp] lemma tail_update_succ :
tail (update q i.succ y) = update (tail q) i y :=
begin
ext j,
by_cases h : j = i,
{ rw h, simp [tail] },
{ simp [tail, (fin.succ_injective n).ne h, h] }
end
lemma comp_cons {α : Type*} {β : Type*} (g : α → β) (y : α) (q : fin n → α) :
g ∘ (cons y q) = cons (g y) (g ∘ q) :=
begin
ext j,
by_cases h : j = 0,
{ rw h, refl },
{ let j' := pred j h,
have : j'.succ = j := succ_pred j h,
rw [← this, cons_succ, comp_app, cons_succ] }
end
lemma comp_tail {α : Type*} {β : Type*} (g : α → β) (q : fin n.succ → α) :
g ∘ (tail q) = tail (g ∘ q) :=
by { ext j, simp [tail] }
end tuple
section tuple_right
/-! In the previous section, we have discussed inserting or removing elements on the left of a
tuple. In this section, we do the same on the right. A difference is that `fin (n+1)` is constructed
inductively from `fin n` starting from the left, not from the right. This implies that Lean needs
more help to realize that elements belong to the right types, i.e., we need to insert casts at
several places. -/
variables {α : fin (n+1) → Type u} (x : α (last n)) (q : Πi, α i) (p : Π(i : fin n), α i.cast_succ)
(i : fin n) (y : α i.cast_succ) (z : α (last n))
/-- The beginning of an `n+1` tuple, i.e., its first `n` entries -/
def init (q : Πi, α i) (i : fin n) : α i.cast_succ :=
q i.cast_succ
/-- Adding an element at the end of an `n`-tuple, to get an `n+1`-tuple. The name `snoc` comes from
`cons` (i.e., adding an element to the left of a tuple) read in reverse order. -/
def snoc (p : Π(i : fin n), α i.cast_succ) (x : α (last n)) (i : fin (n+1)) : α i :=
if h : i.val < n
then _root_.cast (by rw fin.cast_succ_cast_lt i h) (p (cast_lt i h))
else _root_.cast (by rw eq_last_of_not_lt h) x
@[simp] lemma init_snoc : init (snoc p x) = p :=
begin
ext i,
have h' := fin.cast_lt_cast_succ i i.is_lt,
simp [init, snoc, i.is_lt, h'],
convert cast_eq rfl (p i)
end
@[simp] lemma snoc_cast_succ : snoc p x i.cast_succ = p i :=
begin
have : i.cast_succ.val < n := i.is_lt,
have h' := fin.cast_lt_cast_succ i i.is_lt,
simp [snoc, this, h'],
convert cast_eq rfl (p i)
end
@[simp] lemma snoc_last : snoc p x (last n) = x :=
by { simp [snoc] }
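/- Illustrative sketch (not part of the original file, arbitrary values): the entry of
`snoc p x` at `last n` is the newly appended element. -/
example (p : fin 3 → ℕ) (x : ℕ) : (snoc p x : fin (3 + 1) → ℕ) (last 3) = x := snoc_last _ _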
/-- Updating a tuple and adding an element at the end commute. -/
@[simp] lemma snoc_update : snoc (update p i y) x = update (snoc p x) i.cast_succ y :=
begin
ext j,
by_cases h : j.val < n,
{ simp only [snoc, h, dif_pos],
by_cases h' : j = cast_succ i,
{ have C1 : α i.cast_succ = α j, by rw h',
have E1 : update (snoc p x) i.cast_succ y j = _root_.cast C1 y,
{ have : update (snoc p x) j (_root_.cast C1 y) j = _root_.cast C1 y, by simp,
convert this,
{ exact h'.symm },
{ exact heq_of_eq_mp (congr_arg α (eq.symm h')) rfl } },
have C2 : α i.cast_succ = α (cast_succ (cast_lt j h)),
by rw [cast_succ_cast_lt, h'],
have E2 : update p i y (cast_lt j h) = _root_.cast C2 y,
{ have : update p (cast_lt j h) (_root_.cast C2 y) (cast_lt j h) = _root_.cast C2 y,
by simp,
convert this,
{ simp [h, h'] },
{ exact heq_of_eq_mp C2 rfl } },
rw [E1, E2],
exact eq_rec_compose _ _ _ },
{ have : ¬(cast_lt j h = i),
by { assume E, apply h', rw [← E, cast_succ_cast_lt] },
simp [h', this, snoc, h] } },
{ rw eq_last_of_not_lt h,
simp [ne.symm (ne_of_lt (cast_succ_lt_last i))] }
end
/-- Adding an element at the end of a tuple and then updating the last entry amounts to
adding the new value directly. -/
lemma update_snoc_last : update (snoc p x) (last n) z = snoc p z :=
begin
ext j,
by_cases h : j.val < n,
{ have : j ≠ last n := ne_of_lt h,
simp [h, update_noteq, this, snoc] },
{ rw eq_last_of_not_lt h,
simp }
end
/-- Appending the last entry of a tuple to its initial part gives back the original tuple -/
@[simp] lemma snoc_init_self : snoc (init q) (q (last n)) = q :=
begin
ext j,
by_cases h : j.val < n,
{ have : j ≠ last n := ne_of_lt h,
simp [h, update_noteq, this, snoc, init, cast_succ_cast_lt],
have A : cast_succ (cast_lt j h) = j := cast_succ_cast_lt _ _,
rw ← cast_eq rfl (q j),
congr' 1; rw A },
{ rw eq_last_of_not_lt h,
simp }
end
/-- Updating the last element of a tuple does not change the beginning. -/
@[simp] lemma init_update_last : init (update q (last n) z) = init q :=
by { ext j, simp [init, ne_of_lt, cast_succ_lt_last] }
/-- Updating an element and taking the beginning commute. -/
@[simp] lemma init_update_cast_succ :
init (update q i.cast_succ y) = update (init q) i y :=
begin
ext j,
by_cases h : j = i,
{ rw h, simp [init] },
{ simp [init, h] }
end
/-- `tail` and `init` commute. We state this lemma in a non-dependent setting, as otherwise it
would involve a cast to convince Lean that the two types are equal, making it harder to use. -/
lemma tail_init_eq_init_tail {β : Type*} (q : fin (n+2) → β) :
tail (init q) = init (tail q) :=
by { ext i, simp [tail, init, cast_succ_fin_succ] }
/-- `cons` and `snoc` commute. We state this lemma in a non-dependent setting, as otherwise it
would involve a cast to convince Lean that the two types are equal, making it harder to use. -/
lemma cons_snoc_eq_snoc_cons {β : Type*} (a : β) (q : fin n → β) (b : β) :
@cons n.succ (λ i, β) a (snoc q b) = snoc (cons a q) b :=
begin
ext i,
by_cases h : i = 0,
{ rw h, refl },
set j := pred i h with ji,
have : i = j.succ, by rw [ji, succ_pred],
rw [this, cons_succ],
by_cases h' : j.val < n,
{ set k := cast_lt j h' with jk,
have : j = k.cast_succ, by rw [jk, cast_succ_cast_lt],
rw [this, ← cast_succ_fin_succ],
simp },
rw [eq_last_of_not_lt h', succ_last],
simp
end
lemma comp_snoc {α : Type*} {β : Type*} (g : α → β) (q : fin n → α) (y : α) :
g ∘ (snoc q y) = snoc (g ∘ q) (g y) :=
begin
ext j,
by_cases h : j.val < n,
{ have : j ≠ last n := ne_of_lt h,
simp [h, this, snoc, cast_succ_cast_lt] },
{ rw eq_last_of_not_lt h,
simp }
end
lemma comp_init {α : Type*} {β : Type*} (g : α → β) (q : fin n.succ → α) :
g ∘ (init q) = init (g ∘ q) :=
by { ext j, simp [init] }
end tuple_right
section find
/-- `find p` returns the first index `n` where `p n` is satisfied, and `none` if it is never
satisfied. -/
def find : Π {n : ℕ} (p : fin n → Prop) [decidable_pred p], option (fin n)
| 0 p _ := none
| (n+1) p _ := by resetI; exact option.cases_on
(@find n (λ i, p (i.cast_lt (nat.lt_succ_of_lt i.2))) _)
(if h : p (fin.last n) then some (fin.last n) else none)
(λ i, some (i.cast_lt (nat.lt_succ_of_lt i.2)))
/-- If `find p = some i`, then `p i` holds -/
lemma find_spec : Π {n : ℕ} (p : fin n → Prop) [decidable_pred p] {i : fin n}
(hi : i ∈ by exactI fin.find p), p i
| 0 p I i hi := option.no_confusion hi
| (n+1) p I i hi := begin
dsimp [find] at hi,
resetI,
cases h : find (λ i : fin n, (p (i.cast_lt (nat.lt_succ_of_lt i.2)))) with j,
{ rw h at hi,
dsimp at hi,
split_ifs at hi with hl hl,
{ exact option.some_inj.1 hi ▸ hl },
{ exact option.no_confusion hi } },
{ rw h at hi,
rw [← option.some_inj.1 hi],
exact find_spec _ h }
end
/-- `find p` does not return `none` if and only if `p i` holds at some index `i`. -/
lemma is_some_find_iff : Π {n : ℕ} {p : fin n → Prop} [decidable_pred p],
by exactI (find p).is_some ↔ ∃ i, p i
| 0 p _ := iff_of_false (λ h, bool.no_confusion h) (λ ⟨i, _⟩, fin_zero_elim i)
| (n+1) p _ := ⟨λ h, begin
rw [option.is_some_iff_exists] at h,
cases h with i hi,
exactI ⟨i, find_spec _ hi⟩
end, λ ⟨⟨i, hin⟩, hi⟩,
begin
resetI,
dsimp [find],
cases h : find (λ i : fin n, (p (i.cast_lt (nat.lt_succ_of_lt i.2)))) with j,
{ split_ifs with hl hl,
{ exact option.is_some_some },
{ have := (@is_some_find_iff n (λ x, p (x.cast_lt (nat.lt_succ_of_lt x.2))) _).2
⟨⟨i, lt_of_le_of_ne (nat.le_of_lt_succ hin)
(λ h, by clear_aux_decl; cases h; exact hl hi)⟩, hi⟩,
rw h at this,
exact this } },
{ simp }
end⟩
/-- `find p` returns `none` if and only if `p i` never holds. -/
lemma find_eq_none_iff {n : ℕ} {p : fin n → Prop} [decidable_pred p] :
find p = none ↔ ∀ i, ¬ p i :=
by rw [← not_exists, ← is_some_find_iff]; cases (find p); simp
/-- If `find p` returns `some i`, then `p j` does not hold for `j < i`, i.e., `i` is minimal among
the indices where `p` holds. -/
lemma find_min : Π {n : ℕ} {p : fin n → Prop} [decidable_pred p] {i : fin n}
(hi : i ∈ by exactI fin.find p) {j : fin n} (hj : j < i), ¬ p j
| 0 p _ i hi j hj hpj := option.no_confusion hi
| (n+1) p _ i hi ⟨j, hjn⟩ hj hpj := begin
resetI,
dsimp [find] at hi,
cases h : find (λ i : fin n, (p (i.cast_lt (nat.lt_succ_of_lt i.2)))) with k,
{ rw [h] at hi,
split_ifs at hi with hl hl,
{ have := option.some_inj.1 hi,
subst this,
rw [find_eq_none_iff] at h,
exact h ⟨j, hj⟩ hpj },
{ exact option.no_confusion hi } },
{ rw h at hi,
dsimp at hi,
have := option.some_inj.1 hi,
subst this,
exact find_min h (show (⟨j, lt_trans hj k.2⟩ : fin n) < k, from hj) hpj }
end
lemma find_min' {p : fin n → Prop} [decidable_pred p] {i : fin n}
(h : i ∈ fin.find p) {j : fin n} (hj : p j) : i ≤ j :=
le_of_not_gt (λ hij, find_min h hij hj)
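/- Illustrative sketch (not part of the original file): combining `find_spec` with `find_min'`,
an index returned by `find` both satisfies `p` and is least among all witnesses. -/
example {p : fin 7 → Prop} [decidable_pred p] {i : fin 7} (h : i ∈ find p) {j : fin 7}
  (hj : p j) : p i ∧ i ≤ j :=
⟨find_spec p h, find_min' h hj⟩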
lemma nat_find_mem_find {p : fin n → Prop} [decidable_pred p]
(h : ∃ i, ∃ hin : i < n, p ⟨i, hin⟩) :
(⟨nat.find h, (nat.find_spec h).fst⟩ : fin n) ∈ find p :=
let ⟨i, hin, hi⟩ := h in
begin
cases hf : find p with f,
{ rw [find_eq_none_iff] at hf,
exact (hf ⟨i, hin⟩ hi).elim },
{ refine option.some_inj.2 (le_antisymm _ _),
{ exact find_min' hf (nat.find_spec h).snd },
{ exact nat.find_min' _ ⟨f.2, by convert find_spec p hf;
exact fin.eta _ _⟩ } }
end
lemma mem_find_iff {p : fin n → Prop} [decidable_pred p] {i : fin n} :
i ∈ fin.find p ↔ p i ∧ ∀ j, p j → i ≤ j :=
⟨λ hi, ⟨find_spec _ hi, λ _, find_min' hi⟩,
begin
rintros ⟨hpi, hj⟩,
cases hfp : fin.find p,
{ rw [find_eq_none_iff] at hfp,
exact (hfp _ hpi).elim },
{ exact option.some_inj.2 (le_antisymm (find_min' hfp hpi) (hj _ (find_spec _ hfp))) }
end⟩
lemma find_eq_some_iff {p : fin n → Prop} [decidable_pred p] {i : fin n} :
fin.find p = some i ↔ p i ∧ ∀ j, p j → i ≤ j :=
mem_find_iff
lemma mem_find_of_unique {p : fin n → Prop} [decidable_pred p]
(h : ∀ i j, p i → p j → i = j) {i : fin n} (hi : p i) : i ∈ fin.find p :=
mem_find_iff.2 ⟨hi, λ j hj, le_of_eq $ h i j hi hj⟩
end find
@[simp]
lemma coe_of_nat_eq_mod (m n : ℕ) :
((n : fin (succ m)) : ℕ) = n % succ m :=
by rw [← of_nat_eq_coe]; refl
@[simp] lemma coe_of_nat_eq_mod' (m n : ℕ) [I : fact (0 < m)] :
(@fin.of_nat' _ I n : ℕ) = n % m :=
rfl
end fin
|
44c90ff2c83dfe0535bc96490de90bc1aaad308b
|
8cae430f0a71442d02dbb1cbb14073b31048e4b0
|
/counterexamples/sorgenfrey_line.lean
|
ac740f1bb34da9aaf4cf9e24180859284440b69c
|
[
"Apache-2.0"
] |
permissive
|
leanprover-community/mathlib
|
56a2cadd17ac88caf4ece0a775932fa26327ba0e
|
442a83d738cb208d3600056c489be16900ba701d
|
refs/heads/master
| 1,693,584,102,358
| 1,693,471,902,000
| 1,693,471,902,000
| 97,922,418
| 1,595
| 352
|
Apache-2.0
| 1,694,693,445,000
| 1,500,624,130,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 14,390
|
lean
|
/-
Copyright (c) 2022 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import topology.instances.irrational
import topology.algebra.order.archimedean
import topology.paracompact
import topology.metric_space.metrizable
import topology.metric_space.emetric_paracompact
import data.set.intervals.monotone
/-!
# Sorgenfrey line
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
In this file we define `sorgenfrey_line` (notation: `ℝₗ`) to be the Sorgenfrey line. It is the real
line with the topological space structure generated by half-open intervals `set.Ico a b`.
We prove that this line is a completely normal Hausdorff space but its product with itself is not a
normal space. In particular, this implies that the topology on `ℝₗ` is neither metrizable, nor
second countable.
## Notations
- `ℝₗ`: Sorgenfrey line.
## TODO
Prove that the Sorgenfrey line is a paracompact space.
-/
open set filter topological_space
open_locale topology filter
namespace counterexample
noncomputable theory
/-- The Sorgenfrey line. It is the real line with the topological space structure generated by
half-open intervals `set.Ico a b`. -/
@[derive [conditionally_complete_linear_order, linear_ordered_field, archimedean]]
def sorgenfrey_line : Type := ℝ
localized "notation (name := sorgenfrey_line) `ℝₗ` := sorgenfrey_line" in sorgenfrey_line
namespace sorgenfrey_line
/-- Ring homomorphism between the Sorgenfrey line and the standard real line. -/
def to_real : ℝₗ ≃+* ℝ := ring_equiv.refl ℝ
instance : topological_space ℝₗ :=
topological_space.generate_from {s : set ℝₗ | ∃ a b : ℝₗ, Ico a b = s}
lemma is_open_Ico (a b : ℝₗ) : is_open (Ico a b) :=
topological_space.generate_open.basic _ ⟨a, b, rfl⟩
lemma is_open_Ici (a : ℝₗ) : is_open (Ici a) :=
Union_Ico_right a ▸ is_open_Union (is_open_Ico a)
lemma nhds_basis_Ico (a : ℝₗ) : (𝓝 a).has_basis (λ b, a < b) (λ b, Ico a b) :=
begin
rw topological_space.nhds_generate_from,
haveI : nonempty {x // x ≤ a} := set.nonempty_Iic_subtype,
have : (⨅ (x : {i // i ≤ a}), 𝓟 (Ici ↑x)) = 𝓟 (Ici a),
{ refine (is_least.is_glb _).infi_eq,
exact ⟨⟨⟨a, le_rfl⟩, rfl⟩, forall_range_iff.2 $
λ b, principal_mono.2 $ Ici_subset_Ici.2 b.2⟩, },
simp only [mem_set_of_eq, infi_and, infi_exists, @infi_comm _ (_ ∈ _),
@infi_comm _ (set ℝₗ), infi_infi_eq_right],
simp_rw [@infi_comm _ ℝₗ (_ ≤ _), infi_subtype', ← Ici_inter_Iio, ← inf_principal, ← inf_infi,
← infi_inf, this, infi_subtype],
suffices : (⨅ x ∈ Ioi a, 𝓟 (Iio x)).has_basis ((<) a) Iio, from this.principal_inf _,
refine has_basis_binfi_principal _ nonempty_Ioi,
exact directed_on_iff_directed.2 (directed_of_inf $ λ x y hxy, Iio_subset_Iio hxy),
end
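/- Illustrative sketch (not part of the original file): by the basis just established, a set is
a neighbourhood of `a` exactly when it contains some interval `Ico a b` with `a < b`. -/
example (a : ℝₗ) (s : set ℝₗ) : s ∈ 𝓝 a ↔ ∃ b (hb : a < b), Ico a b ⊆ s :=
(nhds_basis_Ico a).mem_iff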
lemma nhds_basis_Ico_rat (a : ℝₗ) :
(𝓝 a).has_countable_basis (λ r : ℚ, a < r) (λ r, Ico a r) :=
begin
refine ⟨(nhds_basis_Ico a).to_has_basis (λ b hb, _) (λ r hr, ⟨_, hr, subset.rfl⟩),
set.to_countable _⟩,
rcases exists_rat_btwn hb with ⟨r, har, hrb⟩,
exact ⟨r, har, Ico_subset_Ico_right hrb.le⟩
end
lemma nhds_basis_Ico_inv_pnat (a : ℝₗ) :
(𝓝 a).has_basis (λ n : ℕ+, true) (λ n, Ico a (a + n⁻¹)) :=
begin
refine (nhds_basis_Ico a).to_has_basis (λ b hb, _)
(λ n hn, ⟨_, lt_add_of_pos_right _ (inv_pos.2 $ nat.cast_pos.2 n.pos), subset.rfl⟩),
rcases exists_nat_one_div_lt (sub_pos.2 hb) with ⟨k, hk⟩,
rw [one_div] at hk,
rw [← nat.cast_add_one] at hk,
exact ⟨k.succ_pnat, trivial, Ico_subset_Ico_right (le_sub_iff_add_le'.1 hk.le)⟩
end
lemma nhds_countable_basis_Ico_inv_pnat (a : ℝₗ) :
(𝓝 a).has_countable_basis (λ n : ℕ+, true) (λ n, Ico a (a + n⁻¹)) :=
⟨nhds_basis_Ico_inv_pnat a, set.to_countable _⟩
lemma nhds_antitone_basis_Ico_inv_pnat (a : ℝₗ) :
(𝓝 a).has_antitone_basis (λ n : ℕ+, Ico a (a + n⁻¹)) :=
⟨nhds_basis_Ico_inv_pnat a, monotone_const.Ico $
antitone.const_add (λ k l hkl, inv_le_inv_of_le (nat.cast_pos.2 k.pos) (nat.mono_cast hkl)) _⟩
lemma is_open_iff {s : set ℝₗ} : is_open s ↔ ∀ x ∈ s, ∃ y > x, Ico x y ⊆ s :=
is_open_iff_mem_nhds.trans $ forall₂_congr $ λ x hx, (nhds_basis_Ico x).mem_iff
lemma is_closed_iff {s : set ℝₗ} : is_closed s ↔ ∀ x ∉ s, ∃ y > x, disjoint (Ico x y) s :=
by simp only [← is_open_compl_iff, is_open_iff, mem_compl_iff, subset_compl_iff_disjoint_right]
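/- Illustrative sketch (not part of the original file): specializing `is_open_iff`, every point
of an open set comes with a half-open interval around it inside the set. -/
example {s : set ℝₗ} (hs : is_open s) {x : ℝₗ} (hx : x ∈ s) : ∃ y > x, Ico x y ⊆ s :=
is_open_iff.1 hs x hx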
lemma exists_Ico_disjoint_closed {a : ℝₗ} {s : set ℝₗ} (hs : is_closed s) (ha : a ∉ s) :
∃ b > a, disjoint (Ico a b) s :=
is_closed_iff.1 hs a ha
@[simp] lemma map_to_real_nhds (a : ℝₗ) : map to_real (𝓝 a) = 𝓝[≥] (to_real a) :=
begin
refine ((nhds_basis_Ico a).map _).eq_of_same_basis _,
simpa only [to_real.image_eq_preimage] using nhds_within_Ici_basis_Ico (to_real a)
end
lemma nhds_eq_map (a : ℝₗ) : 𝓝 a = map to_real.symm (𝓝[≥] a.to_real) :=
by simp_rw [← map_to_real_nhds, map_map, (∘), to_real.symm_apply_apply, map_id']
lemma nhds_eq_comap (a : ℝₗ) : 𝓝 a = comap to_real (𝓝[≥] a.to_real) :=
by rw [← map_to_real_nhds, comap_map to_real.injective]
@[continuity] lemma continuous_to_real : continuous to_real :=
continuous_iff_continuous_at.2 $ λ x,
by { rw [continuous_at, tendsto, map_to_real_nhds], exact inf_le_left }
instance : order_closed_topology ℝₗ :=
⟨is_closed_le_prod.preimage (continuous_to_real.prod_map continuous_to_real)⟩
instance : has_continuous_add ℝₗ :=
begin
refine ⟨continuous_iff_continuous_at.2 _⟩,
rintro ⟨x, y⟩,
simp only [continuous_at, nhds_prod_eq, nhds_eq_map, nhds_eq_comap (x + y), prod_map_map_eq,
tendsto_comap_iff, tendsto_map'_iff, (∘), ← nhds_within_prod_eq],
exact (continuous_add.tendsto _).inf (maps_to.tendsto $ λ x hx, add_le_add hx.1 hx.2)
end
lemma is_clopen_Ici (a : ℝₗ) : is_clopen (Ici a) := ⟨is_open_Ici a, is_closed_Ici⟩
lemma is_clopen_Iio (a : ℝₗ) : is_clopen (Iio a) :=
by simpa only [compl_Ici] using (is_clopen_Ici a).compl
lemma is_clopen_Ico (a b : ℝₗ) : is_clopen (Ico a b) :=
(is_clopen_Ici a).inter (is_clopen_Iio b)
instance : totally_disconnected_space ℝₗ :=
⟨λ s hs' hs x hx y hy, le_antisymm (hs.subset_clopen (is_clopen_Ici x) ⟨x, hx, le_rfl⟩ hy)
(hs.subset_clopen (is_clopen_Ici y) ⟨y, hy, le_rfl⟩ hx)⟩
instance : first_countable_topology ℝₗ := ⟨λ x, (nhds_basis_Ico_rat x).is_countably_generated⟩
/-- Sorgenfrey line is a completely normal Hausdorff topological space. -/
instance : t5_space ℝₗ :=
begin
/- Let `s` and `t` be disjoint closed sets. For each `x ∈ s` we choose `X x` such that
`set.Ico x (X x)` is disjoint with `t`. Similarly, for each `y ∈ t` we choose `Y y` such that
`set.Ico y (Y y)` is disjoint with `s`. Then `⋃ x ∈ s, Ico x (X x)` and `⋃ y ∈ t, Ico y (Y y)` are
disjoint open sets that include `s` and `t`. -/
refine ⟨λ s t hd₁ hd₂, _⟩,
choose! X hX hXd
using λ x (hx : x ∈ s), exists_Ico_disjoint_closed is_closed_closure (disjoint_left.1 hd₂ hx),
choose! Y hY hYd
using λ y (hy : y ∈ t), exists_Ico_disjoint_closed is_closed_closure (disjoint_right.1 hd₁ hy),
refine disjoint_of_disjoint_of_mem _
(bUnion_mem_nhds_set $ λ x hx, (is_open_Ico x (X x)).mem_nhds $ left_mem_Ico.2 (hX x hx))
(bUnion_mem_nhds_set $ λ y hy, (is_open_Ico y (Y y)).mem_nhds $ left_mem_Ico.2 (hY y hy)),
simp only [disjoint_Union_left, disjoint_Union_right, Ico_disjoint_Ico],
intros y hy x hx,
cases le_total x y with hle hle,
{ calc min (X x) (Y y) ≤ X x : min_le_left _ _
... ≤ y : not_lt.1 (λ hyx, (hXd x hx).le_bot ⟨⟨hle, hyx⟩, subset_closure hy⟩)
... ≤ max x y : le_max_right _ _ },
{ calc min (X x) (Y y) ≤ Y y : min_le_right _ _
... ≤ x : not_lt.1 $ λ hxy, (hYd y hy).le_bot ⟨⟨hle, hxy⟩, subset_closure hx⟩
... ≤ max x y : le_max_left _ _ }
end
lemma dense_range_coe_rat : dense_range (coe : ℚ → ℝₗ) :=
begin
refine dense_iff_inter_open.2 _,
rintro U Uo ⟨x, hx⟩,
rcases is_open_iff.1 Uo _ hx with ⟨y, hxy, hU⟩,
rcases exists_rat_btwn hxy with ⟨z, hxz, hzy⟩,
exact ⟨z, hU ⟨hxz.le, hzy⟩, mem_range_self _⟩
end
instance : separable_space ℝₗ := ⟨⟨_, countable_range _, dense_range_coe_rat⟩⟩
lemma is_closed_antidiagonal (c : ℝₗ) : is_closed {x : ℝₗ × ℝₗ | x.1 + x.2 = c} :=
is_closed_singleton.preimage continuous_add
lemma is_clopen_Ici_prod (x : ℝₗ × ℝₗ) : is_clopen (Ici x) :=
(Ici_prod_eq x).symm ▸ (is_clopen_Ici _).prod (is_clopen_Ici _)
/-- Any subset of an antidiagonal `{(x, y) : ℝₗ × ℝₗ| x + y = c}` is a closed set. -/
lemma is_closed_of_subset_antidiagonal {s : set (ℝₗ × ℝₗ)} {c : ℝₗ}
(hs : ∀ x : ℝₗ × ℝₗ, x ∈ s → x.1 + x.2 = c) : is_closed s :=
begin
rw [← closure_subset_iff_is_closed],
rintro ⟨x, y⟩ H,
obtain rfl : x + y = c,
{ change (x, y) ∈ {p : ℝₗ × ℝₗ | p.1 + p.2 = c},
exact closure_minimal (hs : s ⊆ {x | x.1 + x.2 = c}) (is_closed_antidiagonal c) H },
rcases mem_closure_iff.1 H (Ici (x, y)) (is_clopen_Ici_prod _).1 le_rfl
with ⟨⟨x', y'⟩, ⟨hx : x ≤ x', hy : y ≤ y'⟩, H⟩,
convert H,
{ refine hx.antisymm _,
rwa [← add_le_add_iff_right, hs _ H, add_le_add_iff_left] },
{ refine hy.antisymm _,
rwa [← add_le_add_iff_left, hs _ H, add_le_add_iff_right] }
end
lemma nhds_prod_antitone_basis_inv_pnat (x y : ℝₗ) :
(𝓝 (x, y)).has_antitone_basis (λ n : ℕ+, Ico x (x + n⁻¹) ×ˢ Ico y (y + n⁻¹)) :=
begin
rw [nhds_prod_eq],
exact (nhds_antitone_basis_Ico_inv_pnat x).prod (nhds_antitone_basis_Ico_inv_pnat y)
end
/-- The product of the Sorgenfrey line and itself is not a normal topological space. -/
lemma not_normal_space_prod : ¬normal_space (ℝₗ × ℝₗ) :=
begin
have h₀ : ∀ {n : ℕ+}, (0 : ℝ) < n⁻¹, from λ n, inv_pos.2 (nat.cast_pos.2 n.pos),
have h₀' : ∀ {n : ℕ+} {x : ℝ}, x < x + n⁻¹, from λ n x, lt_add_of_pos_right _ h₀,
introI,
/- Let `S` be the set of points `(x, y)` on the line `x + y = 0` such that `x` is rational.
Let `T` be the set of points `(x, y)` on the line `x + y = 0` such that `x` is irrational.
These sets are closed, see `sorgenfrey_line.is_closed_of_subset_antidiagonal`, and disjoint. -/
set S := {x : ℝₗ × ℝₗ | x.1 + x.2 = 0 ∧ ∃ r : ℚ, ↑r = x.1},
set T := {x : ℝₗ × ℝₗ | x.1 + x.2 = 0 ∧ irrational x.1.to_real},
have hSc : is_closed S, from is_closed_of_subset_antidiagonal (λ x hx, hx.1),
have hTc : is_closed T, from is_closed_of_subset_antidiagonal (λ x hx, hx.1),
have hd : disjoint S T,
{ rw disjoint_iff_inf_le,
rintro ⟨x, y⟩ ⟨⟨-, r, rfl : _ = x⟩, -, hr⟩,
exact r.not_irrational hr },
/- Consider disjoint open sets `U ⊇ S` and `V ⊇ T`. -/
rcases normal_separation hSc hTc hd with ⟨U, V, Uo, Vo, SU, TV, UV⟩,
/- For each point `(x, -x) ∈ T`, choose a neighborhood
`Ico x (x + k⁻¹) ×ˢ Ico (-x) (-x + k⁻¹) ⊆ V`. -/
have : ∀ x : ℝₗ, irrational x.to_real →
∃ k : ℕ+, Ico x (x + k⁻¹) ×ˢ Ico (-x) (-x + k⁻¹) ⊆ V,
{ intros x hx,
have hV : V ∈ 𝓝 (x, -x), from Vo.mem_nhds (@TV (x, -x) ⟨add_neg_self x, hx⟩),
exact (nhds_prod_antitone_basis_inv_pnat _ _).mem_iff.1 hV },
choose! k hkV,
/- Since the set of irrational numbers is a dense Gδ set in the usual topology of `ℝ`, there
exists `N > 0` such that the set `C N = {x : ℝ | irrational x ∧ k x = N}` is dense in a nonempty
interval. In other words, the closure of this set has a nonempty interior. -/
set C : ℕ+ → set ℝ := λ n, closure {x | irrational x ∧ k (to_real.symm x) = n},
have H : {x : ℝ | irrational x} ⊆ ⋃ n, C n,
from λ x hx, mem_Union.2 ⟨_, subset_closure ⟨hx, rfl⟩⟩,
have Hd : dense (⋃ n, interior (C n)) :=
is_Gδ_irrational.dense_Union_interior_of_closed dense_irrational (λ _, is_closed_closure) H,
obtain ⟨N, hN⟩ : ∃ n : ℕ+, (interior $ C n).nonempty, from nonempty_Union.mp Hd.nonempty,
/- Choose a rational number `r` in the interior of the closure of `C N`, then choose `n ≥ N > 0`
such that `Ico r (r + n⁻¹) × Ico (-r) (-r + n⁻¹) ⊆ U`. -/
rcases rat.dense_range_cast.exists_mem_open is_open_interior hN with ⟨r, hr⟩,
have hrU : ((r, -r) : ℝₗ × ℝₗ) ∈ U, from @SU (r, -r) ⟨add_neg_self _, r, rfl⟩,
obtain ⟨n, hnN, hn⟩ : ∃ n (hnN : N ≤ n), Ico (r : ℝₗ) (r + n⁻¹) ×ˢ Ico (-r : ℝₗ) (-r + n⁻¹) ⊆ U,
from ((nhds_prod_antitone_basis_inv_pnat _ _).has_basis_ge N).mem_iff.1 (Uo.mem_nhds hrU),
/- Finally, choose `x ∈ Ioo (r : ℝ) (r + n⁻¹) ∩ C N`. Then `(x, -r)` belongs both to `U` and `V`,
so they are not disjoint. This contradiction completes the proof. -/
obtain ⟨x, hxn, hx_irr, rfl⟩ :
∃ x : ℝ, x ∈ Ioo (r : ℝ) (r + n⁻¹) ∧ irrational x ∧ k (to_real.symm x) = N,
{ have : (r : ℝ) ∈ closure (Ioo (r : ℝ) (r + n⁻¹)),
{ rw [closure_Ioo h₀'.ne, left_mem_Icc], exact h₀'.le },
rcases mem_closure_iff_nhds.1 this _ (mem_interior_iff_mem_nhds.1 hr) with ⟨x', hx', hx'ε⟩,
exact mem_closure_iff.1 hx' _ is_open_Ioo hx'ε },
refine UV.le_bot (_ : (to_real.symm x, -↑r) ∈ _),
refine ⟨hn ⟨_, _⟩, hkV (to_real.symm x) hx_irr ⟨_, _⟩⟩,
{ exact Ioo_subset_Ico_self hxn },
{ exact left_mem_Ico.2 h₀' },
{ exact left_mem_Ico.2 h₀' },
{ refine (nhds_antitone_basis_Ico_inv_pnat (-x)).2 hnN ⟨neg_le_neg hxn.1.le, _⟩,
simp only [add_neg_lt_iff_le_add', lt_neg_add_iff_add_lt],
exact hxn.2 }
end
/-- The topology on the Sorgenfrey line is not metrizable. -/
lemma not_metrizable_space : ¬metrizable_space ℝₗ :=
begin
introI,
letI := metrizable_space_metric ℝₗ,
exact not_normal_space_prod infer_instance
end
/-- The topology on the Sorgenfrey line is not second countable. -/
lemma not_second_countable_topology : ¬second_countable_topology ℝₗ :=
by { introI, exact not_metrizable_space (metrizable_space_of_t3_second_countable _) }
end sorgenfrey_line
end counterexample
| 9bdf6802eff9df7a139f067514499f9cc7bbdbda | 0d9b0a832bc57849732c5bd008a7a142f7e49656 | /src/list2d.lean | 1ac8fcb604b7d6609722c26649c773f76c4c56fc | [] | no_license | mirefek/sokoban.lean | bb9414af67894e4d8ce75f8c8d7031df02d371d0 | 451c92308afb4d3f8e566594b9751286f93b899b | refs/heads/master | 1,681,025,245,267 | 1,618,997,832,000 | 1,618,997,832,000 | 359,491,681 | 10 | 0 | null | null | null | null | UTF-8 | Lean | false | false | 5,038 | lean |
import tactic
import data.list.func
universe u
def list2d (α : Type u) := list (list α)
namespace list2d
open list.func
variables {α : Type} {β : Type} {γ : Type} {δ : Type}
variables [inhabited α] [inhabited β]
def get2d (xy : ℕ × ℕ) (l : list2d α) : α
:= let (x,y) := xy in get x (get y l)
def set2d (a : α) (l : list2d α) (xy : ℕ × ℕ) : list2d α
:= let (x,y) := xy in set (set a (get y l) x) l y
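-- Editor's illustration (not in the original file): `get2d`/`set2d` on a concrete
-- 2×2 grid. `get2d (x, y)` reads entry `x` of row `y`; both checks assume the
-- definitions above reduce on literals, so `rfl` closes them.
example : get2d (1, 0) ([[1, 2], [3, 4]] : list2d ℕ) = 2 := rfl
example : get2d (0, 1) (set2d 9 ([[1, 2], [3, 4]] : list2d ℕ) (0, 1)) = 9 := rfl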
def map2d (f : γ → δ) : list2d γ → list2d δ
:= list.map (list.map f)
def fold2d (f : γ → δ → δ) : δ → list2d γ → δ
:= list.foldr (function.swap (list.foldr f))
def pointwise2d (f : α → β → γ) : list2d α → list2d β → list2d γ
:= pointwise (pointwise f)
def dfzip2d : list2d α → list2d β → list2d (α × β)
:= pointwise2d prod.mk
def equiv (l1 l2 : list2d α)
:= ∀ (xy : ℕ × ℕ), l1.get2d xy = l2.get2d xy
def add_to_line1 (a : α) : list2d α → list2d α
| [] := [[a]]
| (h::t) := (a::h)::t
@[simp] lemma get2d_nil : ∀ xy : ℕ×ℕ, get2d xy ([] : list2d α) = default α
| (x,y) := by simp [get2d, default]
def transpose : list2d α → list2d α := (list.foldr (pointwise list.cons)) []
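-- Editor's illustration (not in the original file): `transpose` swaps the two indices,
-- as `get2d_transpose` below states; here checked on a concrete 2×2 grid, assuming
-- the fold reduces on literals.
example : transpose ([[1, 2], [3, 4]] : list2d ℕ) = [[1, 3], [2, 4]] := rfl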
instance [has_repr α] : has_repr (list2d α) := ⟨λ l, list.repr (l : list (list α))⟩
private lemma g_get_pointwise {δ : Type} [inhabited γ] {f : α → β → γ} (g : γ → δ)
(h1 : g (f (default α) (default β)) = g (default γ)) :
∀ (k : nat) (as : list α) (bs : list β),
g (get k (pointwise f as bs)) = g (f (get k as) (get k bs))
| k [] [] := by simp only [h1, get_nil, list.func.pointwise, list.func.get]
| 0 [] (b::bs) :=
by simp only [get_pointwise, get_nil,
list.func.pointwise, list.func.get, nat.nat_zero_eq_zero, list.map]
| (k+1) [] (b::bs) :=
by { have : g (get k (list.map (f $ default α) bs)) = g (f (default α) (get k bs)),
{ simpa [nil_pointwise, get_nil] using (g_get_pointwise k [] bs) },
simpa [list.func.get, get_nil, pointwise, list.map] }
| 0 (a::as) [] :=
by simp only [g_get_pointwise, get_nil,
list.func.pointwise, list.func.get, nat.nat_zero_eq_zero, list.map]
| (k+1) (a::as) [] :=
by simpa [list.func.get, get_nil, pointwise, list.map, pointwise_nil, get_nil]
using g_get_pointwise k as []
| 0 (a::as) (b::bs) := by simp only [list.func.pointwise, list.func.get]
| (k+1) (a::as) (b::bs) :=
by simp only [list.func.pointwise, list.func.get, g_get_pointwise k]
@[simp] theorem get2d_transpose : ∀ (xy : ℕ × ℕ) (l : list2d α),
get2d xy (transpose l) = get2d (prod.swap xy) l :=
begin
intros, cases xy with x y, simp, revert x, induction l, {
intros, unfold transpose, unfold get2d,
simp [default],
}, {
intros, unfold get2d, unfold transpose,
simp [list.foldr], rewrite g_get_pointwise (get x),
cases x, refl, apply l_ih,
simp [default], cases x, refl, simp,
}
end
@[simp] theorem map_pointwise {δ : Type}
(f : α → β → γ) (g : γ → δ) : ∀ (l1 : list α) (l2 : list β),
list.map g (list.func.pointwise f l1 l2)
= list.func.pointwise (λ a b, g (f a b)) l1 l2
| [] [] := by simp
| (a::as) [] := by simp
| [] (b::bs) := by simp
| (a::as) (b::bs) := by simp [(map_pointwise as bs)]
@[simp] theorem map_map_circ {δ2 : Type} (f : δ → δ2) (g : γ → δ) :
  (list.map f) ∘ (list.map g) = list.map (f ∘ g) :=
begin apply funext, simp end
theorem nil_pointwise_curry {f : α → β → γ} :
  pointwise f list.nil = list.map (f (default α)) :=
begin apply funext, intros, apply nil_pointwise end
theorem map_pointwise2d
(f : α → β → γ) (g : γ → δ) : ∀ (l1 : list2d α) (l2 : list2d β),
map2d g (pointwise2d f l1 l2)
= pointwise2d (λ a b, g (f a b)) l1 l2 :=
begin
intros, unfold map2d, unfold pointwise2d, simp
end
theorem pointwise_dfzip2d
(f : α → β → γ) : ∀ (l1 : list2d α) (l2 : list2d β),
pointwise2d f l1 l2 =
(pointwise2d prod.mk l1 l2).map2d (λ ab, match ab with (a,b) := f a b end) :=
by simp [map_pointwise2d]
theorem get2d_pointwise [inhabited γ] {f : α → β → γ}
(H : f (default α) (default β) = default γ) :
∀ (xy : ℕ×ℕ) (as : list2d α) (bs : list2d β),
get2d xy (pointwise2d f as bs) = f (get2d xy as) (get2d xy bs) :=
begin
intros, cases xy with x y, unfold get2d, unfold pointwise2d,
repeat { rw list.func.get_pointwise, }, exact H,
simp! [default],
end
theorem get2d_set2d {a : α} {xy : ℕ×ℕ} {l : list2d α} : (l.set2d a xy).get2d xy = a :=
begin
cases xy with x y, unfold get2d, unfold set2d, simp!,
end
theorem get2d_set2d_eq_of_ne {a : α} {xy1 xy2 : ℕ×ℕ} {l : list2d α}
: xy1 ≠ xy2 → (l.set2d a xy2).get2d xy1 = (l.get2d xy1) :=
begin
cases xy1 with x1 y1,
cases xy2 with x2 y2,
unfold get2d, unfold set2d,
intro H,
by_cases Hy : y1 = y2, {
rw Hy, rw Hy at H, rw get_set,
by_cases Hx : x1 = x2,
{ exfalso, rw Hx at H, exact H rfl, },
{ exact get_set_eq_of_ne x2 x1 Hx, },
}, {
apply congr_arg, exact get_set_eq_of_ne y2 y1 Hy,
},
end
end list2d
| 0e158639495fa726442e6527adae953455da8144 | 3dd1b66af77106badae6edb1c4dea91a146ead30 | /tests/lean/run/tactic4.lean | 98184be2369d535523e88a7606ca85f766901726 | ["Apache-2.0"] | permissive | silky/lean | 79c20c15c93feef47bb659a2cc139b26f3614642 | df8b88dca2f8da1a422cb618cd476ef5be730546 | refs/heads/master | 1,610,737,587,697 | 1,406,574,534,000 | 1,406,574,534,000 | 22,362,176 | 1 | 0 | null | null | null | null | UTF-8 | Lean | false | false | 234 | lean |
import standard
using tactic (renaming id->id_tac)
definition id {A : Type} (a : A) := a
definition simple {A : Prop} : tactic
:= unfold @id.{1}; assumption
theorem tst {A B : Prop} (H1 : A) (H2 : B) : id A
:= by simple
check tst
| eb5cceb094e5463776f03d718020ee5a33b6ea88 | 9dc8cecdf3c4634764a18254e94d43da07142918 | /src/logic/encodable/basic.lean | 6b5e4ce62d3516786564d799f590eedcbb434b71 | ["Apache-2.0"] | permissive | jcommelin/mathlib | d8456447c36c176e14d96d9e76f39841f69d2d9b | ee8279351a2e434c2852345c51b728d22af5a156 | refs/heads/master | 1,664,782,136,488 | 1,663,638,983,000 | 1,663,638,983,000 | 132,563,656 | 0 | 0 | Apache-2.0 | 1,663,599,929,000 | 1,525,760,539,000 | Lean | UTF-8 | Lean | false | false | 19,752 | lean |
/-
Copyright (c) 2015 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura, Mario Carneiro
-/
import logic.equiv.nat
import order.directed
import data.countable.defs
import order.rel_iso
import data.fin.basic
/-!
# Encodable types
This file defines encodable (constructively countable) types as a typeclass.
This is used to provide explicit encode/decode functions from and to `ℕ`, with the information that
those functions are inverses of each other.
The difference with `denumerable` is that finite types are encodable. For infinite types,
`encodable` and `denumerable` agree.
## Main declarations
* `encodable α`: States that there exists an explicit encoding function `encode : α → ℕ` with a
partial inverse `decode : ℕ → option α`.
* `decode₂`: Version of `decode` that is equal to `none` outside of the range of `encode`. Useful as
we do not require this in the definition of `decode`.
* `ulower α`: Any encodable type has an equivalent type living in the lowest universe, namely a
subtype of `ℕ`. `ulower α` finds it.
## Implementation notes
The point of asking for an explicit partial inverse `decode : ℕ → option α` to `encode : α → ℕ` is
to make the range of `encode` decidable even when the finiteness of `α` is not.
-/
open option list nat function
/-- Constructively countable type. Made from an explicit injection `encode : α → ℕ` and a partial
inverse `decode : ℕ → option α`. Note that finite types *are* countable. See `denumerable` if you
wish to enforce infiniteness. -/
class encodable (α : Type*) :=
(encode : α → ℕ)
(decode [] : ℕ → option α)
(encodek : ∀ a, decode (encode a) = some a)
attribute [simp] encodable.encodek
namespace encodable
variables {α : Type*} {β : Type*}
universe u
theorem encode_injective [encodable α] : function.injective (@encode α _)
| x y e := option.some.inj $ by rw [← encodek, e, encodek]
@[simp] lemma encode_inj [encodable α] {a b : α} : encode a = encode b ↔ a = b :=
encode_injective.eq_iff
-- The priority of the instance below is less than the priorities of `subtype.countable`
-- and `quotient.countable`
@[priority 400] instance [encodable α] : countable α := encode_injective.countable
lemma surjective_decode_iget (α : Type*) [encodable α] [inhabited α] :
surjective (λ n, (encodable.decode α n).iget) :=
λ x, ⟨encodable.encode x, by simp_rw [encodable.encodek]⟩
/-- An encodable type has decidable equality. Not set as an instance because this is usually not the
best way to infer decidability. -/
def decidable_eq_of_encodable (α) [encodable α] : decidable_eq α
| a b := decidable_of_iff _ encode_inj
/-- If `α` is encodable and there is an injection `f : β → α`, then `β` is encodable as well. -/
def of_left_injection [encodable α]
(f : β → α) (finv : α → option β) (linv : ∀ b, finv (f b) = some b) : encodable β :=
⟨λ b, encode (f b),
λ n, (decode α n).bind finv,
λ b, by simp [encodable.encodek, linv]⟩
/-- If `α` is encodable and `f : β → α` is invertible, then `β` is encodable as well. -/
def of_left_inverse [encodable α]
(f : β → α) (finv : α → β) (linv : ∀ b, finv (f b) = b) : encodable β :=
of_left_injection f (some ∘ finv) (λ b, congr_arg some (linv b))
/-- Encodability is preserved by equivalence. -/
def of_equiv (α) [encodable α] (e : β ≃ α) : encodable β :=
of_left_inverse e e.symm e.left_inv
@[simp] theorem encode_of_equiv {α β} [encodable α] (e : β ≃ α) (b : β) :
@encode _ (of_equiv _ e) b = encode (e b) := rfl
@[simp] theorem decode_of_equiv {α β} [encodable α] (e : β ≃ α) (n : ℕ) :
@decode _ (of_equiv _ e) n = (decode α n).map e.symm := rfl
instance _root_.nat.encodable : encodable ℕ :=
⟨id, some, λ a, rfl⟩
@[simp] theorem encode_nat (n : ℕ) : encode n = n := rfl
@[simp] theorem decode_nat (n : ℕ) : decode ℕ n = some n := rfl
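-- Editor's illustration (not part of the original file): on `ℕ` the round trip is
-- definitional, since `encode` is the identity and `decode` is `some`.
example : decode ℕ (encode (5 : ℕ)) = some 5 := encodek 5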
@[priority 100] instance _root_.is_empty.to_encodable [is_empty α] : encodable α :=
⟨is_empty_elim, λ n, none, is_empty_elim⟩
instance _root_.punit.encodable : encodable punit :=
⟨λ_, 0, λ n, nat.cases_on n (some punit.star) (λ _, none), λ _, by simp⟩
@[simp] theorem encode_star : encode punit.star = 0 := rfl
@[simp] theorem decode_unit_zero : decode punit 0 = some punit.star := rfl
@[simp] theorem decode_unit_succ (n) : decode punit (succ n) = none := rfl
/-- If `α` is encodable, then so is `option α`. -/
instance _root_.option.encodable {α : Type*} [h : encodable α] : encodable (option α) :=
⟨λ o, option.cases_on o nat.zero (λ a, succ (encode a)),
λ n, nat.cases_on n (some none) (λ m, (decode α m).map some),
λ o, by cases o; dsimp; simp [encodek, nat.succ_ne_zero]⟩
@[simp] theorem encode_none [encodable α] : encode (@none α) = 0 := rfl
@[simp] theorem encode_some [encodable α] (a : α) :
encode (some a) = succ (encode a) := rfl
@[simp] theorem decode_option_zero [encodable α] : decode (option α) 0 = some none := rfl
@[simp] theorem decode_option_succ [encodable α] (n) :
decode (option α) (succ n) = (decode α n).map some := rfl
/-- Failsafe variant of `decode`. `decode₂ α n` returns the preimage of `n` under `encode` if it
exists, and returns `none` if it doesn't. This requirement could be imposed directly on `decode`,
but it is not, in order to make the definition easier to use. -/
def decode₂ (α) [encodable α] (n : ℕ) : option α :=
(decode α n).bind (option.guard (λ a, encode a = n))
theorem mem_decode₂' [encodable α] {n : ℕ} {a : α} :
a ∈ decode₂ α n ↔ a ∈ decode α n ∧ encode a = n :=
by simp [decode₂]; exact
⟨λ ⟨_, h₁, rfl, h₂⟩, ⟨h₁, h₂⟩, λ ⟨h₁, h₂⟩, ⟨_, h₁, rfl, h₂⟩⟩
theorem mem_decode₂ [encodable α] {n : ℕ} {a : α} :
a ∈ decode₂ α n ↔ encode a = n :=
mem_decode₂'.trans (and_iff_right_of_imp $ λ e, e ▸ encodek _)
theorem decode₂_eq_some [encodable α] {n : ℕ} {a : α} :
decode₂ α n = some a ↔ encode a = n :=
mem_decode₂
@[simp] lemma decode₂_encode [encodable α] (a : α) :
decode₂ α (encode a) = some a :=
by { ext, simp [mem_decode₂, eq_comm] }
theorem decode₂_ne_none_iff [encodable α] {n : ℕ} :
decode₂ α n ≠ none ↔ n ∈ set.range (encode : α → ℕ) :=
by simp_rw [set.range, set.mem_set_of_eq, ne.def, option.eq_none_iff_forall_not_mem,
encodable.mem_decode₂, not_forall, not_not]
theorem decode₂_is_partial_inv [encodable α] : is_partial_inv encode (decode₂ α) :=
λ a n, mem_decode₂
theorem decode₂_inj [encodable α] {n : ℕ} {a₁ a₂ : α}
(h₁ : a₁ ∈ decode₂ α n) (h₂ : a₂ ∈ decode₂ α n) : a₁ = a₂ :=
encode_injective $ (mem_decode₂.1 h₁).trans (mem_decode₂.1 h₂).symm
theorem encodek₂ [encodable α] (a : α) : decode₂ α (encode a) = some a :=
mem_decode₂.2 rfl
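-- Editor's note (not part of the original file): on the range of `encode`, `decode₂`
-- agrees with `decode`; this is just `encodek₂` at a concrete value.
example : decode₂ ℕ (encode (7 : ℕ)) = some 7 := encodek₂ 7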
/-- The encoding function has decidable range. -/
def decidable_range_encode (α : Type*) [encodable α] : decidable_pred (∈ set.range (@encode α _)) :=
λ x, decidable_of_iff (option.is_some (decode₂ α x))
⟨λ h, ⟨option.get h, by rw [← decode₂_is_partial_inv (option.get h), option.some_get]⟩,
λ ⟨n, hn⟩, by rw [← hn, encodek₂]; exact rfl⟩
/-- An encodable type is equivalent to the range of its encoding function. -/
def equiv_range_encode (α : Type*) [encodable α] : α ≃ set.range (@encode α _) :=
{ to_fun := λ a : α, ⟨encode a, set.mem_range_self _⟩,
inv_fun := λ n, option.get (show is_some (decode₂ α n.1),
by cases n.2 with x hx; rw [← hx, encodek₂]; exact rfl),
left_inv := λ a, by dsimp;
rw [← option.some_inj, option.some_get, encodek₂],
right_inv := λ ⟨n, x, hx⟩, begin
apply subtype.eq,
dsimp,
conv {to_rhs, rw ← hx},
rw [encode_injective.eq_iff, ← option.some_inj, option.some_get, ← hx, encodek₂],
end }
/-- A type with unique element is encodable. This is not an instance to avoid diamonds. -/
def _root_.unique.encodable [unique α] : encodable α :=
⟨λ _, 0, λ _, some default, unique.forall_iff.2 rfl⟩
section sum
variables [encodable α] [encodable β]
/-- Explicit encoding function for the sum of two encodable types. -/
def encode_sum : α ⊕ β → ℕ
| (sum.inl a) := bit0 $ encode a
| (sum.inr b) := bit1 $ encode b
/-- Explicit decoding function for the sum of two encodable types. -/
def decode_sum (n : ℕ) : option (α ⊕ β) :=
match bodd_div2 n with
| (ff, m) := (decode α m).map sum.inl
| (tt, m) := (decode β m).map sum.inr
end
/-- If `α` and `β` are encodable, then so is their sum. -/
instance _root_.sum.encodable : encodable (α ⊕ β) :=
⟨encode_sum, decode_sum, λ s,
by cases s; simp [encode_sum, decode_sum, encodek]; refl⟩
@[simp] theorem encode_inl (a : α) :
@encode (α ⊕ β) _ (sum.inl a) = bit0 (encode a) := rfl
@[simp] theorem encode_inr (b : β) :
@encode (α ⊕ β) _ (sum.inr b) = bit1 (encode b) := rfl
@[simp] theorem decode_sum_val (n : ℕ) :
decode (α ⊕ β) n = decode_sum n := rfl
end sum
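-- Editor's illustration (not part of the original file): the sum encoding interleaves the
-- two summands, sending `inl` to even codes and `inr` to odd codes. On `ℕ ⊕ ℕ` this is
-- definitional, assuming the instance reduces on numerals.
example : encode (sum.inl 3 : ℕ ⊕ ℕ) = 6 := rfl
example : encode (sum.inr 3 : ℕ ⊕ ℕ) = 7 := rfl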
instance _root_.bool.encodable : encodable bool :=
of_equiv (unit ⊕ unit) equiv.bool_equiv_punit_sum_punit
@[simp] theorem encode_tt : encode tt = 1 := rfl
@[simp] theorem encode_ff : encode ff = 0 := rfl
@[simp] theorem decode_zero : decode bool 0 = some ff := rfl
@[simp] theorem decode_one : decode bool 1 = some tt := rfl
theorem decode_ge_two (n) (h : 2 ≤ n) : decode bool n = none :=
begin
suffices : decode_sum n = none,
{ change (decode_sum n).map _ = none, rw this, refl },
have : 1 ≤ div2 n,
{ rw [div2_val, nat.le_div_iff_mul_le],
exacts [h, dec_trivial] },
cases exists_eq_succ_of_ne_zero (ne_of_gt this) with m e,
simp [decode_sum]; cases bodd n; simp [decode_sum]; rw e; refl
end
noncomputable instance _root_.Prop.encodable : encodable Prop :=
of_equiv bool equiv.Prop_equiv_bool
section sigma
variables {γ : α → Type*} [encodable α] [∀ a, encodable (γ a)]
/-- Explicit encoding function for `sigma γ` -/
def encode_sigma : sigma γ → ℕ
| ⟨a, b⟩ := mkpair (encode a) (encode b)
/-- Explicit decoding function for `sigma γ` -/
def decode_sigma (n : ℕ) : option (sigma γ) :=
let (n₁, n₂) := unpair n in
(decode α n₁).bind $ λ a, (decode (γ a) n₂).map $ sigma.mk a
instance _root_.sigma.encodable : encodable (sigma γ) :=
⟨encode_sigma, decode_sigma, λ ⟨a, b⟩,
by simp [encode_sigma, decode_sigma, unpair_mkpair, encodek]⟩
@[simp] theorem decode_sigma_val (n : ℕ) : decode (sigma γ) n =
(decode α n.unpair.1).bind (λ a, (decode (γ a) n.unpair.2).map $ sigma.mk a) :=
show decode_sigma._match_1 _ = _, by cases n.unpair; refl
@[simp] theorem encode_sigma_val (a b) : @encode (sigma γ) _ ⟨a, b⟩ =
mkpair (encode a) (encode b) := rfl
end sigma
section prod
variables [encodable α] [encodable β]
/-- If `α` and `β` are encodable, then so is their product. -/
instance _root_.prod.encodable : encodable (α × β) :=
of_equiv _ (equiv.sigma_equiv_prod α β).symm
@[simp] theorem decode_prod_val (n : ℕ) : decode (α × β) n =
(decode α n.unpair.1).bind (λ a, (decode β n.unpair.2).map $ prod.mk a) :=
show (decode (sigma (λ _, β)) n).map (equiv.sigma_equiv_prod α β) = _,
by simp; cases decode α n.unpair.1; simp;
cases decode β n.unpair.2; refl
@[simp] theorem encode_prod_val (a b) : @encode (α × β) _ (a, b) =
mkpair (encode a) (encode b) := rfl
end prod
section subtype
open subtype decidable
variables {P : α → Prop} [encA : encodable α] [decP : decidable_pred P]
include encA
/-- Explicit encoding function for a decidable subtype of an encodable type -/
def encode_subtype : {a : α // P a} → ℕ
| ⟨v, h⟩ := encode v
include decP
/-- Explicit decoding function for a decidable subtype of an encodable type -/
def decode_subtype (v : ℕ) : option {a : α // P a} :=
(decode α v).bind $ λ a,
if h : P a then some ⟨a, h⟩ else none
/-- A decidable subtype of an encodable type is encodable. -/
instance _root_.subtype.encodable : encodable {a : α // P a} :=
⟨encode_subtype, decode_subtype,
λ ⟨v, h⟩, by simp [encode_subtype, decode_subtype, encodek, h]⟩
lemma subtype.encode_eq (a : subtype P) : encode a = encode a.val :=
by cases a; refl
end subtype
instance _root_.fin.encodable (n) : encodable (fin n) :=
of_equiv _ fin.equiv_subtype
instance _root_.int.encodable : encodable ℤ :=
of_equiv _ equiv.int_equiv_nat
instance _root_.pnat.encodable : encodable ℕ+ :=
of_equiv _ equiv.pnat_equiv_nat
/-- The lift of an encodable type is encodable. -/
instance _root_.ulift.encodable [encodable α] : encodable (ulift α) :=
of_equiv _ equiv.ulift
/-- The lift of an encodable type is encodable. -/
instance _root_.plift.encodable [encodable α] : encodable (plift α) :=
of_equiv _ equiv.plift
/-- If `β` is encodable and there is an injection `f : α → β`, then `α` is encodable as well. -/
noncomputable def of_inj [encodable β] (f : α → β) (hf : injective f) : encodable α :=
of_left_injection f (partial_inv f) (λ x, (partial_inv_of_injective hf _ _).2 rfl)
/-- If `α` is countable, then it has a (non-canonical) `encodable` structure. -/
noncomputable def of_countable (α : Type*) [countable α] : encodable α :=
nonempty.some $ let ⟨f, hf⟩ := exists_injective_nat α in ⟨of_inj f hf⟩
@[simp] lemma nonempty_encodable : nonempty (encodable α) ↔ countable α :=
⟨λ ⟨h⟩, @encodable.countable α h, λ h, ⟨@of_countable _ h⟩⟩
end encodable
lemma nonempty_encodable (α : Type*) [countable α] : nonempty (encodable α) :=
⟨encodable.of_countable _⟩
instance : countable ℕ+ := subtype.countable -- short-circuit instance search
section ulower
local attribute [instance, priority 100] encodable.decidable_range_encode
/-- `ulower α : Type` is an equivalent type in the lowest universe, given `encodable α`. -/
@[derive decidable_eq, derive encodable]
def ulower (α : Type*) [encodable α] : Type :=
set.range (encodable.encode : α → ℕ)
end ulower
namespace ulower
variables (α : Type*) [encodable α]
/-- The equivalence between the encodable type `α` and `ulower α : Type`. -/
def equiv : α ≃ ulower α :=
encodable.equiv_range_encode α
variables {α}
/-- Lowers an `a : α` into `ulower α`. -/
def down (a : α) : ulower α := equiv α a
instance [inhabited α] : inhabited (ulower α) := ⟨down default⟩
/-- Lifts an `a : ulower α` into `α`. -/
def up (a : ulower α) : α := (equiv α).symm a
@[simp] lemma down_up {a : ulower α} : down a.up = a := equiv.right_inv _ _
@[simp] lemma up_down {a : α} : (down a).up = a := equiv.left_inv _ _
@[simp] lemma up_eq_up {a b : ulower α} : a.up = b.up ↔ a = b :=
equiv.apply_eq_iff_eq _
@[simp] lemma down_eq_down {a b : α} : down a = down b ↔ a = b :=
equiv.apply_eq_iff_eq _
@[ext] protected lemma ext {a b : ulower α} : a.up = b.up → a = b :=
up_eq_up.1
end ulower
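-- Editor's illustration (not part of the original file): the `down`/`up` round trip on `ℕ`,
-- using the simp lemma `up_down` proved above.
example : (ulower.down (3 : ℕ)).up = 3 := ulower.up_down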
/-
Choice function for encodable types and decidable predicates.
We provide the following API
choose {α : Type*} {p : α → Prop} [c : encodable α] [d : decidable_pred p] : (∃ x, p x) → α :=
choose_spec {α : Type*} {p : α → Prop} [c : encodable α] [d : decidable_pred p] (ex : ∃ x, p x) :
p (choose ex) :=
-/
namespace encodable
section find_a
variables {α : Type*} (p : α → Prop) [encodable α] [decidable_pred p]
private def good : option α → Prop
| (some a) := p a
| none := false
private def decidable_good : decidable_pred (good p)
| n := by cases n; unfold good; apply_instance
local attribute [instance] decidable_good
open encodable
variable {p}
/-- Constructive choice function for a decidable subtype of an encodable type. -/
def choose_x (h : ∃ x, p x) : {a : α // p a} :=
have ∃ n, good p (decode α n), from
let ⟨w, pw⟩ := h in ⟨encode w, by simp [good, encodek, pw]⟩,
match _, nat.find_spec this : ∀ o, good p o → {a // p a} with
| some a, h := ⟨a, h⟩
end
/-- Constructive choice function for a decidable predicate over an encodable type. -/
def choose (h : ∃ x, p x) : α := (choose_x h).1
lemma choose_spec (h : ∃ x, p x) : p (choose h) := (choose_x h).2
end find_a
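-- Editor's illustration (not part of the original file): how `choose` and `choose_spec`
-- are used together; this merely specialises the API above to `ℕ`.
example {p : ℕ → Prop} [decidable_pred p] (h : ∃ n, p n) : p (choose h) :=
choose_spec h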
/-- A constructive version of `classical.axiom_of_choice` for `encodable` types. -/
theorem axiom_of_choice {α : Type*} {β : α → Type*} {R : Π x, β x → Prop}
[Π a, encodable (β a)] [∀ x y, decidable (R x y)]
(H : ∀ x, ∃ y, R x y) : ∃ f : Π a, β a, ∀ x, R x (f x) :=
⟨λ x, choose (H x), λ x, choose_spec (H x)⟩
/-- A constructive version of `classical.skolem` for `encodable` types. -/
theorem skolem {α : Type*} {β : α → Type*} {P : Π x, β x → Prop}
[c : Π a, encodable (β a)] [d : ∀ x y, decidable (P x y)] :
(∀ x, ∃ y, P x y) ↔ ∃ f : Π a, β a, (∀ x, P x (f x)) :=
⟨axiom_of_choice, λ ⟨f, H⟩ x, ⟨_, H x⟩⟩
/-
There is a total ordering on the elements of an encodable type, induced by the map to ℕ.
-/
/-- The `encode` function, viewed as an embedding. -/
def encode' (α) [encodable α] : α ↪ ℕ :=
⟨encodable.encode, encodable.encode_injective⟩
instance {α} [encodable α] : is_trans _ (encode' α ⁻¹'o (≤)) :=
(rel_embedding.preimage _ _).is_trans
instance {α} [encodable α] : is_antisymm _ (encodable.encode' α ⁻¹'o (≤)) :=
(rel_embedding.preimage _ _).is_antisymm
instance {α} [encodable α] : is_total _ (encodable.encode' α ⁻¹'o (≤)) :=
(rel_embedding.preimage _ _).is_total
end encodable
namespace directed
open encodable
variables {α : Type*} {β : Type*} [encodable α] [inhabited α]
/-- Given a `directed r` function `f : α → β` defined on an encodable inhabited type,
construct a noncomputable sequence such that `r (f (x n)) (f (x (n + 1)))`
and `r (f a) (f (x (encode a + 1))`. -/
protected noncomputable def sequence {r : β → β → Prop} (f : α → β) (hf : directed r f) : ℕ → α
| 0 := default
| (n + 1) :=
let p := sequence n in
match decode α n with
| none := classical.some (hf p p)
| (some a) := classical.some (hf p a)
end
lemma sequence_mono_nat {r : β → β → Prop} {f : α → β} (hf : directed r f) (n : ℕ) :
r (f (hf.sequence f n)) (f (hf.sequence f (n+1))) :=
begin
dsimp [directed.sequence],
generalize eq : hf.sequence f n = p,
cases h : decode α n with a,
{ exact (classical.some_spec (hf p p)).1 },
{ exact (classical.some_spec (hf p a)).1 }
end
lemma rel_sequence {r : β → β → Prop} {f : α → β} (hf : directed r f) (a : α) :
r (f a) (f (hf.sequence f (encode a + 1))) :=
begin
simp only [directed.sequence, encodek],
exact (classical.some_spec (hf _ a)).2
end
variables [preorder β] {f : α → β} (hf : directed (≤) f)
lemma sequence_mono : monotone (f ∘ (hf.sequence f)) :=
monotone_nat_of_le_succ $ hf.sequence_mono_nat
lemma le_sequence (a : α) : f a ≤ f (hf.sequence f (encode a + 1)) :=
hf.rel_sequence a
end directed
section quotient
open encodable quotient
variables {α : Type*} {s : setoid α} [@decidable_rel α (≈)] [encodable α]
/-- Representative of an equivalence class. This is a computable version of `quot.out` for a setoid
on an encodable type. -/
def quotient.rep (q : quotient s) : α :=
choose (exists_rep q)
theorem quotient.rep_spec (q : quotient s) : ⟦q.rep⟧ = q :=
choose_spec (exists_rep q)
/-- The quotient of an encodable space by a decidable equivalence relation is encodable. -/
def encodable_quotient : encodable (quotient s) :=
⟨λ q, encode q.rep,
λ n, quotient.mk <$> decode α n,
by rintros ⟨l⟩; rw encodek; exact congr_arg some ⟦l⟧.rep_spec⟩
end quotient
|