| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 7–139) | content_id (string, len 40) | detected_licenses (list, len 0–16) | license_type (string, 2 classes) | repo_name (string, len 7–55) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, 6 classes) | visit_date (int64, 1,471B–1,694B) | revision_date (int64, 1,378B–1,694B) | committer_date (int64, 1,378B–1,694B) | github_id (float64, 1.33M–604M, nullable) | star_events_count (int64, 0–43.5k) | fork_events_count (int64, 0–1.5k) | gha_license_id (string, 6 classes) | gha_event_created_at (int64, 1,402B–1,695B, nullable) | gha_created_at (int64, 1,359B–1,637B, nullable) | gha_language (string, 19 classes) | src_encoding (string, 2 classes) | language (string, 1 class) | is_vendor (bool, 1 class) | is_generated (bool, 1 class) | length_bytes (int64, 3–6.4M) | extension (string, 4 classes) | content (string, len 3–6.12M) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
44084c144ab44a3000db43eeb8527609b9c0eb94
|
75db7e3219bba2fbf41bf5b905f34fcb3c6ca3f2
|
/library/data/nat/fact.lean
|
9b56fd0e839ad8c459deb479884d288464acb71d
|
[
"Apache-2.0"
] |
permissive
|
jroesch/lean
|
30ef0860fa905d35b9ad6f76de1a4f65c9af6871
|
3de4ec1a6ce9a960feb2a48eeea8b53246fa34f2
|
refs/heads/master
| 1,586,090,835,348
| 1,455,142,203,000
| 1,455,142,277,000
| 51,536,958
| 1
| 0
| null | 1,455,215,811,000
| 1,455,215,811,000
| null |
UTF-8
|
Lean
| false
| false
| 1,427
|
lean
|
/-
Copyright (c) 2015 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
Factorial
-/
import data.nat.div
namespace nat
definition fact : nat → nat
| 0 := 1
| (succ n) := (succ n) * fact n
lemma fact_zero : fact 0 = 1 :=
rfl
lemma fact_one : fact 1 = 1 :=
rfl
lemma fact_succ (n) : fact (succ n) = succ n * fact n :=
rfl
lemma fact_pos : ∀ n, fact n > 0
| 0 := zero_lt_one
| (succ n) := mul_pos !succ_pos (fact_pos n)
lemma fact_ne_zero (n : ℕ) : fact n ≠ 0 := ne_of_gt !fact_pos
lemma dvd_fact : ∀ {m n}, m > 0 → m ≤ n → m ∣ fact n
| m 0 h₁ h₂ := absurd h₁ (not_lt_of_ge h₂)
| m (succ n) h₁ h₂ :=
begin
rewrite fact_succ,
cases (eq_or_lt_of_le h₂) with he hl,
{subst m, apply dvd_mul_right},
{have aux : m ∣ fact n, from dvd_fact h₁ (le_of_lt_succ hl),
apply dvd_mul_of_dvd_right aux}
end
lemma fact_le {m n} : m ≤ n → fact m ≤ fact n :=
begin
induction n with n ih,
{intro h,
have meq0 : m = 0, from eq_zero_of_le_zero h,
subst m},
{intro m_le_succ_n,
cases (eq_or_lt_of_le m_le_succ_n) with h₁ h₂,
{subst m},
{transitivity (fact n),
exact ih (le_of_lt_succ h₂),
rewrite [fact_succ, -one_mul at {1}],
exact nat.mul_le_mul (succ_le_succ (zero_le n)) !le.refl}}
end
end nat
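/- Minimal sanity checks for the API above (an illustrative sketch, not from the upstream
file): `rfl` closes the first goal because `fact` computes definitionally, and `fact_pos`
applies directly. -/
example : nat.fact 1 = 1 := rfl
example : nat.fact 1 > 0 := nat.fact_pos 1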
|
0dabd09e0ea154747877bf4c225d949a6dc04040
|
6dc0c8ce7a76229dd81e73ed4474f15f88a9e294
|
/stage0/src/Lean/Elab/Tactic.lean
|
d3c0343147b49ba512b00f4f49ed2149bb45ba18
|
[
"Apache-2.0"
] |
permissive
|
williamdemeo/lean4
|
72161c58fe65c3ad955d6a3050bb7d37c04c0d54
|
6d00fcf1d6d873e195f9220c668ef9c58e9c4a35
|
refs/heads/master
| 1,678,305,356,877
| 1,614,708,995,000
| 1,614,708,995,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 527
|
lean
|
/-
Copyright (c) 2020 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura, Sebastian Ullrich
-/
import Lean.Elab.Term
import Lean.Elab.Tactic.Basic
import Lean.Elab.Tactic.ElabTerm
import Lean.Elab.Tactic.Induction
import Lean.Elab.Tactic.Generalize
import Lean.Elab.Tactic.Injection
import Lean.Elab.Tactic.Match
import Lean.Elab.Tactic.Binders
import Lean.Elab.Tactic.Rewrite
import Lean.Elab.Tactic.Location
import Lean.Elab.Tactic.Simp
|
c8bca2767ce749ca3b1ceb0b5d187b8bf8128b81
|
abd85493667895c57a7507870867b28124b3998f
|
/src/tactic/equiv_rw.lean
|
036fa8b2a746457e107f7f1cec69cacf59160281
|
[
"Apache-2.0"
] |
permissive
|
pechersky/mathlib
|
d56eef16bddb0bfc8bc552b05b7270aff5944393
|
f1df14c2214ee114c9738e733efd5de174deb95d
|
refs/heads/master
| 1,666,714,392,571
| 1,591,747,567,000
| 1,591,747,567,000
| 270,557,274
| 0
| 0
|
Apache-2.0
| 1,591,597,975,000
| 1,591,597,974,000
| null |
UTF-8
|
Lean
| false
| false
| 12,055
|
lean
|
/-
Copyright (c) 2019 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import control.equiv_functor
/-!
# The `equiv_rw` tactic transports goals or hypotheses along equivalences.
The basic syntax is `equiv_rw e`, where `e : α ≃ β` is an equivalence.
This will try to replace occurrences of `α` in the goal with `β`, for example
transforming
* `⊢ α` to `⊢ β`,
* `⊢ option α` to `⊢ option β`
* `⊢ {a // P}` to `{b // P (⇑(equiv.symm e) b)}`
The tactic can also be used to rewrite hypotheses, using the syntax `equiv_rw e at h`.
## Implementation details
The main internal function is `equiv_rw_type e t`,
which attempts to turn an expression `e : α ≃ β` into a new equivalence with left hand side `t`.
As an example, with `t = option α`, it will generate `functor.map_equiv option e`.
This is achieved by generating a new synthetic goal `%%t ≃ _`,
and calling `solve_by_elim` with an appropriate set of congruence lemmas.
To avoid having to specify the relevant congruence lemmas by hand,
we mostly rely on `equiv_functor.map_equiv` and `bifunctor.map_equiv`
along with some structural congruence lemmas such as
* `equiv.arrow_congr'`,
* `equiv.subtype_equiv_of_subtype'`,
* `equiv.sigma_congr_left'`, and
* `equiv.Pi_congr_left'`.
The main `equiv_rw` function, when operating on the goal, simply generates a new equivalence `e'`
with left hand side matching the target, and calls `apply e'.inv_fun`.
When operating on a hypothesis `x : α`, we introduce a new fact `h : x = e.symm (e x)`,
revert this, and then attempt to `generalize`, replacing all occurrences of `e x` with a new constant `y`,
before `intro`ing and `subst`ing `h`, and renaming `y` back to `x`.
## Future improvements
In a future PR I anticipate that `derive equiv_functor` should work on many examples,
(internally using `transport`, which is in turn based on `equiv_rw`)
and we can incrementally bootstrap the strength of `equiv_rw`.
An ambitious project might be to add `equiv_rw!`,
a tactic which, when failing to find appropriate `equiv_functor` instances,
attempts to `derive` them on the spot.
For now `equiv_rw` is entirely based on `equiv`,
but the framework can readily be generalised to also work with other types of equivalences,
for example specific notations such as ring equivalence (`≃+*`),
or general categorical isomorphisms (`≅`).
This will allow us to transport across more general types of equivalences,
but this will wait for another subsequent PR.
-/
namespace tactic
/-- A list of lemmas used for constructing congruence equivalences. -/
-- Although this looks 'hard-coded', in fact the lemma `equiv_functor.map_equiv`
-- allows us to extend `equiv_rw` simply by constructing new instances of `equiv_functor`.
-- TODO: We should also use `category_theory.functorial` and `category_theory.hygienic` instances.
-- (example goal: we could rewrite along an isomorphism of rings (either as `R ≅ S` or `R ≃+* S`)
-- and turn an `x : mv_polynomial σ R` into an `x : mv_polynomial σ S`.).
meta def equiv_congr_lemmas : tactic (list expr) :=
do exprs ←
[
`equiv.of_iff,
-- TODO decide what to do with this; it's an equiv_bifunctor?
`equiv.equiv_congr,
-- The function arrow is technically a bifunctor `Typeᵒᵖ → Type → Type`,
-- but the pattern matcher will never see this.
`equiv.arrow_congr',
-- Allow rewriting in subtypes:
`equiv.subtype_equiv_of_subtype',
-- Allow rewriting in the first component of a sigma-type:
`equiv.sigma_congr_left',
-- Allow rewriting ∀s:
-- (You might think that repeated application of `equiv.forall_congr'
-- would handle the higher arity cases, but unfortunately unification is not clever enough.)
`equiv.forall₃_congr',
`equiv.forall₂_congr',
`equiv.forall_congr',
-- Allow rewriting in argument of Pi types:
`equiv.Pi_congr_left',
-- Handles `sum` and `prod`, and many others:
`bifunctor.map_equiv,
-- Handles `list`, `option`, `unique`, and many others:
`equiv_functor.map_equiv,
-- We have to filter results to ensure we don't cheat and use exclusively `equiv.refl` and `iff.refl`!
`equiv.refl,
`iff.refl
].mmap (λ n, try_core (mk_const n)),
return (exprs.map option.to_list).join -- TODO: implement `.mfilter_map mk_const`?
declare_trace equiv_rw_type
/--
Configuration structure for `equiv_rw`.
* `max_depth` bounds the search depth for equivalences to rewrite along.
The default value is 10.
(e.g., if you're rewriting along `e : α ≃ β`, and `max_depth := 2`,
you can rewrite `option (option α)` but not `option (option (option α))`.)
-/
meta structure equiv_rw_cfg :=
(max_depth : ℕ := 10)
/--
Implementation of `equiv_rw_type`, using `solve_by_elim`.
Expects a goal of the form `t ≃ _`,
and tries to solve it using `eq : α ≃ β` and congruence lemmas.
-/
meta def equiv_rw_type_core (eq : expr) (cfg : equiv_rw_cfg) : tactic unit :=
do
-- Assemble the relevant lemmas.
equiv_congr_lemmas ← equiv_congr_lemmas,
/-
We now call `solve_by_elim` to try to generate the requested equivalence.
There are a few subtleties!
* We make sure that `eq` is the first lemma, so it is applied whenever possible.
* In `equiv_congr_lemmas`, we put `equiv.refl` last so it is only used when it is not possible
to descend further.
* Since some congruence lemmas generate subgoals with `∀` statements,
we use the `pre_apply` subtactic of `solve_by_elim` to preprocess each new goal with `intros`.
-/
solve_by_elim
{ use_symmetry := false,
use_exfalso := false,
lemmas := some (eq :: equiv_congr_lemmas),
max_depth := cfg.max_depth,
-- Subgoals may contain function types,
-- and we want to continue trying to construct equivalences after the binders.
pre_apply := tactic.intros >> skip,
-- If solve_by_elim gets stuck, make sure it isn't because there's a later `≃` or `↔` goal
-- that we should still attempt.
discharger := `[show _ ≃ _] <|> `[show _ ↔ _] <|>
trace_if_enabled `equiv_rw_type "Failed, no congruence lemma applied!" >> failed,
-- We use the `accept` tactic in `solve_by_elim` to provide tracing.
accept := λ goals, lock_tactic_state (do
when_tracing `equiv_rw_type (do
goals.mmap pp >>= λ goals, trace format!"So far, we've built: {goals}"),
done <|>
when_tracing `equiv_rw_type (do
gs ← get_goals,
gs ← gs.mmap (λ g, infer_type g >>= pp),
trace format!"Attempting to adapt to {gs}")) }
/--
`equiv_rw_type e t` rewrites the type `t` using the equivalence `e : α ≃ β`,
returning a new equivalence `t ≃ t'`.
-/
meta def equiv_rw_type (eqv : expr) (ty : expr) (cfg : equiv_rw_cfg) : tactic expr :=
do
when_tracing `equiv_rw_type (do
ty_pp ← pp ty,
eqv_pp ← pp eqv,
eqv_ty_pp ← infer_type eqv >>= pp,
trace format!"Attempting to rewrite the type `{ty_pp}` using `{eqv_pp} : {eqv_ty_pp}`."),
`(_ ≃ _) ← infer_type eqv | fail format!"{eqv} must be an `equiv`",
-- We prepare a synthetic goal of type `(%%ty ≃ _)`, for some placeholder right hand side.
equiv_ty ← to_expr ``(%%ty ≃ _),
-- Now call `equiv_rw_type_core`.
new_eqv ← prod.snd <$> (solve_aux equiv_ty $ equiv_rw_type_core eqv cfg),
-- Check that we actually used the equivalence `eq`
-- (`equiv_rw_type_core` will always find `equiv.refl`, but hopefully only after all other possibilities)
new_eqv ← instantiate_mvars new_eqv,
-- We previously had `guard (eqv.occurs new_eqv)` here, but `kdepends_on` is more reliable.
kdepends_on new_eqv eqv >>= guardb <|> (do
eqv_pp ← pp eqv,
ty_pp ← pp ty,
fail format!"Could not construct an equivalence from {eqv_pp} of the form: {ty_pp} ≃ _"),
-- Finally we simplify the resulting equivalence,
-- to compress away some `map_equiv equiv.refl` subexpressions.
prod.fst <$> new_eqv.simp {fail_if_unchanged := ff}
mk_simp_attribute equiv_rw_simp "The simpset `equiv_rw_simp` is used by the tactic `equiv_rw` to
simplify applications of equivalences and their inverses."
attribute [equiv_rw_simp] equiv.symm_symm equiv.apply_symm_apply equiv.symm_apply_apply
/--
Attempt to replace the hypothesis with name `x`
by transporting it along the equivalence in `e : α ≃ β`.
-/
meta def equiv_rw_hyp (x : name) (e : expr) (cfg : equiv_rw_cfg := {}) : tactic unit :=
-- We call `dsimp_result` to reduce the beta redex introduced by `revert`
dsimp_result (do
x' ← get_local x,
x_ty ← infer_type x',
-- Adapt `e` to an equivalence with left-hand-side `x_ty`.
e ← equiv_rw_type e x_ty cfg,
eq ← to_expr ``(%%x' = equiv.symm %%e (equiv.to_fun %%e %%x')),
prf ← to_expr ``((equiv.symm_apply_apply %%e %%x').symm),
h ← note_anon eq prf,
-- Revert the new hypothesis, so it is also part of the goal.
revert h,
ex ← to_expr ``(equiv.to_fun %%e %%x'),
-- Now call `generalize`,
-- attempting to replace all occurrences of `e x`,
-- calling it for now `j : β`, with `k : x = e.symm j`.
generalize ex (by apply_opt_param) transparency.none,
-- Reintroduce `x` (now of type `β`), and the hypothesis `h`.
intro x,
h ← intro1,
-- We may need to unfreeze `x` before we can `subst` or `clear` it.
unfreeze x',
-- Finally, if we're working on properties, substitute along `h`, then do some cleanup,
-- and if we're working on data, just throw out the old `x`.
b ← target >>= is_prop,
if b then do
subst h,
`[try { simp only [] with equiv_rw_simp }]
else
clear' tt [x'] <|>
fail format!"equiv_rw expected to be able to clear the original hypothesis {x}, but couldn't.",
skip)
{fail_if_unchanged := ff} tt -- call `dsimp_result` with `no_defaults := tt`.
/-- Rewrite the goal using an equiv `e`. -/
meta def equiv_rw_target (e : expr) (cfg : equiv_rw_cfg := {}) : tactic unit :=
do
t ← target,
e ← equiv_rw_type e t cfg,
s ← to_expr ``(equiv.inv_fun %%e),
tactic.eapply s,
skip
end tactic
namespace tactic.interactive
open lean.parser
open interactive interactive.types
open tactic
local postfix `?`:9001 := optional
/--
`equiv_rw e at h`, where `h : α` is a hypothesis, and `e : α ≃ β`,
will attempt to transport `h` along `e`, producing a new hypothesis `h : β`,
with all occurrences of `h` in other hypotheses and the goal replaced with `e.symm h`.
`equiv_rw e` will attempt to transport the goal along an equivalence `e : α ≃ β`.
In its minimal form it replaces the goal `⊢ α` with `⊢ β` by calling `apply e.inv_fun`.
`equiv_rw` will also try rewriting under (equiv_)functors, so can turn
a hypothesis `h : list α` into `h : list β` or
a goal `⊢ unique α` into `⊢ unique β`.
The maximum search depth for rewriting in subexpressions is controlled by
`equiv_rw e {max_depth := n}`.
-/
meta def equiv_rw (e : parse texpr) (loc : parse $ (tk "at" *> ident)?) (cfg : equiv_rw_cfg := {}) : itactic :=
do e ← to_expr e,
match loc with
| (some hyp) := equiv_rw_hyp hyp e cfg
| none := equiv_rw_target e cfg
end
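/- A minimal usage sketch of the tactic defined above (illustrative only; the names
`α`, `β`, `e`, `a`, `b` are hypothetical): rewriting the goal, and rewriting a hypothesis. -/
example {α β : Type} (e : α ≃ β) (b : β) : α :=
by { equiv_rw e, exact b }
example {α β : Type} (e : α ≃ β) (a : α) : β :=
by { equiv_rw e at a, exact a }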
add_tactic_doc
{ name := "equiv_rw",
category := doc_category.tactic,
decl_names := [`tactic.interactive.equiv_rw],
tags := ["rewriting", "equiv", "transport"] }
/--
Solve a goal of the form `t ≃ _`,
by constructing an equivalence from `e : α ≃ β`.
This is the same equivalence that `equiv_rw` would use to rewrite a term of type `t`.
A typical usage might be:
```
have e' : option α ≃ option β := by equiv_rw_type e
```
-/
meta def equiv_rw_type (e : parse texpr) (cfg : equiv_rw_cfg := {}) : itactic :=
do
`(%%t ≃ _) ← target | fail "`equiv_rw_type` solves goals of the form `t ≃ _`.",
e ← to_expr e,
tactic.equiv_rw_type e t cfg >>= tactic.exact
add_tactic_doc
{ name := "equiv_rw_type",
category := doc_category.tactic,
decl_names := [`tactic.interactive.equiv_rw_type],
tags := ["rewriting", "equiv", "transport"] }
end tactic.interactive
|
4733afb1ef48446b7f789d1da9e65b767a04c24d
|
dce2ec26258c35f21c8a5318a7a2ed297ddba7c6
|
/src/ncategory_theory/Ndefs.lean
|
ad1485edc55095980961dce4f407c0cd18f67cb5
|
[] |
no_license
|
nedsu/lean-category-theory-pr
|
c151c4f95d58a2d3b33e8f38ff7734d85d82e5ee
|
8d72f4645d83c72d15afaee7a2df781b952417e9
|
refs/heads/master
| 1,625,444,569,743
| 1,597,056,476,000
| 1,597,056,476,000
| 145,738,350
| 0
| 0
| null | 1,534,957,201,000
| 1,534,957,201,000
| null |
UTF-8
|
Lean
| false
| false
| 3,842
|
lean
|
import category_theory.isomorphism
open category_theory
universes u v u₀ v₀
namespace category_theory.isomorphism
definition is_iso' {D : Type u} [𝒟 : category.{v} D] {X Y : D} (f : X ⟶ Y) : Prop := nonempty (is_iso f)
end category_theory.isomorphism
namespace category_theory.functor
variables {D : Type u} [𝒟 : category.{v} D] {E : Type u₀} [ℰ : category.{v₀} E]
include 𝒟 ℰ
definition is_Faithful_Functor (F : D ⥤ E) :=
∀ {A B : D} {f g : A ⟶ B} (p : F.map f = F.map g), f = g
definition is_Full_Functor (F : D ⥤ E) :=
∀ {A B : D} (h : F.obj A ⟶ F.obj B), ∃f : A ⟶ B, F.map f = h
structure Full_and_Faithful_Functor (F : D ⥤ E) : Type (max u v u₀ v₀) :=
(morinv : Π {X Y : D}, (F.obj X ⟶ F.obj Y) → (X ⟶ Y))
(left_inverse' : ∀ {X Y : D} {f : X ⟶ Y}, morinv (F.map f) = f)
(right_inverse' : ∀ {X Y : D} {f : F.obj X ⟶ F.obj Y}, F.map (morinv f) = f)
restate_axiom Full_and_Faithful_Functor.left_inverse'
restate_axiom Full_and_Faithful_Functor.right_inverse'
definition Full_and_Faithful_Functor_is_Full_is_Faithful (F : D ⥤ E) : Full_and_Faithful_Functor F → (is_Full_Functor F ∧ is_Faithful_Functor F) :=
begin
intro,
apply and.intro,
unfold is_Full_Functor,
intros,
exact ⟨ a.morinv h, by rw Full_and_Faithful_Functor.right_inverse ⟩,
unfold is_Faithful_Functor,
intros,
exact calc
f = a.morinv (F.map f) : by rw Full_and_Faithful_Functor.left_inverse
... = a.morinv (F.map g) : by rw p
... = g : by rw Full_and_Faithful_Functor.left_inverse
end
noncomputable definition is_Full_is_Faithful_to_Full_and_Faithful_Functor (F : D ⥤ E) : (is_Full_Functor F ∧ is_Faithful_Functor F) → Full_and_Faithful_Functor F :=
begin
intros,
exact {
morinv := begin
intros _ _ g,
exact (classical.indefinite_description _ (a.left g)).1
end,
left_inverse' := begin
intros,
exact (a.right ((classical.indefinite_description (λ (x : X ⟶ Y), F.map x = F.map f) _).2))
end,
right_inverse' := begin
intros,
exact (classical.indefinite_description (λ (x : X ⟶ Y), F.map x = f) _).2
end,
}
end
end category_theory.functor
namespace category_theory.Idempotent
definition is_Idempotent {D : Type u} [𝒟 : category.{v} D] {X : D} (e : X ⟶ X) :=
e ≫ e = e
definition is_Split_Idempotent {D : Type u} [𝒟 : category.{v} D] {X : D} (e : X ⟶ X) :=
∃(Y : D) (f : X ⟶ Y) (g : Y ⟶ X),(f ≫ g = e)∧(g ≫ f = 𝟙Y)
lemma Split_Idem_is_Idem {D : Type u} [𝒟 : category.{v} D] (X : D) (e : X ⟶ X) : (is_Split_Idempotent e) → (is_Idempotent e) :=
begin
intro hsi,
cases (classical.indefinite_description _ hsi) with Y hsi₁,
cases (classical.indefinite_description _ hsi₁) with f hsi₂,
cases (classical.indefinite_description _ hsi₂) with g hg,
exact calc
e ≫ e = (f ≫ g) ≫ f ≫ g : by rw hg.1
... = f ≫ g ≫ f ≫ g : by rw category.assoc
... = f ≫ (g ≫ f) ≫ g : by rw category.assoc
... = f ≫ 𝟙Y ≫ g : by rw hg.2
... = f ≫ g : by rw category.id_comp
... = e : by rw hg.1
end
end category_theory.Idempotent
|
f4398302857788cd788886bc48fe81f3906f46eb
|
6432ea7a083ff6ba21ea17af9ee47b9c371760f7
|
/tests/pkg/user_attr/UserAttr/Tst.lean
|
53b664d876eabf965441df94f89e5a5aa54a24ae
|
[
"Apache-2.0",
"LLVM-exception",
"NCSA",
"LGPL-3.0-only",
"LicenseRef-scancode-inner-net-2.0",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"Spencer-94",
"LGPL-2.1-or-later",
"HPND",
"LicenseRef-scancode-pcre",
"ISC",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"SunPro",
"CMU-Mach"
] |
permissive
|
leanprover/lean4
|
4bdf9790294964627eb9be79f5e8f6157780b4cc
|
f1f9dc0f2f531af3312398999d8b8303fa5f096b
|
refs/heads/master
| 1,693,360,665,786
| 1,693,350,868,000
| 1,693,350,868,000
| 129,571,436
| 2,827
| 311
|
Apache-2.0
| 1,694,716,156,000
| 1,523,760,560,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 2,273
|
lean
|
import Lean
import UserAttr.BlaAttr
@[bla] def f (x : Nat) := x + 2
@[bla] def g (x : Nat) := x + 1
@[foo 10] def h1 (x : Nat) := 2*x + 1
@[foo 20 important] def h2 (x : Nat) := 2*x + 1
open Lean in
def hasBlaAttr (declName : Name) : CoreM Bool :=
return blaAttr.hasTag (← getEnv) declName
#eval hasBlaAttr ``f
#eval hasBlaAttr ``id
open Lean in
def getFooAttrInfo? (declName : Name) : CoreM (Option (Nat × Bool)) :=
return fooAttr.getParam? (← getEnv) declName
#eval getFooAttrInfo? ``f
#eval getFooAttrInfo? ``h1
#eval getFooAttrInfo? ``h2
@[my_simp] theorem f_eq : f x = x + 2 := rfl
@[my_simp] theorem g_eq : g x = x + 1 := rfl
example : f x + g x = 2*x + 3 := by
fail_if_success simp_arith -- does not apply f_eq and g_eq
simp_arith [f, g]
example : f x + g x = 2*x + 3 := by
simp_arith [my_simp]
example : f x = id (x + 2) := by
simp
simp [my_simp]
macro "my_simp" : tactic => `(tactic| simp [my_simp])
example : f x = id (x + 2) := by
my_simp
@[simp low, my_simp low]
axiom expand_mul_add (x y z : Nat) : x * (y + z) = x * y + x * y
@[simp high, my_simp high]
axiom expand_add_mul (x y z : Nat) : (x + y) * z = x * z + y * z
@[simp, my_simp]
axiom lassoc_add (x y z : Nat) : x + (y + z) = x + y + z
set_option trace.Meta.Tactic.simp.rewrite true
-- Rewrites: expand_mul_add -> expand_mul_add -> lassoc_add
theorem ex1 (x : Nat) : (x + x) * (x + x) = x * x + x * x + x * x + x * x := by simp only [my_simp]
-- Rewrites: expand_add_mul -> expand_mul_add -> lassoc_add
theorem ex2 (x : Nat) : (x + x) * (x + x) = x * x + x * x + x * x + x * x := by simp
open Lean Meta in
def checkProofs : MetaM Unit := do
let .thmInfo info1 ← getConstInfo `ex1 | throwError "unexpected"
let .thmInfo info2 ← getConstInfo `ex2 | throwError "unexpected"
unless info1.value == info2.value do
throwError "unexpected values"
#eval checkProofs
open Lean Meta in
def showThmsOf (simpAttrName : Name) : MetaM Unit := do
let some simpExt ← getSimpExtension? simpAttrName
| throwError "`{simpAttrName}` is not a simp attribute"
let thms ← simpExt.getTheorems
let thmNames := thms.lemmaNames.fold (init := #[]) fun acc origin => acc.push origin.key
for thmName in thmNames do
IO.println thmName
#eval showThmsOf `my_simp
|
a766492fe78aee7ab7d3b1059c9aca7e97ea667a
|
957a80ea22c5abb4f4670b250d55534d9db99108
|
/tests/lean/coe6.lean
|
3aa5bfb1746799d9d81646b5f42cc9b235beaf48
|
[
"Apache-2.0"
] |
permissive
|
GaloisInc/lean
|
aa1e64d604051e602fcf4610061314b9a37ab8cd
|
f1ec117a24459b59c6ff9e56a1d09d9e9e60a6c0
|
refs/heads/master
| 1,592,202,909,807
| 1,504,624,387,000
| 1,504,624,387,000
| 75,319,626
| 2
| 1
|
Apache-2.0
| 1,539,290,164,000
| 1,480,616,104,000
|
C++
|
UTF-8
|
Lean
| false
| false
| 322
|
lean
|
universe variables u
structure Group :=
(carrier : Type u) (mul : carrier → carrier → carrier) (one : carrier)
attribute [instance]
definition Group_to_Type : has_coe_to_sort Group :=
{ S := Type u, coe := λ g, g^.carrier }
constant g : Group.{1}
set_option pp.binder_types true
#check λ a b : g, Group.mul g a b
|
c51a3593e01c282591c6140e92553cc520557332
|
d1a52c3f208fa42c41df8278c3d280f075eb020c
|
/stage0/src/Lean/Elab/DeclUtil.lean
|
d849ee478b803a690c2558fe0919687d420f3ddc
|
[
"Apache-2.0",
"LLVM-exception",
"NCSA",
"LGPL-3.0-only",
"LicenseRef-scancode-inner-net-2.0",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"Spencer-94",
"LGPL-2.1-or-later",
"HPND",
"LicenseRef-scancode-pcre",
"ISC",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"SunPro",
"CMU-Mach"
] |
permissive
|
cipher1024/lean4
|
6e1f98bb58e7a92b28f5364eb38a14c8d0aae393
|
69114d3b50806264ef35b57394391c3e738a9822
|
refs/heads/master
| 1,642,227,983,603
| 1,642,011,696,000
| 1,642,011,696,000
| 228,607,691
| 0
| 0
|
Apache-2.0
| 1,576,584,269,000
| 1,576,584,268,000
| null |
UTF-8
|
Lean
| false
| false
| 3,835
|
lean
|
/-
Copyright (c) 2020 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura, Sebastian Ullrich
-/
import Lean.Meta.Basic
import Lean.Meta.Check
namespace Lean.Meta
def forallTelescopeCompatibleAux {α} (k : Array Expr → Expr → Expr → MetaM α) : Nat → Expr → Expr → Array Expr → MetaM α
| 0, type₁, type₂, xs => k xs type₁ type₂
| i+1, type₁, type₂, xs => do
let type₁ ← whnf type₁
let type₂ ← whnf type₂
match type₁, type₂ with
| Expr.forallE n₁ d₁ b₁ c₁, Expr.forallE n₂ d₂ b₂ c₂ =>
unless n₁ == n₂ do
throwError "parameter name mismatch '{n₁}', expected '{n₂}'"
unless (← isDefEq d₁ d₂) do
throwError "parameter '{n₁}' {← mkHasTypeButIsExpectedMsg d₁ d₂}"
unless c₁.binderInfo == c₂.binderInfo do
throwError "binder annotation mismatch at parameter '{n₁}'"
withLocalDecl n₁ c₁.binderInfo d₁ fun x =>
let type₁ := b₁.instantiate1 x
let type₂ := b₂.instantiate1 x
forallTelescopeCompatibleAux k i type₁ type₂ (xs.push x)
| _, _ => throwError "unexpected number of parameters"
/-- Given two forall-expressions `type₁` and `type₂`, ensure the first `numParams` parameters are compatible, and
then execute `k` with the parameters and remaining types. -/
def forallTelescopeCompatible {α m} [Monad m] [MonadControlT MetaM m] (type₁ type₂ : Expr) (numParams : Nat) (k : Array Expr → Expr → Expr → m α) : m α :=
controlAt MetaM fun runInBase =>
forallTelescopeCompatibleAux (fun xs type₁ type₂ => runInBase $ k xs type₁ type₂) numParams type₁ type₂ #[]
end Meta
namespace Elab
def expandOptDeclSig (stx : Syntax) : Syntax × Option Syntax :=
-- many Term.bracketedBinder >> Term.optType
let binders := stx[0]
let optType := stx[1] -- optional (leading_parser " : " >> termParser)
if optType.isNone then
(binders, none)
else
let typeSpec := optType[0]
(binders, some typeSpec[1])
def expandDeclSig (stx : Syntax) : Syntax × Syntax :=
-- many Term.bracketedBinder >> Term.typeSpec
let binders := stx[0]
let typeSpec := stx[1]
(binders, typeSpec[1])
def mkFreshInstanceName (env : Environment) (nextIdx : Nat) : Name :=
(env.mainModule ++ `_instance).appendIndexAfter nextIdx
def isFreshInstanceName (name : Name) : Bool :=
match name with
| Name.str _ s _ => "_instance".isPrefixOf s
| _ => false
/--
Sort the given list of `usedParams` using the following order:
- If it is an explicit level in `allUserParams`, then use the user-given order.
- Otherwise, use lexicographical order.
Remark: `scopeParams` are the universe params introduced using the `universe` command. `allUserParams` contains
the universe params introduced using the `universe` command *and* the `.{...}` notation.
Remark: this function returns an exception if there is a `u` in `allUserParams` that is neither in `usedParams` nor in `scopeParams`.
Remark: `explicitParams` are in reverse declaration order. That is, the head is the last declared parameter. -/
def sortDeclLevelParams (scopeParams : List Name) (allUserParams : List Name) (usedParams : Array Name) : Except String (List Name) :=
match allUserParams.find? $ fun u => !usedParams.contains u && !scopeParams.elem u with
| some u => throw s!"unused universe parameter '{u}'"
| none =>
let result := allUserParams.foldl (fun result levelName => if usedParams.elem levelName then levelName :: result else result) []
let remaining := usedParams.filter (fun levelParam => !allUserParams.elem levelParam)
let remaining := remaining.qsort Name.lt
pure $ result ++ remaining.toList
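/- A concrete evaluation sketch of the ordering described above (illustrative names
`u`, `v`, `w`; not part of the original file). With `allUserParams := [`u, `v]` and
`usedParams := #[`w, `v, `u]`, the used user-given params come back reversed and the
leftover `w` is appended in `Name.lt` order. -/
#eval match sortDeclLevelParams [] [`u, `v] #[`w, `v, `u] with
  | Except.ok ps   => toString ps   -- should print "[v, u, w]"
  | Except.error e => e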
end Lean.Elab
|
02b54ea45f6eb484f762993eb5fca4868cf87ed2
|
2f4707593665f4eac651cd5e5b39ea8eabafe0d6
|
/src/manifold.lean
|
3d1db88b5a4bc742f6ba356c7cc3b6260fc35cd5
|
[] |
no_license
|
PatrickMassot/lean-differential-topology
|
941d2ac639b2c8bcfcbad85fe5ff0a4676d159e9
|
5b020daa5f935140c53408748a9f11ba02e7bf42
|
refs/heads/master
| 1,543,081,959,510
| 1,536,089,170,000
| 1,536,089,170,000
| 112,875,905
| 12
| 5
| null | 1,520,841,685,000
| 1,512,247,905,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 11,112
|
lean
|
import data.set.function
import analysis.normed_space
import tactic.easy
noncomputable theory
local attribute [instance] classical.prop_decidable
open function set
def continuous_on {α : Type*} {β : Type*} [topological_space α] [topological_space β] (U : set α) (f : α → β) :=
∀ W : set β, is_open W → is_open (f ⁻¹' W ∩ U)
section smooth_maps
universes u
variables {α : Type u} {β : Type u} {γ : Type u} [normed_space ℝ α] [normed_space ℝ β] [normed_space ℝ γ]
def smooth_at : ∀ (x : α) (f : α → β), Prop := sorry
lemma smooth_at_congr {x : α} {f g : α → β} {U : set α} (U_nhd : U ∈ (nhds x).sets) (Hcongr : ∀ x ∈ U, f x = g x) :
smooth_at x f ↔ smooth_at x g :=
sorry
lemma smooth_at_comp {x} {f : α → β} {g : β → γ} :
smooth_at x f → smooth_at (f x) g → smooth_at x (g ∘ f) := sorry
def smooth_on (U : set α) (f : α → β) : Prop := ∀ x ∈ U, smooth_at x f
lemma cont_on_of_smooth_on {U : set α} {f : α → β} : smooth_on U f → continuous_on U f :=
sorry
variables {U : set α} {V : set β}
lemma smooth_on_comp (f : α → β) (g : β → γ) :
smooth_on U f → smooth_on V g → f '' U ⊆ V → smooth_on U (g ∘ f) :=
assume sm_f sm_g f_U_V x x_in_U,
smooth_at_comp (sm_f x x_in_U) (sm_g (f x) $ f_U_V $ mem_image_of_mem f x_in_U)
end smooth_maps
section charts
structure chart (X : Type) :=
(domain : set X)
(target : Type)
[normed_target : normed_space ℝ target]
(map : X → target)
(inj : inj_on map domain)
(open_range : is_open $ map '' domain)
variables {X : Type} [inhabited X]
def chart.range (φ : chart X) := φ.map '' φ.domain
def chart.inv (φ : chart X) := inv_fun_on φ.map (φ.domain)
instance : has_coe_to_fun (chart X) := ⟨_, λ φ, φ.map⟩
instance : has_mem X (chart X) := ⟨λ x φ, x ∈ φ.domain⟩
instance chart.target_is_normed (φ : chart X) : normed_space ℝ (φ.target) :=
φ.normed_target
lemma chart.injectivity (φ : chart X) :
∀ (x : X), x ∈ φ.domain → ∀ (y : X), y ∈ φ.domain → φ x = φ y → x = y :=
begin
intros,
apply φ.inj ; assumption
end
lemma chart_inv_chart {φ : chart X} (x ∈ φ.domain) :
φ.inv (φ x) = x :=
begin
change inv_fun_on (φ.map) (φ.domain) (φ.map x) = x,
rw inv_fun_on_eq',
exact chart.injectivity φ,
assumption
end
lemma chart_chart_inv {φ : chart X} {x : φ.target} : x ∈ φ.range → φ (φ.inv x) = x
| ⟨y, ⟨y_in, φ_y⟩⟩ := inv_fun_on_eq ⟨y, ⟨y_in, φ_y⟩⟩
lemma chart.mem_inter {φ ψ : chart X} {x : X} (h : x ∈ φ) (h' : x ∈ ψ) : x ∈ φ.domain ∩ ψ.domain :=
mem_inter h h'
lemma chart.image_inter {φ : chart X} (U V : set X) : φ '' (φ.domain ∩ U) ∩ φ '' (φ.domain ∩ V) = φ '' (φ.domain ∩ U ∩ V) :=
begin
have : ∀ (x ∈ φ.domain ∩ U) (y ∈ φ.domain ∩ V), φ x = φ y → x = y,
{ intros x x_in y y_in,
apply φ.injectivity,
easy },
have : φ '' (φ.domain ∩ V) ∩ φ '' (φ.domain ∩ U) = φ '' (φ.domain ∩ V ∩ (φ.domain ∩ U))
:= image_inter_on this,
have tauto : φ.domain ∩ V ∩ (φ.domain ∩ U) = φ.domain ∩ U ∩ V,
by easy,
rwa [inter_comm, tauto] at this
end
def transition (φ ψ : chart X) : φ.target → ψ.target :=
ψ ∘ φ.inv
lemma transition_comp (φ ψ χ : chart X) (x : φ.target) :
x ∈ φ '' (φ.domain ∩ ψ.domain ∩ χ.domain) → transition φ χ x = ((transition ψ χ) ∘ (transition φ ψ)) x :=
begin
intro h,
change χ.map (φ.inv x) =
(χ.map ((ψ.inv) (ψ (φ.inv x)))),
rcases h with ⟨y, ⟨⟨y_in_φ, y_in_ψ⟩, y_in_χ⟩, ψ_y⟩,
rw chart_inv_chart,
rw [←ψ_y, chart_inv_chart] ; assumption
end
@[simp]
lemma transition_self {φ : chart X} : ∀ y ∈ φ.range, transition φ φ y = y :=
begin
intros,
change φ (chart.inv φ y) = y,
simp [chart_chart_inv, *]
end
@[simp]
lemma transition_inverse {φ ψ : chart X} {y : φ.target} (H : y ∈ φ '' (φ.domain ∩ ψ.domain)) :
transition ψ φ (transition φ ψ y) = y :=
begin
change ((transition ψ φ) ∘ transition φ ψ) y = y,
rw ←transition_comp,
{ have : y ∈ φ.range := image_subset φ (inter_subset_left _ _) H,
simp [*] },
have : φ.domain ∩ ψ.domain ∩ φ.domain = φ.domain ∩ ψ.domain,
by easy,
simp [this, H]
end
lemma image_transition_eq_preimage {φ ψ : chart X} {U : set φ.target} :
U ⊆ φ '' (φ.domain ∩ ψ.domain) → (transition φ ψ) '' U = (transition ψ φ) ⁻¹' U ∩ ψ ''(φ.domain ∩ ψ.domain) :=
begin
intro sub,
ext x,
split,
{ intro x_in,
rcases x_in with ⟨y, ⟨y_in_U, tr_y_x⟩⟩,
have : transition ψ φ x = y,
{ rw ←tr_y_x,
exact transition_inverse (sub y_in_U) },
split,
{ rw mem_preimage_eq,
cc },
{ existsi φ.inv y,
split,
{ rcases sub y_in_U with ⟨a, ⟨⟨h, h'⟩, h''⟩⟩,
rw [←h'', chart_inv_chart] ; try { split } ; assumption },
{ exact tr_y_x } } },
{ intro h,
cases h with tr_x_in x_in_ψ_inter,
rw inter_comm at x_in_ψ_inter,
existsi [transition ψ φ x, tr_x_in],
simp[transition_inverse, *] }
end
def is_open_intersection (φ ψ : chart X) :=
is_open (φ '' (φ.domain ∩ ψ.domain))
def is_smooth_transition (φ ψ : chart X) :=
smooth_on (φ '' (φ.domain ∩ ψ.domain)) (transition φ ψ)
lemma open_transition_of_compatible {φ ψ : chart X} {U : set φ.target} :
is_smooth_transition ψ φ → is_open U → U ⊆ φ '' (φ.domain ∩ ψ.domain) → is_open ((transition φ ψ) '' U) :=
begin
intros sm op sub,
rw image_transition_eq_preimage sub,
simpa [inter_comm] using cont_on_of_smooth_on sm U op
end
structure smooth_compatible (φ ψ : chart X) : Prop :=
(open_intersection : is_open_intersection φ ψ)
(smooth_transition : is_smooth_transition φ ψ)
end charts
section atlases
variables (X : Type) [inhabited X]
structure smooth_atlas :=
(charts : set (chart X))
(covering : ∀ m : X, ∃ φ ∈ charts, m ∈ φ)
(compatibility : ∀ φ ψ ∈ charts, smooth_compatible φ ψ)
lemma open_triple_intersection {φ ψ χ : chart X} :
smooth_compatible φ ψ → smooth_compatible ψ φ → smooth_compatible ψ χ → is_open (φ '' (φ.domain ∩ ψ.domain ∩ χ.domain))
:=
begin
intros h h' h'',
rcases h with ⟨op_φ_ψ, smooth_φ_ψ⟩,
rcases h' with ⟨op_ψ_φ, smooth_ψ_φ⟩,
rcases h'' with ⟨op_ψ_χ, smooth_ψ_χ⟩,
have op := is_open_inter op_ψ_φ op_ψ_χ,
rw chart.image_inter at op,
have : ψ '' (ψ.domain ∩ φ.domain ∩ χ.domain) ⊆ ψ '' (ψ.domain ∩ φ.domain) :=
image_subset ψ (inter_subset_left _ _),
have op := open_transition_of_compatible smooth_φ_ψ op this,
have h : transition ψ φ '' (ψ '' (ψ.domain ∩ φ.domain ∩ χ.domain)) = φ '' (ψ.domain ∩ φ.domain ∩ χ.domain),
{ rw [←image_comp, image_eq_image_of_eq_on],
intros x x_in,
rcases x_in with ⟨⟨x_in_ψ, x_in_φ⟩, x_in_χ⟩,
change φ (ψ.inv (ψ x)) = φ x,
rwa chart_inv_chart },
have comm : ψ.domain ∩ φ.domain ∩ χ.domain = φ.domain ∩ ψ.domain ∩ χ.domain,
by easy,
rwa [h, comm] at op
end
lemma smooth_atlas_eqv_aux {a b c : smooth_atlas X} {φ χ : chart X}
(φ_in_a : φ ∈ a.charts) (χ_in_c : χ ∈ c.charts)
(h₁ : ∀ (ψ : chart X), ψ ∈ b.charts → smooth_compatible φ ψ ∧ smooth_compatible ψ φ)
(h₂ : ∀ (ψ : chart X), ψ ∈ b.charts → smooth_compatible ψ χ ∧ smooth_compatible χ ψ)
: smooth_compatible φ χ :=
begin
constructor,
{ apply is_open_iff_forall_mem_open.2,
intros x x_in,
rcases x_in with ⟨y, ⟨y_in_φ, y_in_χ⟩, φ_y⟩,
rcases b.covering y with ⟨ψ, ψ_in_b, y_in_ψ⟩,
have H : φ '' (φ.domain ∩ ψ.domain ∩ χ.domain) ⊆ φ '' (φ.domain ∩ χ.domain),
{ apply image_subset,
intros x h,
easy },
existsi [φ '' (φ.domain ∩ ψ.domain ∩ χ.domain), H],
exact ⟨open_triple_intersection X (h₁ ψ ψ_in_b).1 (h₁ ψ ψ_in_b).2 (h₂ ψ ψ_in_b).1,
⟨y, by { easy}⟩⟩ },
{ intros x x_in,
rcases x_in with ⟨y, ⟨y_in_φ, y_in_χ⟩, φ_y⟩,
rcases b.covering y with ⟨ψ, ψ_in_b, y_in_ψ⟩,
rcases h₁ ψ ψ_in_b with ⟨⟨op_φ_ψ, smooth_φ_ψ⟩, ⟨op_ψ_φ, smooth_ψ_φ⟩⟩,
rcases h₂ ψ ψ_in_b with ⟨⟨op_ψ_χ, smooth_ψ_χ⟩, ⟨op_χ_ψ, smooth_χ_ψ⟩⟩,
have : y ∈ φ.domain ∩ ψ.domain ∩ χ.domain,
by easy,
let W := φ '' (φ.domain ∩ ψ.domain ∩ χ.domain),
have x_in_W : x ∈ W := φ_y ▸ mem_image_of_mem φ this,
have W_nhds_x : W ∈ (nhds x).sets :=
mem_nhds_sets (open_triple_intersection X (h₁ ψ ψ_in_b).1 (h₁ ψ ψ_in_b).2 (h₂ ψ ψ_in_b).1) x_in_W,
rw smooth_at_congr W_nhds_x (transition_comp φ ψ χ),
change smooth_at x (transition ψ χ ∘ transition φ ψ),
apply smooth_at_comp,
{ exact smooth_φ_ψ x (φ_y ▸ mem_image_of_mem φ ⟨y_in_φ, y_in_ψ⟩) },
{ apply smooth_ψ_χ,
existsi y,
split,
{ easy },
{ rw ←φ_y,
change ψ y = ψ (chart.inv φ (φ y)),
rwa chart_inv_chart } } }
end
instance smooth_atlas_eqv : setoid (smooth_atlas X) :=
⟨λ a b, ∀ (φ ∈ a.charts) (ψ ∈ b.charts),
(smooth_compatible φ ψ) ∧ (smooth_compatible ψ φ),
begin
apply mk_equivalence,
{ intros a φ φ_in_a ψ ψ_in_a,
exact ⟨a.compatibility φ ψ φ_in_a ψ_in_a, a.compatibility ψ φ ψ_in_a φ_in_a⟩ },
{ intros a b H ψ ψ_in_b φ φ_in_a,
finish },
{ intros a b c h₁ h₂ φ φ_in_a χ χ_in_c,
specialize h₁ φ φ_in_a,
replace h₂ := λ ψ ψ_in_b, h₂ ψ ψ_in_b χ χ_in_c,
split,
exact smooth_atlas_eqv_aux X φ_in_a χ_in_c h₁ h₂,
apply smooth_atlas_eqv_aux X χ_in_c φ_in_a,
swap 3,
exact b,
all_goals { simp * {contextual := tt} } }
end⟩
def smooth_atlas_class := quotient $ smooth_atlas_eqv X
variable {X}
def atlases (atlas_class : smooth_atlas_class X) : set (smooth_atlas X) :=
{ a | ⟦a⟧ = atlas_class }
def charts (atlas_class : smooth_atlas_class X) : set (chart X) :=
⋃₀ (smooth_atlas.charts '' atlases atlas_class)
def chart_domains (atlas_class : smooth_atlas_class X) : set (set X) :=
chart.domain '' (charts atlas_class)
open topological_space
def smooth_atlas_class.to_topology (atlas_class : smooth_atlas_class X) : topological_space X :=
generate_from (chart_domains atlas_class)
variable (X)
class smooth_manifold :=
(dim : ℕ)
(atlas_class : smooth_atlas_class X)
(is_t2 : @t2_space X (smooth_atlas_class.to_topology atlas_class))
(is_second_countable : @second_countable_topology X (smooth_atlas_class.to_topology atlas_class))
end atlases
namespace smooth_manifold
variables (X : Type) [inhabited X] [smooth_manifold X]
def charts := charts (smooth_manifold.atlas_class X)
def domains := chart.domain '' charts X
def topology := smooth_atlas_class.to_topology (smooth_manifold.atlas_class X)
end smooth_manifold
|
3c12fd47371dd9ef3bfc61a48795ffbb5de0f0d0
|
b2e508d02500f1512e1618150413e6be69d9db10
|
/src/category_theory/equivalence.lean
|
f7d29dfa28f8041193a20e7f9937580e218ded76
|
[
"Apache-2.0"
] |
permissive
|
callum-sutton/mathlib
|
c3788f90216e9cd43eeffcb9f8c9f959b3b01771
|
afd623825a3ac6bfbcc675a9b023edad3f069e89
|
refs/heads/master
| 1,591,371,888,053
| 1,560,990,690,000
| 1,560,990,690,000
| 192,476,045
| 0
| 0
|
Apache-2.0
| 1,568,941,843,000
| 1,560,837,965,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 13,626
|
lean
|
-- Copyright (c) 2017 Scott Morrison. All rights reserved.
-- Released under Apache 2.0 license as described in the file LICENSE.
-- Authors: Tim Baumann, Stephen Morgan, Scott Morrison, Floris van Doorn
import category_theory.fully_faithful
import category_theory.whiskering
import category_theory.natural_isomorphism
import tactic.slice
import tactic.converter.interactive
namespace category_theory
open category_theory.functor nat_iso category
universes v₁ v₂ v₃ u₁ u₂ u₃ -- declare the `v`'s first; see `category_theory.category` for an explanation
/-- We define an equivalence as a (half)-adjoint equivalence, a pair of functors with
a unit and counit which are natural isomorphisms and the triangle law `Fη ≫ εF = 1`, or in other
words the composite `F ⟶ FGF ⟶ F` is the identity.
The triangle equation is written as a family of equalities between morphisms; it is more
complicated if we write it as an equality of natural transformations, because then we would have
to insert natural transformations like `F ⟶ F1`.
-/
structure equivalence (C : Type u₁) [category.{v₁} C] (D : Type u₂) [category.{v₂} D] :=
mk' ::
(functor : C ⥤ D)
(inverse : D ⥤ C)
(unit_iso : 𝟭 C ≅ functor ⋙ inverse)
(counit_iso : inverse ⋙ functor ≅ 𝟭 D)
(functor_unit_iso_comp' : ∀(X : C), functor.map ((unit_iso.hom : 𝟭 C ⟶ functor ⋙ inverse).app X) ≫
counit_iso.hom.app (functor.obj X) = 𝟙 (functor.obj X) . obviously)
restate_axiom equivalence.functor_unit_iso_comp'
infixr ` ≌ `:10 := equivalence
variables {C : Type u₁} [𝒞 : category.{v₁} C] {D : Type u₂} [𝒟 : category.{v₂} D]
include 𝒞 𝒟
namespace equivalence
@[simp] def unit (e : C ≌ D) : 𝟭 C ⟶ e.functor ⋙ e.inverse := e.unit_iso.hom
@[simp] def counit (e : C ≌ D) : e.inverse ⋙ e.functor ⟶ 𝟭 D := e.counit_iso.hom
@[simp] def unit_inv (e : C ≌ D) : e.functor ⋙ e.inverse ⟶ 𝟭 C := e.unit_iso.inv
@[simp] def counit_inv (e : C ≌ D) : 𝟭 D ⟶ e.inverse ⋙ e.functor := e.counit_iso.inv
lemma unit_def (e : C ≌ D) : e.unit_iso.hom = e.unit := rfl
lemma counit_def (e : C ≌ D) : e.counit_iso.hom = e.counit := rfl
lemma unit_inv_def (e : C ≌ D) : e.unit_iso.inv = e.unit_inv := rfl
lemma counit_inv_def (e : C ≌ D) : e.counit_iso.inv = e.counit_inv := rfl
lemma functor_unit_comp (e : C ≌ D) (X : C) : e.functor.map (e.unit.app X) ≫
e.counit.app (e.functor.obj X) = 𝟙 (e.functor.obj X) :=
e.functor_unit_iso_comp X
lemma counit_inv_functor_comp (e : C ≌ D) (X : C) :
e.counit_inv.app (e.functor.obj X) ≫ e.functor.map (e.unit_inv.app X) = 𝟙 (e.functor.obj X) :=
begin
erw [iso.inv_eq_inv
(e.functor.map_iso (e.unit_iso.app X) ≪≫ e.counit_iso.app (e.functor.obj X)) (iso.refl _)],
exact e.functor_unit_comp X
end
lemma functor_unit (e : C ≌ D) (X : C) :
e.functor.map (e.unit.app X) = e.counit_inv.app (e.functor.obj X) :=
by { erw [←iso.comp_hom_eq_id (e.counit_iso.app _), functor_unit_comp], refl }
lemma counit_functor (e : C ≌ D) (X : C) :
e.counit.app (e.functor.obj X) = e.functor.map (e.unit_inv.app X) :=
by { erw [←iso.hom_comp_eq_id (e.functor.map_iso (e.unit_iso.app X)), functor_unit_comp], refl }
/-- The other triangle equality. The proof follows the following proof in Globular:
http://globular.science/1905.001 -/
lemma unit_inverse_comp (e : C ≌ D) (Y : D) :
e.unit.app (e.inverse.obj Y) ≫ e.inverse.map (e.counit.app Y) = 𝟙 (e.inverse.obj Y) :=
begin
rw [←id_comp _ (e.inverse.map _), ←map_id e.inverse, ←counit_inv_functor_comp, map_comp,
←iso.hom_inv_id_assoc (e.unit_iso.app _) (e.inverse.map (e.functor.map _)),
app_hom, app_inv, unit_def, unit_inv_def],
slice_lhs 2 3 { erw [e.unit.naturality] },
slice_lhs 1 2 { erw [e.unit.naturality] },
slice_lhs 4 4
{ rw [←iso.hom_inv_id_assoc (e.inverse.map_iso (e.counit_iso.app _)) (e.unit_inv.app _)] },
slice_lhs 3 4 { erw [←map_comp e.inverse, e.counit.naturality],
erw [(e.counit_iso.app _).hom_inv_id, map_id] }, erw [id_comp],
slice_lhs 2 3 { erw [←map_comp e.inverse, e.counit_iso.inv.naturality, map_comp] },
slice_lhs 3 4 { erw [e.unit_inv.naturality] },
slice_lhs 4 5 { erw [←map_comp (e.functor ⋙ e.inverse), (e.unit_iso.app _).hom_inv_id, map_id] },
erw [id_comp],
slice_lhs 3 4 { erw [←e.unit_inv.naturality] },
slice_lhs 2 3 { erw [←map_comp e.inverse, ←e.counit_iso.inv.naturality,
(e.counit_iso.app _).hom_inv_id, map_id] }, erw [id_comp, (e.unit_iso.app _).hom_inv_id], refl
end
lemma inverse_counit_inv_comp (e : C ≌ D) (Y : D) :
e.inverse.map (e.counit_inv.app Y) ≫ e.unit_inv.app (e.inverse.obj Y) = 𝟙 (e.inverse.obj Y) :=
begin
erw [iso.inv_eq_inv
(e.unit_iso.app (e.inverse.obj Y) ≪≫ e.inverse.map_iso (e.counit_iso.app Y)) (iso.refl _)],
exact e.unit_inverse_comp Y
end
lemma unit_inverse (e : C ≌ D) (Y : D) :
e.unit.app (e.inverse.obj Y) = e.inverse.map (e.counit_inv.app Y) :=
by { erw [←iso.comp_hom_eq_id (e.inverse.map_iso (e.counit_iso.app Y)), unit_inverse_comp], refl }
lemma inverse_counit (e : C ≌ D) (Y : D) :
e.inverse.map (e.counit.app Y) = e.unit_inv.app (e.inverse.obj Y) :=
by { erw [←iso.hom_comp_eq_id (e.unit_iso.app _), unit_inverse_comp], refl }
@[simp] lemma fun_inv_map (e : C ≌ D) (X Y : D) (f : X ⟶ Y) :
e.functor.map (e.inverse.map f) = e.counit.app X ≫ f ≫ e.counit_inv.app Y :=
(nat_iso.naturality_2 (e.counit_iso) f).symm
@[simp] lemma inv_fun_map (e : C ≌ D) (X Y : C) (f : X ⟶ Y) :
e.inverse.map (e.functor.map f) = e.unit_inv.app X ≫ f ≫ e.unit.app Y :=
(nat_iso.naturality_1 (e.unit_iso) f).symm
section
-- In this section we convert an arbitrary equivalence to a half-adjoint equivalence.
variables {F : C ⥤ D} {G : D ⥤ C} (η : 𝟭 C ≅ F ⋙ G) (ε : G ⋙ F ≅ 𝟭 D)
def adjointify_η : 𝟭 C ≅ F ⋙ G :=
calc
𝟭 C ≅ F ⋙ G : η
... ≅ F ⋙ (𝟭 D ⋙ G) : iso_whisker_left F (left_unitor G).symm
... ≅ F ⋙ ((G ⋙ F) ⋙ G) : iso_whisker_left F (iso_whisker_right ε.symm G)
... ≅ F ⋙ (G ⋙ (F ⋙ G)) : iso_whisker_left F (associator G F G)
... ≅ (F ⋙ G) ⋙ (F ⋙ G) : (associator F G (F ⋙ G)).symm
... ≅ 𝟭 C ⋙ (F ⋙ G) : iso_whisker_right η.symm (F ⋙ G)
... ≅ F ⋙ G : left_unitor (F ⋙ G)
lemma adjointify_η_ε (X : C) :
F.map ((adjointify_η η ε).hom.app X) ≫ ε.hom.app (F.obj X) = 𝟙 (F.obj X) :=
begin
dsimp [adjointify_η], simp,
have := ε.hom.naturality (F.map (η.inv.app X)), dsimp at this, rw [this], clear this,
rw [assoc_symm _ _ (F.map _)],
have := ε.hom.naturality (ε.inv.app $ F.obj X), dsimp at this, rw [this], clear this,
have := (ε.app $ F.obj X).hom_inv_id, dsimp at this, rw [this], clear this,
rw [id_comp], have := (F.map_iso $ η.app X).hom_inv_id, dsimp at this, rw [this]
end
end
protected definition mk (F : C ⥤ D) (G : D ⥤ C)
(η : 𝟭 C ≅ F ⋙ G) (ε : G ⋙ F ≅ 𝟭 D) : C ≌ D :=
⟨F, G, adjointify_η η ε, ε, adjointify_η_ε η ε⟩
omit 𝒟
@[refl] def refl : C ≌ C := equivalence.mk (𝟭 C) (𝟭 C) (iso.refl _) (iso.refl _)
include 𝒟
@[symm] def symm (e : C ≌ D) : D ≌ C :=
⟨e.inverse, e.functor, e.counit_iso.symm, e.unit_iso.symm, e.inverse_counit_inv_comp⟩
variables {E : Type u₃} [ℰ : category.{v₃} E]
include ℰ
@[trans] def trans (e : C ≌ D) (f : D ≌ E) : C ≌ E :=
begin
apply equivalence.mk (e.functor ⋙ f.functor) (f.inverse ⋙ e.inverse),
{ refine iso.trans e.unit_iso _,
exact iso_whisker_left e.functor (iso_whisker_right f.unit_iso e.inverse) },
{ refine iso.trans _ f.counit_iso,
exact iso_whisker_left f.inverse (iso_whisker_right e.counit_iso f.functor) }
end
def fun_inv_id_assoc (e : C ≌ D) (F : C ⥤ E) : e.functor ⋙ e.inverse ⋙ F ≅ F :=
(functor.associator _ _ _).symm ≪≫ iso_whisker_right e.unit_iso.symm F ≪≫ F.left_unitor
@[simp] lemma fun_inv_id_assoc_hom_app (e : C ≌ D) (F : C ⥤ E) (X : C) :
(fun_inv_id_assoc e F).hom.app X = F.map (e.unit_inv.app X) :=
by { dsimp [fun_inv_id_assoc], tidy }
@[simp] lemma fun_inv_id_assoc_inv_app (e : C ≌ D) (F : C ⥤ E) (X : C) :
(fun_inv_id_assoc e F).inv.app X = F.map (e.unit.app X) :=
by { dsimp [fun_inv_id_assoc], tidy }
def inv_fun_id_assoc (e : C ≌ D) (F : D ⥤ E) : e.inverse ⋙ e.functor ⋙ F ≅ F :=
(functor.associator _ _ _).symm ≪≫ iso_whisker_right e.counit_iso F ≪≫ F.left_unitor
@[simp] lemma inv_fun_id_assoc_hom_app (e : C ≌ D) (F : D ⥤ E) (X : D) :
(inv_fun_id_assoc e F).hom.app X = F.map (e.counit.app X) :=
by { dsimp [inv_fun_id_assoc], tidy }
@[simp] lemma inv_fun_id_assoc_inv_app (e : C ≌ D) (F : D ⥤ E) (X : D) :
(inv_fun_id_assoc e F).inv.app X = F.map (e.counit_inv.app X) :=
by { dsimp [inv_fun_id_assoc], tidy }
end equivalence
/-- A functor that is part of a (half) adjoint equivalence -/
class is_equivalence (F : C ⥤ D) :=
mk' ::
(inverse : D ⥤ C)
(unit_iso : 𝟭 C ≅ F ⋙ inverse)
(counit_iso : inverse ⋙ F ≅ 𝟭 D)
(functor_unit_iso_comp' : ∀(X : C), F.map ((unit_iso.hom : 𝟭 C ⟶ F ⋙ inverse).app X) ≫
counit_iso.hom.app (F.obj X) = 𝟙 (F.obj X) . obviously)
restate_axiom is_equivalence.functor_unit_iso_comp'
namespace is_equivalence
instance of_equivalence (F : C ≌ D) : is_equivalence F.functor :=
{ ..F }
instance of_equivalence_inverse (F : C ≌ D) : is_equivalence F.inverse :=
is_equivalence.of_equivalence F.symm
open equivalence
protected definition mk {F : C ⥤ D} (G : D ⥤ C)
(η : 𝟭 C ≅ F ⋙ G) (ε : G ⋙ F ≅ 𝟭 D) : is_equivalence F :=
⟨G, adjointify_η η ε, ε, adjointify_η_ε η ε⟩
end is_equivalence
namespace functor
def as_equivalence (F : C ⥤ D) [is_equivalence F] : C ≌ D :=
⟨F, is_equivalence.inverse F, is_equivalence.unit_iso F, is_equivalence.counit_iso F,
is_equivalence.functor_unit_iso_comp F⟩
omit 𝒟
instance is_equivalence_refl : is_equivalence (functor.id C) :=
is_equivalence.of_equivalence equivalence.refl
include 𝒟
def inv (F : C ⥤ D) [is_equivalence F] : D ⥤ C :=
is_equivalence.inverse F
instance is_equivalence_inv (F : C ⥤ D) [is_equivalence F] : is_equivalence F.inv :=
is_equivalence.of_equivalence F.as_equivalence.symm
def fun_inv_id (F : C ⥤ D) [is_equivalence F] : F ⋙ F.inv ≅ functor.id C :=
(is_equivalence.unit_iso F).symm
def inv_fun_id (F : C ⥤ D) [is_equivalence F] : F.inv ⋙ F ≅ functor.id D :=
is_equivalence.counit_iso F
variables {E : Type u₃} [ℰ : category.{v₃} E]
include ℰ
instance is_equivalence_trans (F : C ⥤ D) (G : D ⥤ E) [is_equivalence F] [is_equivalence G] :
is_equivalence (F ⋙ G) :=
is_equivalence.of_equivalence (equivalence.trans (as_equivalence F) (as_equivalence G))
end functor
namespace is_equivalence
@[simp] lemma fun_inv_map (F : C ⥤ D) [is_equivalence F] (X Y : D) (f : X ⟶ Y) :
F.map (F.inv.map f) = (F.inv_fun_id.hom.app X) ≫ f ≫ (F.inv_fun_id.inv.app Y) :=
begin
erw [nat_iso.naturality_2],
refl
end
@[simp] lemma inv_fun_map (F : C ⥤ D) [is_equivalence F] (X Y : C) (f : X ⟶ Y) :
F.inv.map (F.map f) = (F.fun_inv_id.hom.app X) ≫ f ≫ (F.fun_inv_id.inv.app Y) :=
begin
erw [nat_iso.naturality_2],
refl
end
end is_equivalence
class ess_surj (F : C ⥤ D) :=
(obj_preimage (d : D) : C)
(iso' (d : D) : F.obj (obj_preimage d) ≅ d . obviously)
restate_axiom ess_surj.iso'
namespace functor
def obj_preimage (F : C ⥤ D) [ess_surj F] (d : D) : C := ess_surj.obj_preimage.{v₁ v₂} F d
def fun_obj_preimage_iso (F : C ⥤ D) [ess_surj F] (d : D) : F.obj (F.obj_preimage d) ≅ d :=
ess_surj.iso F d
end functor
namespace equivalence
def ess_surj_of_equivalence (F : C ⥤ D) [is_equivalence F] : ess_surj F :=
⟨ λ Y : D, F.inv.obj Y, λ Y : D, (F.inv_fun_id.app Y) ⟩
instance faithful_of_equivalence (F : C ⥤ D) [is_equivalence F] : faithful F :=
{ injectivity' := λ X Y f g w,
begin
have p := congr_arg (@category_theory.functor.map _ _ _ _ F.inv _ _) w,
simpa only [cancel_epi, cancel_mono, is_equivalence.inv_fun_map] using p
end }.
instance full_of_equivalence (F : C ⥤ D) [is_equivalence F] : full F :=
{ preimage := λ X Y f, (F.fun_inv_id.app X).inv ≫ (F.inv.map f) ≫ (F.fun_inv_id.app Y).hom,
witness' := λ X Y f,
begin
apply F.inv.injectivity,
/- obviously can finish from here... -/
dsimp, simp, dsimp,
slice_lhs 4 6 {
rw [←functor.map_comp, ←functor.map_comp],
rw [←is_equivalence.fun_inv_map],
},
slice_lhs 1 2 { simp },
dsimp, simp,
slice_lhs 2 4 {
rw [←functor.map_comp, ←functor.map_comp],
erw [nat_iso.naturality_2],
},
erw [nat_iso.naturality_1], refl
end }.
@[simp] private def equivalence_inverse (F : C ⥤ D) [full F] [faithful F] [ess_surj F] : D ⥤ C :=
{ obj := λ X, F.obj_preimage X,
map := λ X Y f, F.preimage ((F.fun_obj_preimage_iso X).hom ≫ f ≫ (F.fun_obj_preimage_iso Y).inv),
map_id' := λ X, begin apply F.injectivity, tidy end,
map_comp' := λ X Y Z f g, by apply F.injectivity; simp }.
def equivalence_of_fully_faithfully_ess_surj
(F : C ⥤ D) [full F] [faithful : faithful F] [ess_surj F] : is_equivalence F :=
is_equivalence.mk (equivalence_inverse F)
(nat_iso.of_components
(λ X, (preimage_iso $ F.fun_obj_preimage_iso $ F.obj X).symm)
(λ X Y f, by { apply F.injectivity, obviously }))
(nat_iso.of_components
(λ Y, F.fun_obj_preimage_iso Y)
(by obviously))
end equivalence
end category_theory
|
910026fc3ca684c84f8820c41f5195974316e981
|
9dc8cecdf3c4634764a18254e94d43da07142918
|
/src/linear_algebra/basic.lean
|
21ca3a4f006a377eacde15bdb467500a79423147
|
[
"Apache-2.0"
] |
permissive
|
jcommelin/mathlib
|
d8456447c36c176e14d96d9e76f39841f69d2d9b
|
ee8279351a2e434c2852345c51b728d22af5a156
|
refs/heads/master
| 1,664,782,136,488
| 1,663,638,983,000
| 1,663,638,983,000
| 132,563,656
| 0
| 0
|
Apache-2.0
| 1,663,599,929,000
| 1,525,760,539,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 84,431
|
lean
|
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Mario Carneiro, Kevin Buzzard, Yury Kudryashov, Frédéric Dupuis,
Heather Macbeth
-/
import algebra.big_operators.pi
import algebra.module.hom
import algebra.module.prod
import algebra.module.submodule.lattice
import data.dfinsupp.basic
import data.finsupp.basic
import order.compactly_generated
/-!
# Linear algebra
This file defines the basics of linear algebra. It sets up the "categorical/lattice structure" of
modules over a ring, submodules, and linear maps.
Many of the relevant definitions, including `module`, `submodule`, and `linear_map`, are found in
`src/algebra/module`.
## Main definitions
* Many constructors for (semi)linear maps
* The kernel `ker` and range `range` of a linear map are submodules of the domain and codomain
respectively.
* The general linear group is defined to be the group of invertible linear maps from `M` to itself.
See `linear_algebra.span` for the span of a set (as a submodule),
and `linear_algebra.quotient` for quotients by submodules.
## Main theorems
See `linear_algebra.isomorphisms` for Noether's three isomorphism theorems for modules.
## Notations
* We continue to use the notations `M →ₛₗ[σ] M₂` and `M →ₗ[R] M₂` for the type of semilinear
(resp. linear) maps from `M` to `M₂` over the ring homomorphism `σ` (resp. over the ring `R`).
## Implementation notes
We note that, when constructing linear maps, it is convenient to use operations defined on bundled
maps (`linear_map.prod`, `linear_map.coprod`, arithmetic operations like `+`) instead of defining a
function and proving it is linear.
## TODO
* Parts of this file have not yet been generalized to semilinear maps
## Tags
linear algebra, vector space, module
-/
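-- An illustrative sketch of the implementation note above (hypothetical maps `f`, `g`):
-- the bundled `+` on `linear_map` already carries the linearity proof, so pointwise facts
-- like the one below come directly from the API rather than from a hand-rolled construction.
example {R M : Type*} [semiring R] [add_comm_monoid M] [module R M]
  (f g : M →ₗ[R] M) (x : M) : (f + g) x = f x + g x :=
linear_map.add_apply f g x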
open function
open_locale big_operators pointwise
variables {R : Type*} {R₁ : Type*} {R₂ : Type*} {R₃ : Type*} {R₄ : Type*}
variables {S : Type*}
variables {K : Type*} {K₂ : Type*}
variables {M : Type*} {M' : Type*} {M₁ : Type*} {M₂ : Type*} {M₃ : Type*} {M₄ : Type*}
variables {N : Type*} {N₂ : Type*}
variables {ι : Type*}
variables {V : Type*} {V₂ : Type*}
namespace finsupp
lemma smul_sum {α : Type*} {β : Type*} {R : Type*} {M : Type*}
[has_zero β] [monoid R] [add_comm_monoid M] [distrib_mul_action R M]
{v : α →₀ β} {c : R} {h : α → β → M} :
c • (v.sum h) = v.sum (λa b, c • h a b) :=
finset.smul_sum
@[simp]
lemma sum_smul_index_linear_map' {α : Type*} {R : Type*} {M : Type*} {M₂ : Type*}
[semiring R] [add_comm_monoid M] [module R M] [add_comm_monoid M₂] [module R M₂]
{v : α →₀ M} {c : R} {h : α → M →ₗ[R] M₂} :
(c • v).sum (λ a, h a) = c • (v.sum (λ a, h a)) :=
begin
rw [finsupp.sum_smul_index', finsupp.smul_sum],
{ simp only [map_smul], },
{ intro i, exact (h i).map_zero },
end
variables (α : Type*) [fintype α]
variables (R M) [add_comm_monoid M] [semiring R] [module R M]
/-- Given `fintype α`, `linear_equiv_fun_on_fintype R` is the natural `R`-linear equivalence between
`α →₀ M` and `α → M`. -/
@[simps apply] noncomputable def linear_equiv_fun_on_fintype :
(α →₀ M) ≃ₗ[R] (α → M) :=
{ to_fun := coe_fn,
map_add' := λ f g, by { ext, refl },
map_smul' := λ c f, by { ext, refl },
.. equiv_fun_on_fintype }
@[simp] lemma linear_equiv_fun_on_fintype_single [decidable_eq α] (x : α) (m : M) :
(linear_equiv_fun_on_fintype R M α) (single x m) = pi.single x m :=
begin
ext a,
change (equiv_fun_on_fintype (single x m)) a = _,
convert _root_.congr_fun (equiv_fun_on_fintype_single x m) a,
end
@[simp] lemma linear_equiv_fun_on_fintype_symm_single [decidable_eq α]
(x : α) (m : M) : (linear_equiv_fun_on_fintype R M α).symm (pi.single x m) = single x m :=
begin
ext a,
change (equiv_fun_on_fintype.symm (pi.single x m)) a = _,
convert congr_fun (equiv_fun_on_fintype_symm_single x m) a,
end
@[simp] lemma linear_equiv_fun_on_fintype_symm_coe (f : α →₀ M) :
(linear_equiv_fun_on_fintype R M α).symm f = f :=
by { ext, simp [linear_equiv_fun_on_fintype], }
/-- If `α` has a unique term, then the type of finitely supported functions `α →₀ M` is
`R`-linearly equivalent to `M`. -/
noncomputable def linear_equiv.finsupp_unique (α : Type*) [unique α] : (α →₀ M) ≃ₗ[R] M :=
{ map_add' := λ x y, rfl,
map_smul' := λ r x, rfl,
..finsupp.equiv_fun_on_fintype.trans (equiv.fun_unique α M) }
variables {R M α}
@[simp] lemma linear_equiv.finsupp_unique_apply (α : Type*) [unique α] (f : α →₀ M) :
linear_equiv.finsupp_unique R M α f = f default := rfl
@[simp] lemma linear_equiv.finsupp_unique_symm_apply {α : Type*} [unique α] (m : M) :
(linear_equiv.finsupp_unique R M α).symm m = finsupp.single default m :=
by ext; simp [linear_equiv.finsupp_unique]
end finsupp
/-- decomposing `x : ι → R` as a sum along the canonical basis -/
lemma pi_eq_sum_univ {ι : Type*} [fintype ι] [decidable_eq ι] {R : Type*} [semiring R] (x : ι → R) :
x = ∑ i, x i • (λj, if i = j then 1 else 0) :=
by { ext, simp }
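-- An illustrative instance of the lemma above at `ι = fin 2`, `R = ℕ` (a sketch, not from
-- the original file): each `λ j, if i = j then 1 else 0` is the `i`-th canonical basis vector.
example (x : fin 2 → ℕ) : x = ∑ i, x i • (λ j, if i = j then 1 else 0) :=
pi_eq_sum_univ x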
/-! ### Properties of linear maps -/
namespace linear_map
section add_comm_monoid
variables [semiring R] [semiring R₂] [semiring R₃] [semiring R₄]
variables [add_comm_monoid M] [add_comm_monoid M₁] [add_comm_monoid M₂]
variables [add_comm_monoid M₃] [add_comm_monoid M₄]
variables [module R M] [module R M₁] [module R₂ M₂] [module R₃ M₃] [module R₄ M₄]
variables {σ₁₂ : R →+* R₂} {σ₂₃ : R₂ →+* R₃} {σ₃₄ : R₃ →+* R₄}
variables {σ₁₃ : R →+* R₃} {σ₂₄ : R₂ →+* R₄} {σ₁₄ : R →+* R₄}
variables [ring_hom_comp_triple σ₁₂ σ₂₃ σ₁₃] [ring_hom_comp_triple σ₂₃ σ₃₄ σ₂₄]
variables [ring_hom_comp_triple σ₁₃ σ₃₄ σ₁₄] [ring_hom_comp_triple σ₁₂ σ₂₄ σ₁₄]
variables (f : M →ₛₗ[σ₁₂] M₂) (g : M₂ →ₛₗ[σ₂₃] M₃)
include R R₂
theorem comp_assoc (h : M₃ →ₛₗ[σ₃₄] M₄) :
((h.comp g : M₂ →ₛₗ[σ₂₄] M₄).comp f : M →ₛₗ[σ₁₄] M₄)
= h.comp (g.comp f : M →ₛₗ[σ₁₃] M₃) := rfl
omit R R₂
/-- The restriction of a linear map `f : M → M₂` to a submodule `p ⊆ M` gives a linear map
`p → M₂`. -/
def dom_restrict (f : M →ₛₗ[σ₁₂] M₂) (p : submodule R M) : p →ₛₗ[σ₁₂] M₂ := f.comp p.subtype
@[simp] lemma dom_restrict_apply (f : M →ₛₗ[σ₁₂] M₂) (p : submodule R M) (x : p) :
f.dom_restrict p x = f x := rfl
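-- On elements, `dom_restrict` is just the original map precomposed with the inclusion;
-- an illustrative sketch over `ℕ` (any semiring already available here would do).
example (φ : ℕ →ₗ[ℕ] ℕ) (s : submodule ℕ ℕ) (v : s) : φ.dom_restrict s v = φ v := rfl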
/-- A linear map `f : M₂ → M` whose values lie in a submodule `p ⊆ M` can be restricted to a
linear map `M₂ → p`. -/
def cod_restrict (p : submodule R₂ M₂) (f : M →ₛₗ[σ₁₂] M₂) (h : ∀c, f c ∈ p) : M →ₛₗ[σ₁₂] p :=
by refine {to_fun := λc, ⟨f c, h c⟩, ..}; intros; apply set_coe.ext; simp
@[simp] theorem cod_restrict_apply (p : submodule R₂ M₂) (f : M →ₛₗ[σ₁₂] M₂) {h} (x : M) :
(cod_restrict p f h x : M₂) = f x := rfl
@[simp] lemma comp_cod_restrict (p : submodule R₃ M₃) (h : ∀b, g b ∈ p) :
((cod_restrict p g h).comp f : M →ₛₗ[σ₁₃] p) = cod_restrict p (g.comp f) (assume b, h _) :=
ext $ assume b, rfl
@[simp] lemma subtype_comp_cod_restrict (p : submodule R₂ M₂) (h : ∀b, f b ∈ p) :
p.subtype.comp (cod_restrict p f h) = f :=
ext $ assume b, rfl
/-- Restrict domain and codomain of an endomorphism. -/
def restrict (f : M →ₗ[R] M) {p : submodule R M} (hf : ∀ x ∈ p, f x ∈ p) : p →ₗ[R] p :=
(f.dom_restrict p).cod_restrict p $ set_like.forall.2 hf
lemma restrict_apply
{f : M →ₗ[R] M} {p : submodule R M} (hf : ∀ x ∈ p, f x ∈ p) (x : p) :
f.restrict hf x = ⟨f x, hf x.1 x.2⟩ := rfl
lemma subtype_comp_restrict {f : M →ₗ[R] M} {p : submodule R M} (hf : ∀ x ∈ p, f x ∈ p) :
p.subtype.comp (f.restrict hf) = f.dom_restrict p := rfl
lemma restrict_eq_cod_restrict_dom_restrict
{f : M →ₗ[R] M} {p : submodule R M} (hf : ∀ x ∈ p, f x ∈ p) :
f.restrict hf = (f.dom_restrict p).cod_restrict p (λ x, hf x.1 x.2) := rfl
lemma restrict_eq_dom_restrict_cod_restrict
{f : M →ₗ[R] M} {p : submodule R M} (hf : ∀ x, f x ∈ p) :
f.restrict (λ x _, hf x) = (f.cod_restrict p hf).dom_restrict p := rfl
instance unique_of_left [subsingleton M] : unique (M →ₛₗ[σ₁₂] M₂) :=
{ uniq := λ f, ext $ λ x, by rw [subsingleton.elim x 0, map_zero, map_zero],
.. linear_map.inhabited }
instance unique_of_right [subsingleton M₂] : unique (M →ₛₗ[σ₁₂] M₂) :=
coe_injective.unique
/-- Evaluation of a `σ₁₂`-linear map at a fixed `a`, as an `add_monoid_hom`. -/
def eval_add_monoid_hom (a : M) : (M →ₛₗ[σ₁₂] M₂) →+ M₂ :=
{ to_fun := λ f, f a,
map_add' := λ f g, linear_map.add_apply f g a,
map_zero' := rfl }
/-- `linear_map.to_add_monoid_hom` promoted to an `add_monoid_hom` -/
def to_add_monoid_hom' : (M →ₛₗ[σ₁₂] M₂) →+ (M →+ M₂) :=
{ to_fun := to_add_monoid_hom,
map_zero' := by ext; refl,
map_add' := by intros; ext; refl }
lemma sum_apply (t : finset ι) (f : ι → M →ₛₗ[σ₁₂] M₂) (b : M) :
(∑ d in t, f d) b = ∑ d in t, f d b :=
add_monoid_hom.map_sum ((add_monoid_hom.eval b).comp to_add_monoid_hom') f _
section smul_right
variables [semiring S] [module R S] [module S M] [is_scalar_tower R S M]
/-- When `f` is an `R`-linear map taking values in `S`, then `λb, f b • x` is an `R`-linear map. -/
def smul_right (f : M₁ →ₗ[R] S) (x : M) : M₁ →ₗ[R] M :=
{ to_fun := λb, f b • x,
map_add' := λ x y, by rw [f.map_add, add_smul],
map_smul' := λ b y, by dsimp; rw [map_smul, smul_assoc] }
@[simp] theorem coe_smul_right (f : M₁ →ₗ[R] S) (x : M) :
(smul_right f x : M₁ → M) = λ c, f c • x := rfl
theorem smul_right_apply (f : M₁ →ₗ[R] S) (x : M) (c : M₁) :
smul_right f x c = f c • x := rfl
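-- A sketch of `smul_right` at a concrete type (assuming the usual `ℕ`-module and
-- `is_scalar_tower ℕ ℕ ℕ` instances from the algebra library are in scope).
example (φ : ℕ →ₗ[ℕ] ℕ) (v c : ℕ) : φ.smul_right v c = φ c • v := rfl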
end smul_right
instance [nontrivial M] : nontrivial (module.End R M) :=
begin
obtain ⟨m, ne⟩ := (nontrivial_iff_exists_ne (0 : M)).mp infer_instance,
exact nontrivial_of_ne 1 0 (λ p, ne (linear_map.congr_fun p m)),
end
@[simp, norm_cast] lemma coe_fn_sum {ι : Type*} (t : finset ι) (f : ι → M →ₛₗ[σ₁₂] M₂) :
⇑(∑ i in t, f i) = ∑ i in t, (f i : M → M₂) :=
add_monoid_hom.map_sum ⟨@to_fun R R₂ _ _ σ₁₂ M M₂ _ _ _ _, rfl, λ x y, rfl⟩ _ _
@[simp] lemma pow_apply (f : M →ₗ[R] M) (n : ℕ) (m : M) :
(f^n) m = (f^[n] m) :=
begin
induction n with n ih,
{ refl, },
{ simp only [function.comp_app, function.iterate_succ, linear_map.mul_apply, pow_succ, ih],
exact (function.commute.iterate_self _ _ m).symm, },
end
lemma pow_map_zero_of_le
{f : module.End R M} {m : M} {k l : ℕ} (hk : k ≤ l) (hm : (f^k) m = 0) : (f^l) m = 0 :=
by rw [← tsub_add_cancel_of_le hk, pow_add, mul_apply, hm, map_zero]
lemma commute_pow_left_of_commute
{f : M →ₛₗ[σ₁₂] M₂} {g : module.End R M} {g₂ : module.End R₂ M₂}
(h : g₂.comp f = f.comp g) (k : ℕ) : (g₂^k).comp f = f.comp (g^k) :=
begin
induction k with k ih,
{ simpa only [pow_zero], },
{ rw [pow_succ, pow_succ, linear_map.mul_eq_comp, linear_map.comp_assoc, ih,
← linear_map.comp_assoc, h, linear_map.comp_assoc, linear_map.mul_eq_comp], },
end
lemma submodule_pow_eq_zero_of_pow_eq_zero {N : submodule R M}
{g : module.End R N} {G : module.End R M} (h : G.comp N.subtype = N.subtype.comp g)
{k : ℕ} (hG : G^k = 0) : g^k = 0 :=
begin
ext m,
have hg : N.subtype.comp (g^k) m = 0,
{ rw [← commute_pow_left_of_commute h, hG, zero_comp, zero_apply], },
simp only [submodule.subtype_apply, comp_app, submodule.coe_eq_zero, coe_comp] at hg,
rw [hg, linear_map.zero_apply],
end
lemma coe_pow (f : M →ₗ[R] M) (n : ℕ) : ⇑(f^n) = (f^[n]) :=
by { ext m, apply pow_apply, }
@[simp] lemma id_pow (n : ℕ) : (id : M →ₗ[R] M)^n = id := one_pow n
section
variables {f' : M →ₗ[R] M}
lemma iterate_succ (n : ℕ) : (f' ^ (n + 1)) = comp (f' ^ n) f' :=
by rw [pow_succ', mul_eq_comp]
lemma iterate_surjective (h : surjective f') : ∀ n : ℕ, surjective ⇑(f' ^ n)
| 0 := surjective_id
| (n + 1) := by { rw [iterate_succ], exact surjective.comp (iterate_surjective n) h, }
lemma iterate_injective (h : injective f') : ∀ n : ℕ, injective ⇑(f' ^ n)
| 0 := injective_id
| (n + 1) := by { rw [iterate_succ], exact injective.comp (iterate_injective n) h, }
lemma iterate_bijective (h : bijective f') : ∀ n : ℕ, bijective ⇑(f' ^ n)
| 0 := bijective_id
| (n + 1) := by { rw [iterate_succ], exact bijective.comp (iterate_bijective n) h, }
lemma injective_of_iterate_injective {n : ℕ} (hn : n ≠ 0) (h : injective ⇑(f' ^ n)) :
injective f' :=
begin
rw [← nat.succ_pred_eq_of_pos (pos_iff_ne_zero.mpr hn), iterate_succ, coe_comp] at h,
exact injective.of_comp h,
end
lemma surjective_of_iterate_surjective {n : ℕ} (hn : n ≠ 0) (h : surjective ⇑(f' ^ n)) :
surjective f' :=
begin
rw [← nat.succ_pred_eq_of_pos (pos_iff_ne_zero.mpr hn),
nat.succ_eq_add_one, add_comm, pow_add] at h,
exact surjective.of_comp h,
end
lemma pow_apply_mem_of_forall_mem {p : submodule R M}
(n : ℕ) (h : ∀ x ∈ p, f' x ∈ p) (x : M) (hx : x ∈ p) :
(f'^n) x ∈ p :=
begin
induction n with n ih generalizing x, { simpa, },
simpa only [iterate_succ, coe_comp, function.comp_app, restrict_apply] using ih _ (h _ hx),
end
lemma pow_restrict {p : submodule R M} (n : ℕ)
(h : ∀ x ∈ p, f' x ∈ p) (h' := pow_apply_mem_of_forall_mem n h) :
(f'.restrict h)^n = (f'^n).restrict h' :=
begin
induction n with n ih;
ext,
{ simp [restrict_apply], },
{ simp [restrict_apply, linear_map.iterate_succ, -linear_map.pow_apply, ih], },
end
end
/-- A linear map `f` applied to `x : ι → R` can be computed using the image under `f` of elements
of the canonical basis. -/
lemma pi_apply_eq_sum_univ [fintype ι] [decidable_eq ι] (f : (ι → R) →ₗ[R] M) (x : ι → R) :
f x = ∑ i, x i • (f (λj, if i = j then 1 else 0)) :=
begin
conv_lhs { rw [pi_eq_sum_univ x, f.map_sum] },
apply finset.sum_congr rfl (λl hl, _),
rw map_smul
end
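-- A concrete instance of the lemma above (an illustrative sketch over `ℕ` and `fin 2`):
-- a linear map out of `fin 2 → ℕ` is determined by its values on the canonical basis functions.
example (φ : (fin 2 → ℕ) →ₗ[ℕ] ℕ) (x : fin 2 → ℕ) :
  φ x = ∑ i, x i • φ (λ j, if i = j then 1 else 0) :=
pi_apply_eq_sum_univ φ x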
end add_comm_monoid
section module
variables [semiring R] [semiring S]
[add_comm_monoid M] [add_comm_monoid M₂] [add_comm_monoid M₃]
[module R M] [module R M₂] [module R M₃]
[module S M₂] [module S M₃] [smul_comm_class R S M₂] [smul_comm_class R S M₃]
(f : M →ₗ[R] M₂)
variable (S)
/-- Applying a linear map at `v : M`, seen as an `S`-linear map from `M →ₗ[R] M₂` to `M₂`.
See `linear_map.applyₗ` for a version where `S = R`. -/
@[simps]
def applyₗ' : M →+ (M →ₗ[R] M₂) →ₗ[S] M₂ :=
{ to_fun := λ v,
{ to_fun := λ f, f v,
map_add' := λ f g, f.add_apply g v,
map_smul' := λ x f, f.smul_apply x v },
map_zero' := linear_map.ext $ λ f, f.map_zero,
map_add' := λ x y, linear_map.ext $ λ f, f.map_add _ _ }
section
variables (R M)
/--
The equivalence between R-linear maps from `R` to `M`, and points of `M` itself.
This says that the forgetful functor from `R`-modules to types is representable, by `R`.
This is an `S`-linear equivalence, under the assumption that `S` acts on `M` commuting with `R`.
When `R` is commutative, we can take this to be the usual action with `S = R`.
Otherwise, `S = ℕ` shows that the equivalence is additive.
See note [bundled maps over different rings].
-/
@[simps]
def ring_lmap_equiv_self [module S M] [smul_comm_class R S M] : (R →ₗ[R] M) ≃ₗ[S] M :=
{ to_fun := λ f, f 1,
inv_fun := smul_right (1 : R →ₗ[R] R),
left_inv := λ f, by { ext, simp },
right_inv := λ x, by simp,
.. applyₗ' S (1 : R) }
end
end module
section comm_semiring
variables [comm_semiring R] [add_comm_monoid M] [add_comm_monoid M₂] [add_comm_monoid M₃]
variables [module R M] [module R M₂] [module R M₃]
variables (f g : M →ₗ[R] M₂)
include R
/-- Composition by `f : M₂ → M₃` is a linear map from the space of linear maps `M → M₂`
to the space of linear maps `M → M₃`. -/
def comp_right (f : M₂ →ₗ[R] M₃) : (M →ₗ[R] M₂) →ₗ[R] (M →ₗ[R] M₃) :=
{ to_fun := f.comp,
map_add' := λ _ _, linear_map.ext $ λ _, map_add f _ _,
map_smul' := λ _ _, linear_map.ext $ λ _, map_smul f _ _ }
@[simp]
lemma comp_right_apply (f : M₂ →ₗ[R] M₃) (g : M →ₗ[R] M₂) :
comp_right f g = f.comp g := rfl
/-- Applying a linear map at `v : M`, seen as a linear map from `M →ₗ[R] M₂` to `M₂`.
See also `linear_map.applyₗ'` for a version that works with two different semirings.
This is the `linear_map` version of `add_monoid_hom.eval`. -/
@[simps]
def applyₗ : M →ₗ[R] (M →ₗ[R] M₂) →ₗ[R] M₂ :=
{ to_fun := λ v, { to_fun := λ f, f v, ..applyₗ' R v },
map_smul' := λ x y, linear_map.ext $ λ f, map_smul f _ _,
..applyₗ' R }
/-- Alternative version of `dom_restrict` as a linear map. -/
def dom_restrict'
(p : submodule R M) : (M →ₗ[R] M₂) →ₗ[R] (p →ₗ[R] M₂) :=
{ to_fun := λ φ, φ.dom_restrict p,
map_add' := by simp [linear_map.ext_iff],
map_smul' := by simp [linear_map.ext_iff] }
@[simp] lemma dom_restrict'_apply (f : M →ₗ[R] M₂) (p : submodule R M) (x : p) :
dom_restrict' p f x = f x := rfl
/--
The family of linear maps `M₂ → M` parameterised by `f : M₂ →ₗ[R] R` and `x : M` is linear in
both `f` and `x`.
-/
def smul_rightₗ : (M₂ →ₗ[R] R) →ₗ[R] M →ₗ[R] M₂ →ₗ[R] M :=
{ to_fun := λ f,
{ to_fun := linear_map.smul_right f,
map_add' := λ m m', by { ext, apply smul_add, },
map_smul' := λ c m, by { ext, apply smul_comm, } },
map_add' := λ f f', by { ext, apply add_smul, },
map_smul' := λ c f, by { ext, apply mul_smul, } }
@[simp] lemma smul_rightₗ_apply (f : M₂ →ₗ[R] R) (x : M) (c : M₂) :
(smul_rightₗ : (M₂ →ₗ[R] R) →ₗ[R] M →ₗ[R] M₂ →ₗ[R] M) f x c = (f c) • x := rfl
end comm_semiring
end linear_map
/--
The `R`-linear equivalence between additive morphisms `A →+ B` and `ℕ`-linear morphisms `A →ₗ[ℕ] B`.
-/
@[simps]
def add_monoid_hom_lequiv_nat {A B : Type*} (R : Type*)
[semiring R] [add_comm_monoid A] [add_comm_monoid B] [module R B] :
(A →+ B) ≃ₗ[R] (A →ₗ[ℕ] B) :=
{ to_fun := add_monoid_hom.to_nat_linear_map,
inv_fun := linear_map.to_add_monoid_hom,
map_add' := by { intros, ext, refl },
map_smul' := by { intros, ext, refl },
left_inv := by { intros f, ext, refl },
right_inv := by { intros f, ext, refl } }
/--
The `R`-linear equivalence between additive morphisms `A →+ B` and `ℤ`-linear morphisms `A →ₗ[ℤ] B`.
-/
@[simps]
def add_monoid_hom_lequiv_int {A B : Type*} (R : Type*)
[semiring R] [add_comm_group A] [add_comm_group B] [module R B] :
(A →+ B) ≃ₗ[R] (A →ₗ[ℤ] B) :=
{ to_fun := add_monoid_hom.to_int_linear_map,
inv_fun := linear_map.to_add_monoid_hom,
map_add' := by { intros, ext, refl },
map_smul' := by { intros, ext, refl },
left_inv := by { intros f, ext, refl },
right_inv := by { intros f, ext, refl } }
/-! ### Properties of submodules -/
namespace submodule
section add_comm_monoid
variables [semiring R] [semiring R₂] [semiring R₃]
variables [add_comm_monoid M] [add_comm_monoid M₂] [add_comm_monoid M₃] [add_comm_monoid M']
variables [module R M] [module R M'] [module R₂ M₂] [module R₃ M₃]
variables {σ₁₂ : R →+* R₂} {σ₂₃ : R₂ →+* R₃} {σ₁₃ : R →+* R₃}
variables {σ₂₁ : R₂ →+* R}
variables [ring_hom_inv_pair σ₁₂ σ₂₁] [ring_hom_inv_pair σ₂₁ σ₁₂]
variables [ring_hom_comp_triple σ₁₂ σ₂₃ σ₁₃]
variables (p p' : submodule R M) (q q' : submodule R₂ M₂)
variables (q₁ q₁' : submodule R M')
variables {r : R} {x y : M}
open set
variables {p p'}
/-- If two submodules `p` and `p'` satisfy `p ≤ p'`, then `of_le h` (where `h : p ≤ p'`) is the
linear map version of this inclusion. -/
def of_le (h : p ≤ p') : p →ₗ[R] p' :=
p.subtype.cod_restrict p' $ λ ⟨x, hx⟩, h hx
@[simp] theorem coe_of_le (h : p ≤ p') (x : p) :
(of_le h x : M) = x := rfl
theorem of_le_apply (h : p ≤ p') (x : p) : of_le h x = ⟨x, h x.2⟩ := rfl
theorem of_le_injective (h : p ≤ p') : function.injective (of_le h) :=
λ x y h, subtype.val_injective (subtype.mk.inj h)
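-- The inclusion `of_le` does nothing to the underlying element; a sketch at a concrete type
-- (submodules of `ℕ` over `ℕ`, an arbitrary choice).
example {s t : submodule ℕ ℕ} (hst : s ≤ t) (v : s) : (of_le hst v : ℕ) = v := rfl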
variables (p p')
lemma subtype_comp_of_le (p q : submodule R M) (h : p ≤ q) :
q.subtype.comp (of_le h) = p.subtype :=
by { ext ⟨b, hb⟩, refl }
variables (R)
@[simp] lemma subsingleton_iff : subsingleton (submodule R M) ↔ subsingleton M :=
have h : subsingleton (submodule R M) ↔ subsingleton (add_submonoid M),
{ rw [←subsingleton_iff_bot_eq_top, ←subsingleton_iff_bot_eq_top],
convert to_add_submonoid_eq.symm; refl, },
h.trans add_submonoid.subsingleton_iff
@[simp] lemma nontrivial_iff : nontrivial (submodule R M) ↔ nontrivial M :=
not_iff_not.mp (
(not_nontrivial_iff_subsingleton.trans $ subsingleton_iff R).trans
not_nontrivial_iff_subsingleton.symm)
variables {R}
instance [subsingleton M] : unique (submodule R M) :=
⟨⟨⊥⟩, λ a, @subsingleton.elim _ ((subsingleton_iff R).mpr ‹_›) a _⟩
instance unique' [subsingleton R] : unique (submodule R M) :=
by haveI := module.subsingleton R M; apply_instance
instance [nontrivial M] : nontrivial (submodule R M) := (nontrivial_iff R).mpr ‹_›
theorem mem_right_iff_eq_zero_of_disjoint {p p' : submodule R M} (h : disjoint p p') {x : p} :
(x:M) ∈ p' ↔ x = 0 :=
⟨λ hx, coe_eq_zero.1 $ disjoint_def.1 h x x.2 hx, λ h, h.symm ▸ p'.zero_mem⟩
theorem mem_left_iff_eq_zero_of_disjoint {p p' : submodule R M} (h : disjoint p p') {x : p'} :
(x:M) ∈ p ↔ x = 0 :=
⟨λ hx, coe_eq_zero.1 $ disjoint_def.1 h x hx x.2, λ h, h.symm ▸ p.zero_mem⟩
section
variables [ring_hom_surjective σ₁₂] {F : Type*} [sc : semilinear_map_class F σ₁₂ M M₂]
include sc
/-- The pushforward of a submodule `p ⊆ M` by `f : M → M₂` -/
def map (f : F) (p : submodule R M) : submodule R₂ M₂ :=
{ carrier := f '' p,
smul_mem' :=
begin
rintro c x ⟨y, hy, rfl⟩,
obtain ⟨a, rfl⟩ := σ₁₂.is_surjective c,
exact ⟨_, p.smul_mem a hy, map_smulₛₗ f _ _⟩,
end,
.. p.to_add_submonoid.map f }
@[simp] lemma map_coe (f : F) (p : submodule R M) :
(map f p : set M₂) = f '' p := rfl
omit sc
lemma map_to_add_submonoid (f : M →ₛₗ[σ₁₂] M₂) (p : submodule R M) :
(p.map f).to_add_submonoid = p.to_add_submonoid.map (f : M →+ M₂) :=
set_like.coe_injective rfl
lemma map_to_add_submonoid' (f : M →ₛₗ[σ₁₂] M₂) (p : submodule R M) :
(p.map f).to_add_submonoid = p.to_add_submonoid.map f :=
set_like.coe_injective rfl
include sc
@[simp] lemma mem_map {f : F} {p : submodule R M} {x : M₂} :
x ∈ map f p ↔ ∃ y, y ∈ p ∧ f y = x := iff.rfl
theorem mem_map_of_mem {f : F} {p : submodule R M} {r} (h : r ∈ p) :
f r ∈ map f p := set.mem_image_of_mem _ h
lemma apply_coe_mem_map (f : F) {p : submodule R M} (r : p) :
f r ∈ map f p := mem_map_of_mem r.prop
omit sc
@[simp] lemma map_id : map (linear_map.id : M →ₗ[R] M) p = p :=
submodule.ext $ λ a, by simp
lemma map_comp [ring_hom_surjective σ₂₃] [ring_hom_surjective σ₁₃]
(f : M →ₛₗ[σ₁₂] M₂) (g : M₂ →ₛₗ[σ₂₃] M₃)
(p : submodule R M) : map (g.comp f : M →ₛₗ[σ₁₃] M₃) p = map g (map f p) :=
set_like.coe_injective $ by simp [map_coe]; rw ← image_comp
include sc
lemma map_mono {f : F} {p p' : submodule R M} :
p ≤ p' → map f p ≤ map f p' := image_subset _
omit sc
@[simp] lemma map_zero : map (0 : M →ₛₗ[σ₁₂] M₂) p = ⊥ :=
have ∃ (x : M), x ∈ p := ⟨0, p.zero_mem⟩,
ext $ by simp [this, eq_comm]
lemma map_add_le (f g : M →ₛₗ[σ₁₂] M₂) : map (f + g) p ≤ map f p ⊔ map g p :=
begin
rintros x ⟨m, hm, rfl⟩,
exact add_mem_sup (mem_map_of_mem hm) (mem_map_of_mem hm),
end
lemma range_map_nonempty (N : submodule R M) :
(set.range (λ ϕ, submodule.map ϕ N : (M →ₛₗ[σ₁₂] M₂) → submodule R₂ M₂)).nonempty :=
⟨_, set.mem_range.mpr ⟨0, rfl⟩⟩
end
variables {F : Type*} [sc : semilinear_map_class F σ₁₂ M M₂]
include σ₂₁ sc
/-- The pushforward of a submodule by an injective linear map is
linearly equivalent to the original submodule. See also `linear_equiv.submodule_map` for a
computable version when `f` has an explicit inverse. -/
noncomputable def equiv_map_of_injective (f : F) (i : injective f)
(p : submodule R M) : p ≃ₛₗ[σ₁₂] p.map f :=
{ map_add' := by { intros, simp, refl },
map_smul' := by { intros, simp, refl },
..(equiv.set.image f p i) }
@[simp] lemma coe_equiv_map_of_injective_apply (f : F) (i : injective f)
(p : submodule R M) (x : p) :
(equiv_map_of_injective f i p x : M₂) = f x := rfl
omit σ₂₁
/-- The pullback of a submodule `p ⊆ M₂` along `f : M → M₂` -/
def comap (f : F) (p : submodule R₂ M₂) : submodule R M :=
{ carrier := f ⁻¹' p,
smul_mem' := λ a x h, by simp [p.smul_mem _ h],
.. p.to_add_submonoid.comap f }
@[simp] lemma comap_coe (f : F) (p : submodule R₂ M₂) :
(comap f p : set M) = f ⁻¹' p := rfl
@[simp] lemma mem_comap {f : F} {p : submodule R₂ M₂} :
x ∈ comap f p ↔ f x ∈ p := iff.rfl
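-- Membership in the pullback unfolds to membership of the image; an illustrative sketch over `ℕ`.
example (φ : ℕ →ₗ[ℕ] ℕ) (s : submodule ℕ ℕ) (v : ℕ) (h : φ v ∈ s) : v ∈ s.comap φ :=
mem_comap.mpr h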
omit sc
@[simp] lemma comap_id : comap (linear_map.id : M →ₗ[R] M) p = p :=
set_like.coe_injective rfl
lemma comap_comp (f : M →ₛₗ[σ₁₂] M₂) (g : M₂ →ₛₗ[σ₂₃] M₃)
(p : submodule R₃ M₃) : comap (g.comp f : M →ₛₗ[σ₁₃] M₃) p = comap f (comap g p) :=
rfl
include sc
lemma comap_mono {f : F} {q q' : submodule R₂ M₂} :
q ≤ q' → comap f q ≤ comap f q' := preimage_mono
omit sc
lemma le_comap_pow_of_le_comap (p : submodule R M) {f : M →ₗ[R] M} (h : p ≤ p.comap f) (k : ℕ) :
p ≤ p.comap (f^k) :=
begin
induction k with k ih,
{ simp [linear_map.one_eq_id], },
{ simp [linear_map.iterate_succ, comap_comp, h.trans (comap_mono ih)], },
end
section
variables [ring_hom_surjective σ₁₂]
include sc
lemma map_le_iff_le_comap {f : F} {p : submodule R M} {q : submodule R₂ M₂} :
map f p ≤ q ↔ p ≤ comap f q := image_subset_iff
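-- The adjunction between `map` and `comap`, spelled out at a concrete type
-- (a sketch over `ℕ`; it only uses the `ring_hom.id` instances supplied by the library).
example (φ : ℕ →ₗ[ℕ] ℕ) {s t : submodule ℕ ℕ} : s.map φ ≤ t ↔ s ≤ t.comap φ :=
map_le_iff_le_comap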
lemma gc_map_comap (f : F) : galois_connection (map f) (comap f)
| p q := map_le_iff_le_comap
@[simp] lemma map_bot (f : F) : map f ⊥ = ⊥ :=
(gc_map_comap f).l_bot
@[simp] lemma map_sup (f : F) : map f (p ⊔ p') = map f p ⊔ map f p' :=
(gc_map_comap f : galois_connection (map f) (comap f)).l_sup
@[simp] lemma map_supr {ι : Sort*} (f : F) (p : ι → submodule R M) :
map f (⨆i, p i) = (⨆i, map f (p i)) :=
(gc_map_comap f : galois_connection (map f) (comap f)).l_supr
end
include sc
@[simp] lemma comap_top (f : F) : comap f ⊤ = ⊤ := rfl
@[simp] lemma comap_inf (f : F) : comap f (q ⊓ q') = comap f q ⊓ comap f q' := rfl
@[simp] lemma comap_infi [ring_hom_surjective σ₁₂] {ι : Sort*} (f : F)
(p : ι → submodule R₂ M₂) :
comap f (⨅i, p i) = (⨅i, comap f (p i)) :=
(gc_map_comap f : galois_connection (map f) (comap f)).u_infi
omit sc
@[simp] lemma comap_zero : comap (0 : M →ₛₗ[σ₁₂] M₂) q = ⊤ :=
ext $ by simp
include sc
lemma map_comap_le [ring_hom_surjective σ₁₂] (f : F) (q : submodule R₂ M₂) :
map f (comap f q) ≤ q :=
(gc_map_comap f).l_u_le _
lemma le_comap_map [ring_hom_surjective σ₁₂] (f : F) (p : submodule R M) :
p ≤ comap f (map f p) :=
(gc_map_comap f).le_u_l _
section galois_insertion
variables {f : F} (hf : surjective f)
variables [ring_hom_surjective σ₁₂]
include hf
/-- `map f` and `comap f` form a `galois_insertion` when `f` is surjective. -/
def gi_map_comap : galois_insertion (map f) (comap f) :=
(gc_map_comap f).to_galois_insertion
(λ S x hx, begin
rcases hf x with ⟨y, rfl⟩,
simp only [mem_map, mem_comap],
exact ⟨y, hx, rfl⟩
end)
lemma map_comap_eq_of_surjective (p : submodule R₂ M₂) : (p.comap f).map f = p :=
(gi_map_comap hf).l_u_eq _
lemma map_surjective_of_surjective : function.surjective (map f) :=
(gi_map_comap hf).l_surjective
lemma comap_injective_of_surjective : function.injective (comap f) :=
(gi_map_comap hf).u_injective
lemma map_sup_comap_of_surjective (p q : submodule R₂ M₂) :
(p.comap f ⊔ q.comap f).map f = p ⊔ q :=
(gi_map_comap hf).l_sup_u _ _
lemma map_supr_comap_of_surjective {ι : Sort*} (S : ι → submodule R₂ M₂) :
(⨆ i, (S i).comap f).map f = supr S :=
(gi_map_comap hf).l_supr_u _
lemma map_inf_comap_of_surjective (p q : submodule R₂ M₂) :
(p.comap f ⊓ q.comap f).map f = p ⊓ q :=
(gi_map_comap hf).l_inf_u _ _
lemma map_infi_comap_of_surjective {ι : Sort*} (S : ι → submodule R₂ M₂) :
(⨅ i, (S i).comap f).map f = infi S :=
(gi_map_comap hf).l_infi_u _
lemma comap_le_comap_iff_of_surjective (p q : submodule R₂ M₂) :
p.comap f ≤ q.comap f ↔ p ≤ q :=
(gi_map_comap hf).u_le_u_iff
lemma comap_strict_mono_of_surjective : strict_mono (comap f) :=
(gi_map_comap hf).strict_mono_u
end galois_insertion
section galois_coinsertion
variables [ring_hom_surjective σ₁₂] {f : F} (hf : injective f)
include hf
/-- `map f` and `comap f` form a `galois_coinsertion` when `f` is injective. -/
def gci_map_comap : galois_coinsertion (map f) (comap f) :=
(gc_map_comap f).to_galois_coinsertion
(λ S x, by simp [mem_comap, mem_map, hf.eq_iff])
lemma comap_map_eq_of_injective (p : submodule R M) : (p.map f).comap f = p :=
(gci_map_comap hf).u_l_eq _
lemma comap_surjective_of_injective : function.surjective (comap f) :=
(gci_map_comap hf).u_surjective
lemma map_injective_of_injective : function.injective (map f) :=
(gci_map_comap hf).l_injective
lemma comap_inf_map_of_injective (p q : submodule R M) : (p.map f ⊓ q.map f).comap f = p ⊓ q :=
(gci_map_comap hf).u_inf_l _ _
lemma comap_infi_map_of_injective {ι : Sort*} (S : ι → submodule R M) :
(⨅ i, (S i).map f).comap f = infi S :=
(gci_map_comap hf).u_infi_l _
lemma comap_sup_map_of_injective (p q : submodule R M) : (p.map f ⊔ q.map f).comap f = p ⊔ q :=
(gci_map_comap hf).u_sup_l _ _
lemma comap_supr_map_of_injective {ι : Sort*} (S : ι → submodule R M) :
(⨆ i, (S i).map f).comap f = supr S :=
(gci_map_comap hf).u_supr_l _
lemma map_le_map_iff_of_injective (p q : submodule R M) : p.map f ≤ q.map f ↔ p ≤ q :=
(gci_map_comap hf).l_le_l_iff
lemma map_strict_mono_of_injective : strict_mono (map f) :=
(gci_map_comap hf).strict_mono_l
end galois_coinsertion
--TODO(Mario): is there a way to prove this from order properties?
lemma map_inf_eq_map_inf_comap [ring_hom_surjective σ₁₂] {f : F}
{p : submodule R M} {p' : submodule R₂ M₂} :
map f p ⊓ p' = map f (p ⊓ comap f p') :=
le_antisymm
(by rintro _ ⟨⟨x, h₁, rfl⟩, h₂⟩; exact ⟨_, ⟨h₁, h₂⟩, rfl⟩)
(le_inf (map_mono inf_le_left) (map_le_iff_le_comap.2 inf_le_right))
omit sc
lemma map_comap_subtype : map p.subtype (comap p.subtype p') = p ⊓ p' :=
ext $ λ x, ⟨by rintro ⟨⟨_, h₁⟩, h₂, rfl⟩; exact ⟨h₁, h₂⟩, λ ⟨h₁, h₂⟩, ⟨⟨_, h₁⟩, h₂, rfl⟩⟩
lemma eq_zero_of_bot_submodule : ∀(b : (⊥ : submodule R M)), b = 0
| ⟨b', hb⟩ := subtype.eq $ show b' = 0, from (mem_bot R).1 hb
/-- The infimum of a family of invariant submodules of an endomorphism is also an invariant
submodule. -/
lemma _root_.linear_map.infi_invariant {σ : R →+* R} [ring_hom_surjective σ] {ι : Sort*}
(f : M →ₛₗ[σ] M) {p : ι → submodule R M} (hf : ∀ i, ∀ v ∈ (p i), f v ∈ p i) :
∀ v ∈ infi p, f v ∈ infi p :=
begin
have : ∀ i, (p i).map f ≤ p i,
{ rintros i - ⟨v, hv, rfl⟩,
exact hf i v hv },
suffices : (infi p).map f ≤ infi p,
{ exact λ v hv, this ⟨v, hv, rfl⟩, },
exact le_infi (λ i, (submodule.map_mono (infi_le p i)).trans (this i)),
end
end add_comm_monoid
section add_comm_group
variables [ring R] [add_comm_group M] [module R M] (p : submodule R M)
variables [add_comm_group M₂] [module R M₂]
@[simp] lemma neg_coe : -(p : set M) = p := set.ext $ λ x, p.neg_mem_iff
@[simp] protected lemma map_neg (f : M →ₗ[R] M₂) : map (-f) p = map f p :=
ext $ λ y, ⟨λ ⟨x, hx, hy⟩, hy ▸ ⟨-x, show -x ∈ p, from neg_mem hx, map_neg f x⟩,
λ ⟨x, hx, hy⟩, hy ▸ ⟨-x, show -x ∈ p, from neg_mem hx, (map_neg (-f) _).trans (neg_neg (f x))⟩⟩
end add_comm_group
end submodule
namespace submodule
variables [field K]
variables [add_comm_group V] [module K V]
variables [add_comm_group V₂] [module K V₂]
lemma comap_smul (f : V →ₗ[K] V₂) (p : submodule K V₂) (a : K) (h : a ≠ 0) :
p.comap (a • f) = p.comap f :=
by ext b; simp only [submodule.mem_comap, p.smul_mem_iff h, linear_map.smul_apply]
lemma map_smul (f : V →ₗ[K] V₂) (p : submodule K V) (a : K) (h : a ≠ 0) :
p.map (a • f) = p.map f :=
le_antisymm
begin rw [map_le_iff_le_comap, comap_smul f _ a h, ← map_le_iff_le_comap], exact le_rfl end
begin rw [map_le_iff_le_comap, ← comap_smul f _ a h, ← map_le_iff_le_comap], exact le_rfl end
lemma comap_smul' (f : V →ₗ[K] V₂) (p : submodule K V₂) (a : K) :
p.comap (a • f) = (⨅ h : a ≠ 0, p.comap f) :=
by classical; by_cases a = 0; simp [h, comap_smul]
lemma map_smul' (f : V →ₗ[K] V₂) (p : submodule K V) (a : K) :
p.map (a • f) = (⨆ h : a ≠ 0, p.map f) :=
by classical; by_cases a = 0; simp [h, map_smul]
end submodule
/-! ### Properties of linear maps -/
namespace linear_map
section add_comm_monoid
variables [semiring R] [semiring R₂] [semiring R₃]
variables [add_comm_monoid M] [add_comm_monoid M₂] [add_comm_monoid M₃]
variables {σ₁₂ : R →+* R₂} {σ₂₃ : R₂ →+* R₃} {σ₁₃ : R →+* R₃}
variables [ring_hom_comp_triple σ₁₂ σ₂₃ σ₁₃]
variables [module R M] [module R₂ M₂] [module R₃ M₃]
include R
open submodule
section finsupp
variables {γ : Type*} [has_zero γ]
@[simp] lemma map_finsupp_sum (f : M →ₛₗ[σ₁₂] M₂) {t : ι →₀ γ} {g : ι → γ → M} :
f (t.sum g) = t.sum (λ i d, f (g i d)) := f.map_sum
lemma coe_finsupp_sum (t : ι →₀ γ) (g : ι → γ → M →ₛₗ[σ₁₂] M₂) :
⇑(t.sum g) = t.sum (λ i d, g i d) := coe_fn_sum _ _
@[simp] lemma finsupp_sum_apply (t : ι →₀ γ) (g : ι → γ → M →ₛₗ[σ₁₂] M₂) (b : M) :
(t.sum g) b = t.sum (λ i d, g i d b) := sum_apply _ _ _
end finsupp
section dfinsupp
open dfinsupp
variables {γ : ι → Type*} [decidable_eq ι]
section sum
variables [Π i, has_zero (γ i)] [Π i (x : γ i), decidable (x ≠ 0)]
@[simp] lemma map_dfinsupp_sum (f : M →ₛₗ[σ₁₂] M₂) {t : Π₀ i, γ i} {g : Π i, γ i → M} :
f (t.sum g) = t.sum (λ i d, f (g i d)) := f.map_sum
lemma coe_dfinsupp_sum (t : Π₀ i, γ i) (g : Π i, γ i → M →ₛₗ[σ₁₂] M₂) :
⇑(t.sum g) = t.sum (λ i d, g i d) := coe_fn_sum _ _
@[simp] lemma dfinsupp_sum_apply (t : Π₀ i, γ i) (g : Π i, γ i → M →ₛₗ[σ₁₂] M₂) (b : M) :
(t.sum g) b = t.sum (λ i d, g i d b) := sum_apply _ _ _
end sum
section sum_add_hom
variables [Π i, add_zero_class (γ i)]
@[simp] lemma map_dfinsupp_sum_add_hom (f : M →ₛₗ[σ₁₂] M₂) {t : Π₀ i, γ i} {g : Π i, γ i →+ M} :
f (sum_add_hom g t) = sum_add_hom (λ i, f.to_add_monoid_hom.comp (g i)) t :=
f.to_add_monoid_hom.map_dfinsupp_sum_add_hom _ _
end sum_add_hom
end dfinsupp
variables {σ₂₁ : R₂ →+* R} {τ₁₂ : R →+* R₂} {τ₂₃ : R₂ →+* R₃} {τ₁₃ : R →+* R₃}
variables [ring_hom_comp_triple τ₁₂ τ₂₃ τ₁₃]
theorem map_cod_restrict [ring_hom_surjective σ₂₁] (p : submodule R M) (f : M₂ →ₛₗ[σ₂₁] M) (h p') :
submodule.map (cod_restrict p f h) p' = comap p.subtype (p'.map f) :=
submodule.ext $ λ ⟨x, hx⟩, by simp [subtype.ext_iff_val]
theorem comap_cod_restrict (p : submodule R M) (f : M₂ →ₛₗ[σ₂₁] M) (hf p') :
submodule.comap (cod_restrict p f hf) p' = submodule.comap f (map p.subtype p') :=
submodule.ext $ λ x, ⟨λ h, ⟨⟨_, hf x⟩, h, rfl⟩, by rintro ⟨⟨_, _⟩, h, ⟨⟩⟩; exact h⟩
section
variables {F : Type*} [sc : semilinear_map_class F τ₁₂ M M₂]
include sc
/-- The range of a linear map `f : M → M₂` is a submodule of `M₂`.
See Note [range copy pattern]. -/
def range [ring_hom_surjective τ₁₂] (f : F) : submodule R₂ M₂ :=
(map f ⊤).copy (set.range f) set.image_univ.symm
theorem range_coe [ring_hom_surjective τ₁₂] (f : F) :
(range f : set M₂) = set.range f := rfl
omit sc
lemma range_to_add_submonoid [ring_hom_surjective τ₁₂] (f : M →ₛₗ[τ₁₂] M₂) :
f.range.to_add_submonoid = f.to_add_monoid_hom.mrange := rfl
include sc
@[simp] theorem mem_range [ring_hom_surjective τ₁₂]
{f : F} {x} : x ∈ range f ↔ ∃ y, f y = x :=
iff.rfl
lemma range_eq_map [ring_hom_surjective τ₁₂]
(f : F) : range f = map f ⊤ :=
by { ext, simp }
theorem mem_range_self [ring_hom_surjective τ₁₂]
(f : F) (x : M) : f x ∈ range f := ⟨x, rfl⟩
omit sc
@[simp] theorem range_id : range (linear_map.id : M →ₗ[R] M) = ⊤ :=
set_like.coe_injective set.range_id
theorem range_comp [ring_hom_surjective τ₁₂] [ring_hom_surjective τ₂₃] [ring_hom_surjective τ₁₃]
(f : M →ₛₗ[τ₁₂] M₂) (g : M₂ →ₛₗ[τ₂₃] M₃) :
range (g.comp f : M →ₛₗ[τ₁₃] M₃) = map g (range f) :=
set_like.coe_injective (set.range_comp g f)
theorem range_comp_le_range [ring_hom_surjective τ₂₃] [ring_hom_surjective τ₁₃]
(f : M →ₛₗ[τ₁₂] M₂) (g : M₂ →ₛₗ[τ₂₃] M₃) :
range (g.comp f : M →ₛₗ[τ₁₃] M₃) ≤ range g :=
set_like.coe_mono (set.range_comp_subset_range f g)
include sc
theorem range_eq_top [ring_hom_surjective τ₁₂] {f : F} :
range f = ⊤ ↔ surjective f :=
by rw [set_like.ext'_iff, range_coe, top_coe, set.range_iff_surjective]
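-- `range_eq_top` in its most common direction; an illustrative sketch over `ℕ`.
example {φ : ℕ →ₗ[ℕ] ℕ} (hφ : surjective φ) : range φ = ⊤ :=
range_eq_top.mpr hφ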
lemma range_le_iff_comap [ring_hom_surjective τ₁₂] {f : F} {p : submodule R₂ M₂} :
range f ≤ p ↔ comap f p = ⊤ :=
by rw [range_eq_map, map_le_iff_le_comap, eq_top_iff]
lemma map_le_range [ring_hom_surjective τ₁₂] {f : F} {p : submodule R M} :
map f p ≤ range f :=
set_like.coe_mono (set.image_subset_range f p)
omit sc
@[simp] lemma range_neg {R : Type*} {R₂ : Type*} {M : Type*} {M₂ : Type*}
[semiring R] [ring R₂] [add_comm_monoid M] [add_comm_group M₂] [module R M] [module R₂ M₂]
{τ₁₂ : R →+* R₂} [ring_hom_surjective τ₁₂] (f : M →ₛₗ[τ₁₂] M₂) :
(-f).range = f.range :=
begin
change ((-linear_map.id : M₂ →ₗ[R₂] M₂).comp f).range = _,
rw [range_comp, submodule.map_neg, submodule.map_id],
end
end
/--
The decreasing sequence of submodules consisting of the ranges of the iterates of a linear map.
-/
@[simps]
def iterate_range (f : M →ₗ[R] M) : ℕ →o (submodule R M)ᵒᵈ :=
⟨λ n, (f ^ n).range, λ n m w x h, begin
obtain ⟨c, rfl⟩ := le_iff_exists_add.mp w,
rw linear_map.mem_range at h,
obtain ⟨m, rfl⟩ := h,
rw linear_map.mem_range,
use (f ^ c) m,
rw [pow_add, linear_map.mul_apply],
end⟩
/-- Restrict the codomain of a linear map `f` to `f.range`.
This is the bundled version of `set.range_factorization`. -/
@[reducible] def range_restrict [ring_hom_surjective τ₁₂] (f : M →ₛₗ[τ₁₂] M₂) :
M →ₛₗ[τ₁₂] f.range := f.cod_restrict f.range f.mem_range_self
/-- The range of a linear map is finite if the domain is finite.
Note: this instance can form a diamond with `subtype.fintype` in the
presence of `fintype M₂`. -/
instance fintype_range [fintype M] [decidable_eq M₂] [ring_hom_surjective τ₁₂]
(f : M →ₛₗ[τ₁₂] M₂) : fintype (range f) :=
set.fintype_range f
variables {F : Type*} [sc : semilinear_map_class F τ₁₂ M M₂]
include sc
/-- The kernel of a linear map `f : M → M₂` is defined to be `comap f ⊥`. This is equivalent to the
set of `x : M` such that `f x = 0`. The kernel is a submodule of `M`. -/
def ker (f : F) : submodule R M := comap f ⊥
@[simp] theorem mem_ker {f : F} {y} : y ∈ ker f ↔ f y = 0 := mem_bot R₂
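-- The defining property of the kernel, spelled out; an illustrative sketch over `ℕ`.
example (φ : ℕ →ₗ[ℕ] ℕ) (v : ℕ) (h : φ v = 0) : v ∈ ker φ :=
mem_ker.mpr h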
omit sc
@[simp] theorem ker_id : ker (linear_map.id : M →ₗ[R] M) = ⊥ := rfl
include sc
@[simp] theorem map_coe_ker (f : F) (x : ker f) : f x = 0 := mem_ker.1 x.2
omit sc
lemma ker_to_add_submonoid (f : M →ₛₗ[τ₁₂] M₂) :
f.ker.to_add_submonoid = f.to_add_monoid_hom.mker := rfl
lemma comp_ker_subtype (f : M →ₛₗ[τ₁₂] M₂) : f.comp f.ker.subtype = 0 :=
linear_map.ext $ λ x, suffices f x = 0, by simp [this], mem_ker.1 x.2
theorem ker_comp (f : M →ₛₗ[τ₁₂] M₂) (g : M₂ →ₛₗ[τ₂₃] M₃) :
ker (g.comp f : M →ₛₗ[τ₁₃] M₃) = comap f (ker g) := rfl
theorem ker_le_ker_comp (f : M →ₛₗ[τ₁₂] M₂) (g : M₂ →ₛₗ[τ₂₃] M₃) :
ker f ≤ ker (g.comp f : M →ₛₗ[τ₁₃] M₃) :=
by rw ker_comp; exact comap_mono bot_le
include sc
theorem disjoint_ker {f : F} {p : submodule R M} :
disjoint p (ker f) ↔ ∀ x ∈ p, f x = 0 → x = 0 :=
by simp [disjoint_def]
theorem ker_eq_bot' {f : F} :
ker f = ⊥ ↔ (∀ m, f m = 0 → m = 0) :=
by simpa [disjoint] using @disjoint_ker _ _ _ _ _ _ _ _ _ _ _ _ _ f ⊤
omit sc
theorem ker_eq_bot_of_inverse {τ₂₁ : R₂ →+* R} [ring_hom_inv_pair τ₁₂ τ₂₁]
{f : M →ₛₗ[τ₁₂] M₂} {g : M₂ →ₛₗ[τ₂₁] M} (h : (g.comp f : M →ₗ[R] M) = id) :
ker f = ⊥ :=
ker_eq_bot'.2 $ λ m hm, by rw [← id_apply m, ← h, comp_apply, hm, g.map_zero]
include sc
lemma le_ker_iff_map [ring_hom_surjective τ₁₂] {f : F} {p : submodule R M} :
p ≤ ker f ↔ map f p = ⊥ :=
by rw [ker, eq_bot_iff, map_le_iff_le_comap]
omit sc
lemma ker_cod_restrict {τ₂₁ : R₂ →+* R} (p : submodule R M) (f : M₂ →ₛₗ[τ₂₁] M) (hf) :
ker (cod_restrict p f hf) = ker f :=
by rw [ker, comap_cod_restrict, map_bot]; refl
lemma range_cod_restrict {τ₂₁ : R₂ →+* R} [ring_hom_surjective τ₂₁] (p : submodule R M)
(f : M₂ →ₛₗ[τ₂₁] M) (hf) :
range (cod_restrict p f hf) = comap p.subtype f.range :=
by simpa only [range_eq_map] using map_cod_restrict _ _ _ _
lemma ker_restrict {p : submodule R M} {f : M →ₗ[R] M} (hf : ∀ x : M, x ∈ p → f x ∈ p) :
ker (f.restrict hf) = (f.dom_restrict p).ker :=
by rw [restrict_eq_cod_restrict_dom_restrict, ker_cod_restrict]
include sc
lemma _root_.submodule.map_comap_eq [ring_hom_surjective τ₁₂]
(f : F) (q : submodule R₂ M₂) : map f (comap f q) = range f ⊓ q :=
le_antisymm (le_inf map_le_range (map_comap_le _ _)) $
by rintro _ ⟨⟨x, _, rfl⟩, hx⟩; exact ⟨x, hx, rfl⟩
lemma _root_.submodule.map_comap_eq_self [ring_hom_surjective τ₁₂]
{f : F} {q : submodule R₂ M₂} (h : q ≤ range f) : map f (comap f q) = q :=
by rwa [submodule.map_comap_eq, inf_eq_right]
omit sc
@[simp] theorem ker_zero : ker (0 : M →ₛₗ[τ₁₂] M₂) = ⊤ :=
eq_top_iff'.2 $ λ x, by simp
@[simp] theorem range_zero [ring_hom_surjective τ₁₂] : range (0 : M →ₛₗ[τ₁₂] M₂) = ⊥ :=
by simpa only [range_eq_map] using submodule.map_zero _
theorem ker_eq_top {f : M →ₛₗ[τ₁₂] M₂} : ker f = ⊤ ↔ f = 0 :=
⟨λ h, ext $ λ x, mem_ker.1 $ h.symm ▸ trivial, λ h, h.symm ▸ ker_zero⟩
section
variables [ring_hom_surjective τ₁₂]
lemma range_le_bot_iff (f : M →ₛₗ[τ₁₂] M₂) : range f ≤ ⊥ ↔ f = 0 :=
by rw [range_le_iff_comap]; exact ker_eq_top
theorem range_eq_bot {f : M →ₛₗ[τ₁₂] M₂} : range f = ⊥ ↔ f = 0 :=
by rw [← range_le_bot_iff, le_bot_iff]
lemma range_le_ker_iff {f : M →ₛₗ[τ₁₂] M₂} {g : M₂ →ₛₗ[τ₂₃] M₃} :
range f ≤ ker g ↔ (g.comp f : M →ₛₗ[τ₁₃] M₃) = 0 :=
⟨λ h, ker_eq_top.1 $ eq_top_iff'.2 $ λ x, h $ ⟨_, rfl⟩,
λ h x hx, mem_ker.2 $ exists.elim hx $ λ y hy, by rw [←hy, ←comp_apply, h, zero_apply]⟩
include sc
theorem comap_le_comap_iff {f : F} (hf : range f = ⊤) {p p'} :
comap f p ≤ comap f p' ↔ p ≤ p' :=
⟨λ H x hx, by rcases range_eq_top.1 hf x with ⟨y, hy, rfl⟩; exact H hx, comap_mono⟩
theorem comap_injective {f : F} (hf : range f = ⊤) : injective (comap f) :=
λ p p' h, le_antisymm ((comap_le_comap_iff hf).1 (le_of_eq h))
((comap_le_comap_iff hf).1 (ge_of_eq h))
end
include sc
theorem ker_eq_bot_of_injective {f : F} (hf : injective f) : ker f = ⊥ :=
begin
have : disjoint ⊤ (ker f), by { rw [disjoint_ker, ← map_zero f], exact λ x hx H, hf H },
simpa [disjoint]
end
omit sc
/--
The increasing sequence of submodules consisting of the kernels of the iterates of a linear map.
-/
@[simps]
def iterate_ker (f : M →ₗ[R] M) : ℕ →o submodule R M :=
⟨λ n, (f ^ n).ker, λ n m w x h, begin
obtain ⟨c, rfl⟩ := le_iff_exists_add.mp w,
rw linear_map.mem_ker at h,
rw [linear_map.mem_ker, add_comm, pow_add, linear_map.mul_apply, h, linear_map.map_zero],
end⟩
end add_comm_monoid
section ring
variables [ring R] [ring R₂] [ring R₃]
variables [add_comm_group M] [add_comm_group M₂] [add_comm_group M₃]
variables [module R M] [module R₂ M₂] [module R₃ M₃]
variables {τ₁₂ : R →+* R₂} {τ₂₃ : R₂ →+* R₃} {τ₁₃ : R →+* R₃}
variables [ring_hom_comp_triple τ₁₂ τ₂₃ τ₁₃]
variables {F : Type*} [sc : semilinear_map_class F τ₁₂ M M₂]
variables {f : M →ₛₗ[τ₁₂] M₂}
include R
open submodule
lemma range_to_add_subgroup [ring_hom_surjective τ₁₂] (f : M →ₛₗ[τ₁₂] M₂) :
f.range.to_add_subgroup = f.to_add_monoid_hom.range := rfl
lemma ker_to_add_subgroup (f : M →ₛₗ[τ₁₂] M₂) :
f.ker.to_add_subgroup = f.to_add_monoid_hom.ker := rfl
theorem sub_mem_ker_iff {x y} : x - y ∈ f.ker ↔ f x = f y :=
by rw [mem_ker, map_sub, sub_eq_zero]
theorem disjoint_ker' {p : submodule R M} :
disjoint p (ker f) ↔ ∀ x y ∈ p, f x = f y → x = y :=
disjoint_ker.trans
⟨λ H x hx y hy h, eq_of_sub_eq_zero $ H _ (sub_mem hx hy) (by simp [h]),
λ H x h₁ h₂, H x h₁ 0 (zero_mem _) (by simpa using h₂)⟩
theorem inj_of_disjoint_ker {p : submodule R M}
{s : set M} (h : s ⊆ p) (hd : disjoint p (ker f)) :
∀ x y ∈ s, f x = f y → x = y :=
λ x hx y hy, disjoint_ker'.1 hd _ (h hx) _ (h hy)
theorem ker_eq_bot : ker f = ⊥ ↔ injective f :=
by simpa [disjoint] using @disjoint_ker' _ _ _ _ _ _ _ _ _ _ _ f ⊤
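-- `ker_eq_bot` in its most common direction; an illustrative sketch over `ℤ`
-- (a ring is needed in this section, so `ℕ` would not do).
example {φ : ℤ →ₗ[ℤ] ℤ} (hφ : injective φ) : ker φ = ⊥ :=
ker_eq_bot.mpr hφ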
lemma ker_le_iff [ring_hom_surjective τ₁₂] {p : submodule R M} :
ker f ≤ p ↔ ∃ (y ∈ range f), f ⁻¹' {y} ⊆ p :=
begin
split,
{ intros h, use 0, rw [← set_like.mem_coe, f.range_coe], exact ⟨⟨0, map_zero f⟩, h⟩, },
{ rintros ⟨y, h₁, h₂⟩,
rw set_like.le_def, intros z hz, simp only [mem_ker, set_like.mem_coe] at hz,
rw [← set_like.mem_coe, f.range_coe, set.mem_range] at h₁, obtain ⟨x, hx⟩ := h₁,
have hx' : x ∈ p, { exact h₂ hx, },
have hxz : z + x ∈ p, { apply h₂, simp [hx, hz], },
suffices : z + x - x ∈ p, { simpa only [this, add_sub_cancel], },
exact p.sub_mem hxz hx', },
end
end ring
section field
variables [field K] [field K₂]
variables [add_comm_group V] [module K V]
variables [add_comm_group V₂] [module K V₂]
lemma ker_smul (f : V →ₗ[K] V₂) (a : K) (h : a ≠ 0) : ker (a • f) = ker f :=
submodule.comap_smul f _ a h
lemma ker_smul' (f : V →ₗ[K] V₂) (a : K) : ker (a • f) = ⨅(h : a ≠ 0), ker f :=
submodule.comap_smul' f _ a
lemma range_smul (f : V →ₗ[K] V₂) (a : K) (h : a ≠ 0) : range (a • f) = range f :=
by simpa only [range_eq_map] using submodule.map_smul f _ a h
lemma range_smul' (f : V →ₗ[K] V₂) (a : K) : range (a • f) = ⨆(h : a ≠ 0), range f :=
by simpa only [range_eq_map] using submodule.map_smul' f _ a
end field
end linear_map
namespace is_linear_map
lemma is_linear_map_add [semiring R] [add_comm_monoid M] [module R M] :
is_linear_map R (λ (x : M × M), x.1 + x.2) :=
begin
apply is_linear_map.mk,
{ intros x y,
simp, cc },
{ intros x y,
simp [smul_add] }
end
lemma is_linear_map_sub {R M : Type*} [semiring R] [add_comm_group M] [module R M]:
is_linear_map R (λ (x : M × M), x.1 - x.2) :=
begin
apply is_linear_map.mk,
{ intros x y,
simp [add_comm, add_left_comm, sub_eq_add_neg] },
{ intros x y,
simp [smul_sub] }
end
end is_linear_map
namespace submodule
section add_comm_monoid
variables [semiring R] [semiring R₂] [add_comm_monoid M] [add_comm_monoid M₂]
variables [module R M] [module R₂ M₂]
variables (p p' : submodule R M) (q : submodule R₂ M₂)
variables {τ₁₂ : R →+* R₂}
variables {F : Type*} [sc : semilinear_map_class F τ₁₂ M M₂]
open linear_map
include sc
@[simp] theorem map_top [ring_hom_surjective τ₁₂] (f : F) : map f ⊤ = range f :=
(range_eq_map f).symm
@[simp] theorem comap_bot (f : F) : comap f ⊥ = ker f := rfl
omit sc
@[simp] theorem ker_subtype : p.subtype.ker = ⊥ :=
ker_eq_bot_of_injective $ λ x y, subtype.ext_val
@[simp] theorem range_subtype : p.subtype.range = p :=
by simpa using map_comap_subtype p ⊤
lemma map_subtype_le (p' : submodule R p) : map p.subtype p' ≤ p :=
by simpa using (map_le_range : map p.subtype p' ≤ p.subtype.range)
/-- Under the canonical linear map from a submodule `p` to the ambient space `M`, the image of the
top submodule `⊤` of `p` is just `p`. -/
@[simp] lemma map_subtype_top : map p.subtype (⊤ : submodule R p) = p :=
by simp
@[simp] lemma comap_subtype_eq_top {p p' : submodule R M} :
comap p.subtype p' = ⊤ ↔ p ≤ p' :=
eq_top_iff.trans $ map_le_iff_le_comap.symm.trans $ by rw [map_subtype_top]
@[simp] lemma comap_subtype_self : comap p.subtype p = ⊤ :=
comap_subtype_eq_top.2 le_rfl
@[simp] theorem ker_of_le (p p' : submodule R M) (h : p ≤ p') : (of_le h).ker = ⊥ :=
by rw [of_le, ker_cod_restrict, ker_subtype]
lemma range_of_le (p q : submodule R M) (h : p ≤ q) : (of_le h).range = comap q.subtype p :=
by rw [← map_top, of_le, linear_map.map_cod_restrict, map_top, range_subtype]
@[simp] lemma map_subtype_range_of_le {p p' : submodule R M} (h : p ≤ p') :
map p'.subtype (of_le h).range = p :=
by simp [range_of_le, map_comap_eq, h]
lemma disjoint_iff_comap_eq_bot {p q : submodule R M} :
disjoint p q ↔ comap p.subtype q = ⊥ :=
by rw [←(map_injective_of_injective (show injective p.subtype, from subtype.coe_injective)).eq_iff,
map_comap_subtype, map_bot, disjoint_iff]
/-- If `p ⊆ M` is a submodule, then submodules of `p` are the same as submodules of `M`
contained in `p`. -/
def map_subtype.rel_iso : submodule R p ≃o {p' : submodule R M // p' ≤ p} :=
{ to_fun := λ p', ⟨map p.subtype p', map_subtype_le p _⟩,
inv_fun := λ q, comap p.subtype q,
left_inv := λ p', comap_map_eq_of_injective subtype.coe_injective p',
right_inv := λ ⟨q, hq⟩, subtype.ext_val $ by simp [map_comap_subtype p, inf_of_le_right hq],
map_rel_iff' := λ p₁ p₂, subtype.coe_le_coe.symm.trans begin
dsimp,
rw [map_le_iff_le_comap,
comap_map_eq_of_injective (show injective p.subtype, from subtype.coe_injective) p₂],
end }
/-- If `p ⊆ M` is a submodule, the ordering of submodules of `p` is embedded in the ordering of
submodules of `M`. -/
def map_subtype.order_embedding : submodule R p ↪o submodule R M :=
(rel_iso.to_rel_embedding $ map_subtype.rel_iso p).trans (subtype.rel_embedding _ _)
@[simp] lemma map_subtype_embedding_eq (p' : submodule R p) :
map_subtype.order_embedding p p' = map p.subtype p' := rfl
end add_comm_monoid
end submodule
namespace linear_map
section semiring
variables [semiring R] [semiring R₂] [semiring R₃]
variables [add_comm_monoid M] [add_comm_monoid M₂] [add_comm_monoid M₃]
variables [module R M] [module R₂ M₂] [module R₃ M₃]
variables {τ₁₂ : R →+* R₂} {τ₂₃ : R₂ →+* R₃} {τ₁₃ : R →+* R₃}
variables [ring_hom_comp_triple τ₁₂ τ₂₃ τ₁₃]
/-- A monomorphism is injective. -/
lemma ker_eq_bot_of_cancel {f : M →ₛₗ[τ₁₂] M₂}
(h : ∀ (u v : f.ker →ₗ[R] M), f.comp u = f.comp v → u = v) : f.ker = ⊥ :=
begin
have h₁ : f.comp (0 : f.ker →ₗ[R] M) = 0 := comp_zero _,
rw [←submodule.range_subtype f.ker, ←h 0 f.ker.subtype (eq.trans h₁ (comp_ker_subtype f).symm)],
exact range_zero
end
lemma range_comp_of_range_eq_top [ring_hom_surjective τ₁₂] [ring_hom_surjective τ₂₃]
[ring_hom_surjective τ₁₃]
{f : M →ₛₗ[τ₁₂] M₂} (g : M₂ →ₛₗ[τ₂₃] M₃) (hf : range f = ⊤) :
range (g.comp f : M →ₛₗ[τ₁₃] M₃) = range g :=
by rw [range_comp, hf, submodule.map_top]
lemma ker_comp_of_ker_eq_bot (f : M →ₛₗ[τ₁₂] M₂) {g : M₂ →ₛₗ[τ₂₃] M₃}
(hg : ker g = ⊥) : ker (g.comp f : M →ₛₗ[τ₁₃] M₃) = ker f :=
by rw [ker_comp, hg, submodule.comap_bot]
section image
/-- If `O` is a submodule of `M`, and `ϕ : O →ₗ M'` is a linear map,
then `ϕ.submodule_image N` is `ϕ(N)` as a submodule of `M'`. -/
def submodule_image {M' : Type*} [add_comm_monoid M'] [module R M']
{O : submodule R M} (ϕ : O →ₗ[R] M') (N : submodule R M) : submodule R M' :=
(N.comap O.subtype).map ϕ
@[simp] lemma mem_submodule_image {M' : Type*} [add_comm_monoid M'] [module R M']
{O : submodule R M} {ϕ : O →ₗ[R] M'} {N : submodule R M} {x : M'} :
x ∈ ϕ.submodule_image N ↔ ∃ y (yO : y ∈ O) (yN : y ∈ N), ϕ ⟨y, yO⟩ = x :=
begin
refine submodule.mem_map.trans ⟨_, _⟩; simp_rw submodule.mem_comap,
{ rintro ⟨⟨y, yO⟩, (yN : y ∈ N), h⟩,
exact ⟨y, yO, yN, h⟩ },
{ rintro ⟨y, yO, yN, h⟩,
exact ⟨⟨y, yO⟩, yN, h⟩ }
end
lemma mem_submodule_image_of_le {M' : Type*} [add_comm_monoid M'] [module R M']
{O : submodule R M} {ϕ : O →ₗ[R] M'} {N : submodule R M} (hNO : N ≤ O) {x : M'} :
x ∈ ϕ.submodule_image N ↔ ∃ y (yN : y ∈ N), ϕ ⟨y, hNO yN⟩ = x :=
begin
refine mem_submodule_image.trans ⟨_, _⟩,
{ rintro ⟨y, yO, yN, h⟩,
exact ⟨y, yN, h⟩ },
{ rintro ⟨y, yN, h⟩,
exact ⟨y, hNO yN, yN, h⟩ }
end
lemma submodule_image_apply_of_le {M' : Type*} [add_comm_group M'] [module R M']
{O : submodule R M} (ϕ : O →ₗ[R] M') (N : submodule R M) (hNO : N ≤ O) :
ϕ.submodule_image N = (ϕ.comp (submodule.of_le hNO)).range :=
by rw [submodule_image, range_comp, submodule.range_of_le]
end image
end semiring
end linear_map
@[simp] lemma linear_map.range_range_restrict [semiring R] [add_comm_monoid M] [add_comm_monoid M₂]
[module R M] [module R M₂] (f : M →ₗ[R] M₂) :
f.range_restrict.range = ⊤ :=
by simp [f.range_cod_restrict _]
/-! ### Linear equivalences -/
namespace linear_equiv
section add_comm_monoid
section subsingleton
variables [semiring R] [semiring R₂] [semiring R₃] [semiring R₄]
variables [add_comm_monoid M] [add_comm_monoid M₂] [add_comm_monoid M₃] [add_comm_monoid M₄]
variables [module R M] [module R₂ M₂]
variables [subsingleton M] [subsingleton M₂]
variables {σ₁₂ : R →+* R₂} {σ₂₁ : R₂ →+* R}
variables [ring_hom_inv_pair σ₁₂ σ₂₁] [ring_hom_inv_pair σ₂₁ σ₁₂]
include σ₂₁
/-- Between two zero modules, the zero map is an equivalence. -/
instance : has_zero (M ≃ₛₗ[σ₁₂] M₂) :=
⟨{ to_fun := 0,
inv_fun := 0,
right_inv := λ x, subsingleton.elim _ _,
left_inv := λ x, subsingleton.elim _ _,
..(0 : M →ₛₗ[σ₁₂] M₂)}⟩
omit σ₂₁
-- Even though these are implied by `subsingleton.elim` via the `unique` instance below, they're
-- nice to have as `rfl`-lemmas for `dsimp`.
include σ₂₁
@[simp] lemma zero_symm : (0 : M ≃ₛₗ[σ₁₂] M₂).symm = 0 := rfl
@[simp] lemma coe_zero : ⇑(0 : M ≃ₛₗ[σ₁₂] M₂) = 0 := rfl
lemma zero_apply (x : M) : (0 : M ≃ₛₗ[σ₁₂] M₂) x = 0 := rfl
/-- Between two zero modules, the zero map is the only equivalence. -/
instance : unique (M ≃ₛₗ[σ₁₂] M₂) :=
{ uniq := λ f, to_linear_map_injective (subsingleton.elim _ _),
default := 0 }
omit σ₂₁
end subsingleton
section
variables [semiring R] [semiring R₂] [semiring R₃] [semiring R₄]
variables [add_comm_monoid M] [add_comm_monoid M₂] [add_comm_monoid M₃] [add_comm_monoid M₄]
variables {module_M : module R M} {module_M₂ : module R₂ M₂}
variables {σ₁₂ : R →+* R₂} {σ₂₁ : R₂ →+* R}
variables {re₁₂ : ring_hom_inv_pair σ₁₂ σ₂₁} {re₂₁ : ring_hom_inv_pair σ₂₁ σ₁₂}
variables (e e' : M ≃ₛₗ[σ₁₂] M₂)
lemma map_eq_comap {p : submodule R M} :
(p.map (e : M →ₛₗ[σ₁₂] M₂) : submodule R₂ M₂) = p.comap (e.symm : M₂ →ₛₗ[σ₂₁] M) :=
set_like.coe_injective $ by simp [e.image_eq_preimage]
/-- A linear equivalence of two modules restricts to a linear equivalence from any submodule
`p` of the domain onto the image of that submodule.
This is the linear version of `add_equiv.submonoid_map` and `add_equiv.subgroup_map`.
This is `linear_equiv.of_submodule'` but with `map` on the right instead of `comap` on the left. -/
def submodule_map (p : submodule R M) :
p ≃ₛₗ[σ₁₂] ↥(p.map (e : M →ₛₗ[σ₁₂] M₂) : submodule R₂ M₂) :=
{ inv_fun := λ y, ⟨(e.symm : M₂ →ₛₗ[σ₂₁] M) y, by
{ rcases y with ⟨y', hy⟩, rw submodule.mem_map at hy, rcases hy with ⟨x, hx, hxy⟩, subst hxy,
simp only [symm_apply_apply, submodule.coe_mk, coe_coe, hx], }⟩,
left_inv := λ x, by simp only [linear_map.dom_restrict_apply, linear_map.cod_restrict_apply,
linear_map.to_fun_eq_coe, linear_equiv.coe_coe, linear_equiv.symm_apply_apply, set_like.eta],
right_inv := λ y, by { apply set_coe.ext, simp only [linear_map.dom_restrict_apply,
linear_map.cod_restrict_apply, linear_map.to_fun_eq_coe, linear_equiv.coe_coe, set_like.coe_mk,
linear_equiv.apply_symm_apply] },
..((e : M →ₛₗ[σ₁₂] M₂).dom_restrict p).cod_restrict (p.map (e : M →ₛₗ[σ₁₂] M₂))
(λ x, ⟨x, by simp only [linear_map.dom_restrict_apply, eq_self_iff_true, and_true,
set_like.coe_mem, set_like.mem_coe]⟩) }
include σ₂₁
@[simp] lemma submodule_map_apply (p : submodule R M) (x : p) :
↑(e.submodule_map p x) = e x := rfl
@[simp] lemma submodule_map_symm_apply (p : submodule R M)
(x : (p.map (e : M →ₛₗ[σ₁₂] M₂) : submodule R₂ M₂)) :
↑((e.submodule_map p).symm x) = e.symm x :=
rfl
omit σ₂₁
end
section finsupp
variables {γ : Type*}
variables [semiring R] [semiring R₂]
variables [add_comm_monoid M] [add_comm_monoid M₂]
variables [module R M] [module R₂ M₂] [has_zero γ]
variables {τ₁₂ : R →+* R₂} {τ₂₁ : R₂ →+* R}
variables [ring_hom_inv_pair τ₁₂ τ₂₁] [ring_hom_inv_pair τ₂₁ τ₁₂]
include τ₂₁
@[simp] lemma map_finsupp_sum (f : M ≃ₛₗ[τ₁₂] M₂) {t : ι →₀ γ} {g : ι → γ → M} :
f (t.sum g) = t.sum (λ i d, f (g i d)) := f.map_sum _
omit τ₂₁
end finsupp
section dfinsupp
open dfinsupp
variables [semiring R] [semiring R₂]
variables [add_comm_monoid M] [add_comm_monoid M₂]
variables [module R M] [module R₂ M₂]
variables {τ₁₂ : R →+* R₂} {τ₂₁ : R₂ →+* R}
variables [ring_hom_inv_pair τ₁₂ τ₂₁] [ring_hom_inv_pair τ₂₁ τ₁₂]
variables {γ : ι → Type*} [decidable_eq ι]
include τ₂₁
@[simp] lemma map_dfinsupp_sum [Π i, has_zero (γ i)] [Π i (x : γ i), decidable (x ≠ 0)]
(f : M ≃ₛₗ[τ₁₂] M₂) (t : Π₀ i, γ i) (g : Π i, γ i → M) :
f (t.sum g) = t.sum (λ i d, f (g i d)) := f.map_sum _
@[simp] lemma map_dfinsupp_sum_add_hom [Π i, add_zero_class (γ i)] (f : M ≃ₛₗ[τ₁₂] M₂)
(t : Π₀ i, γ i) (g : Π i, γ i →+ M) :
f (sum_add_hom g t) = sum_add_hom (λ i, f.to_add_equiv.to_add_monoid_hom.comp (g i)) t :=
f.to_add_equiv.map_dfinsupp_sum_add_hom _ _
end dfinsupp
section uncurry
variables [semiring R] [semiring R₂] [semiring R₃] [semiring R₄]
variables [add_comm_monoid M] [add_comm_monoid M₂] [add_comm_monoid M₃] [add_comm_monoid M₄]
variables (V V₂ R)
/-- Currying as a linear equivalence between uncurried functions `V × V₂ → R` and curried
  functions `V → V₂ → R`. Differs from `tensor_product.curry`. -/
protected def curry :
(V × V₂ → R) ≃ₗ[R] (V → V₂ → R) :=
{ map_add' := λ _ _, by { ext, refl },
map_smul' := λ _ _, by { ext, refl },
.. equiv.curry _ _ _ }
@[simp] lemma coe_curry : ⇑(linear_equiv.curry R V V₂) = curry := rfl
@[simp] lemma coe_curry_symm : ⇑(linear_equiv.curry R V V₂).symm = uncurry := rfl
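-- `linear_equiv.curry` really is `function.curry` on elements; a sketch over `ℕ`.
example (φ : ℕ × ℕ → ℕ) (a b : ℕ) : linear_equiv.curry ℕ ℕ ℕ φ a b = φ (a, b) := rfl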
end uncurry
section
variables [semiring R] [semiring R₂] [semiring R₃] [semiring R₄]
variables [add_comm_monoid M] [add_comm_monoid M₂] [add_comm_monoid M₃] [add_comm_monoid M₄]
variables {module_M : module R M} {module_M₂ : module R₂ M₂} {module_M₃ : module R₃ M₃}
variables {σ₁₂ : R →+* R₂} {σ₂₁ : R₂ →+* R}
variables {σ₂₃ : R₂ →+* R₃} {σ₁₃ : R →+* R₃} [ring_hom_comp_triple σ₁₂ σ₂₃ σ₁₃]
variables {σ₃₂ : R₃ →+* R₂}
variables {re₁₂ : ring_hom_inv_pair σ₁₂ σ₂₁} {re₂₁ : ring_hom_inv_pair σ₂₁ σ₁₂}
variables {re₂₃ : ring_hom_inv_pair σ₂₃ σ₃₂} {re₃₂ : ring_hom_inv_pair σ₃₂ σ₂₃}
variables (f : M →ₛₗ[σ₁₂] M₂) (g : M₂ →ₛₗ[σ₂₁] M) (e : M ≃ₛₗ[σ₁₂] M₂) (h : M₂ →ₛₗ[σ₂₃] M₃)
variables (e'' : M₂ ≃ₛₗ[σ₂₃] M₃)
variables (p q : submodule R M)
/-- Linear equivalence between two equal submodules. -/
def of_eq (h : p = q) : p ≃ₗ[R] q :=
{ map_smul' := λ _ _, rfl, map_add' := λ _ _, rfl, .. equiv.set.of_eq (congr_arg _ h) }
variables {p q}
@[simp] lemma coe_of_eq_apply (h : p = q) (x : p) : (of_eq p q h x : M) = x := rfl
@[simp] lemma of_eq_symm (h : p = q) : (of_eq p q h).symm = of_eq q p h.symm := rfl
@[simp] lemma of_eq_rfl : of_eq p p rfl = linear_equiv.refl R p := by ext; refl
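-- `of_eq` is the identity on underlying elements; a sketch at a concrete type.
example {s t : submodule ℕ ℕ} (hst : s = t) (v : s) : (of_eq s t hst v : ℕ) = v := rfl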
include σ₂₁
/-- A linear equivalence which maps a submodule of one module onto another restricts to a linear
equivalence of the two submodules. -/
def of_submodules (p : submodule R M) (q : submodule R₂ M₂) (h : p.map (e : M →ₛₗ[σ₁₂] M₂) = q) :
p ≃ₛₗ[σ₁₂] q := (e.submodule_map p).trans (linear_equiv.of_eq _ _ h)
@[simp] lemma of_submodules_apply {p : submodule R M} {q : submodule R₂ M₂}
(h : p.map ↑e = q) (x : p) : ↑(e.of_submodules p q h x) = e x := rfl
@[simp] lemma of_submodules_symm_apply {p : submodule R M} {q : submodule R₂ M₂}
(h : p.map ↑e = q) (x : q) : ↑((e.of_submodules p q h).symm x) = e.symm x := rfl
include re₁₂ re₂₁
/-- A linear equivalence of two modules restricts to a linear equivalence from the preimage of any
submodule to that submodule.
This is `linear_equiv.of_submodule` but with `comap` on the left instead of `map` on the right. -/
def of_submodule' [module R M] [module R₂ M₂] (f : M ≃ₛₗ[σ₁₂] M₂) (U : submodule R₂ M₂) :
U.comap (f : M →ₛₗ[σ₁₂] M₂) ≃ₛₗ[σ₁₂] U :=
(f.symm.of_submodules _ _ f.symm.map_eq_comap).symm
lemma of_submodule'_to_linear_map [module R M] [module R₂ M₂]
(f : M ≃ₛₗ[σ₁₂] M₂) (U : submodule R₂ M₂) :
(f.of_submodule' U).to_linear_map =
(f.to_linear_map.dom_restrict _).cod_restrict _ subtype.prop :=
by { ext, refl }
@[simp]
lemma of_submodule'_apply [module R M] [module R₂ M₂]
(f : M ≃ₛₗ[σ₁₂] M₂) (U : submodule R₂ M₂) (x : U.comap (f : M →ₛₗ[σ₁₂] M₂)) :
(f.of_submodule' U x : M₂) = f (x : M) := rfl
@[simp]
lemma of_submodule'_symm_apply [module R M] [module R₂ M₂]
(f : M ≃ₛₗ[σ₁₂] M₂) (U : submodule R₂ M₂) (x : U) :
((f.of_submodule' U).symm x : M) = f.symm (x : M₂) := rfl
variable (p)
omit σ₂₁ re₁₂ re₂₁
/-- The top submodule of `M` is linearly equivalent to `M`. -/
def of_top (h : p = ⊤) : p ≃ₗ[R] M :=
{ inv_fun := λ x, ⟨x, h.symm ▸ trivial⟩,
left_inv := λ ⟨x, h⟩, rfl,
right_inv := λ x, rfl,
.. p.subtype }
@[simp] theorem of_top_apply {h} (x : p) : of_top p h x = x := rfl
@[simp] theorem coe_of_top_symm_apply {h} (x : M) : ((of_top p h).symm x : M) = x := rfl
theorem of_top_symm_apply {h} (x : M) : (of_top p h).symm x = ⟨x, h.symm ▸ trivial⟩ := rfl
include σ₂₁ re₁₂ re₂₁
/-- If a linear map has an inverse, it is a linear equivalence. -/
def of_linear (h₁ : f.comp g = linear_map.id) (h₂ : g.comp f = linear_map.id) :
M ≃ₛₗ[σ₁₂] M₂ :=
{ inv_fun := g,
left_inv := linear_map.ext_iff.1 h₂,
right_inv := linear_map.ext_iff.1 h₁,
..f }
omit σ₂₁ re₁₂ re₂₁
include σ₂₁ re₁₂ re₂₁
@[simp] theorem of_linear_apply {h₁ h₂} (x : M) : of_linear f g h₁ h₂ x = f x := rfl
omit σ₂₁ re₁₂ re₂₁
include σ₂₁ re₁₂ re₂₁
@[simp] theorem of_linear_symm_apply {h₁ h₂} (x : M₂) : (of_linear f g h₁ h₂).symm x = g x :=
rfl
omit σ₂₁ re₁₂ re₂₁
@[simp] protected theorem range : (e : M →ₛₗ[σ₁₂] M₂).range = ⊤ :=
linear_map.range_eq_top.2 e.to_equiv.surjective
include σ₂₁ re₁₂ re₂₁
@[simp] protected theorem _root_.linear_equiv_class.range [module R M] [module R₂ M₂] {F : Type*}
[semilinear_equiv_class F σ₁₂ M M₂] (e : F) : linear_map.range e = ⊤ :=
linear_map.range_eq_top.2 (equiv_like.surjective e)
lemma eq_bot_of_equiv [module R₂ M₂] (e : p ≃ₛₗ[σ₁₂] (⊥ : submodule R₂ M₂)) : p = ⊥ :=
begin
refine bot_unique (set_like.le_def.2 $ assume b hb, (submodule.mem_bot R).2 _),
rw [← p.mk_eq_zero hb, ← e.map_eq_zero_iff],
apply submodule.eq_zero_of_bot_submodule
end
omit σ₂₁ re₁₂ re₂₁
@[simp] protected theorem ker : (e : M →ₛₗ[σ₁₂] M₂).ker = ⊥ :=
linear_map.ker_eq_bot_of_injective e.to_equiv.injective
@[simp] theorem range_comp [ring_hom_surjective σ₁₂] [ring_hom_surjective σ₂₃]
[ring_hom_surjective σ₁₃] :
(h.comp (e : M →ₛₗ[σ₁₂] M₂) : M →ₛₗ[σ₁₃] M₃).range = h.range :=
linear_map.range_comp_of_range_eq_top _ e.range
include module_M
@[simp] theorem ker_comp (l : M →ₛₗ[σ₁₂] M₂) :
(((e'' : M₂ →ₛₗ[σ₂₃] M₃).comp l : M →ₛₗ[σ₁₃] M₃) : M →ₛₗ[σ₁₃] M₃).ker = l.ker :=
linear_map.ker_comp_of_ker_eq_bot _ e''.ker
omit module_M
variables {f g}
include σ₂₁
/-- A linear map `f : M →ₗ[R] M₂` with a left-inverse `g : M₂ →ₗ[R] M` defines a linear
equivalence between `M` and `f.range`.
This is a computable alternative to `linear_equiv.of_injective`, and a bidirectional version of
`linear_map.range_restrict`. -/
def of_left_inverse [ring_hom_inv_pair σ₁₂ σ₂₁] [ring_hom_inv_pair σ₂₁ σ₁₂]
{g : M₂ → M} (h : function.left_inverse g f) : M ≃ₛₗ[σ₁₂] f.range :=
{ to_fun := f.range_restrict,
inv_fun := g ∘ f.range.subtype,
left_inv := h,
right_inv := λ x, subtype.ext $
let ⟨x', hx'⟩ := linear_map.mem_range.mp x.prop in
show f (g x) = x, by rw [←hx', h x'],
.. f.range_restrict }
omit σ₂₁
@[simp] lemma of_left_inverse_apply [ring_hom_inv_pair σ₁₂ σ₂₁] [ring_hom_inv_pair σ₂₁ σ₁₂]
(h : function.left_inverse g f) (x : M) :
↑(of_left_inverse h x) = f x := rfl
include σ₂₁
@[simp] lemma of_left_inverse_symm_apply [ring_hom_inv_pair σ₁₂ σ₂₁]
[ring_hom_inv_pair σ₂₁ σ₁₂] (h : function.left_inverse g f) (x : f.range) :
(of_left_inverse h).symm x = g x := rfl
omit σ₂₁
variables (f)
/-- An `injective` linear map `f : M →ₗ[R] M₂` defines a linear equivalence
between `M` and `f.range`. See also `linear_equiv.of_left_inverse`. -/
noncomputable def of_injective [ring_hom_inv_pair σ₁₂ σ₂₁] [ring_hom_inv_pair σ₂₁ σ₁₂]
(h : injective f) : M ≃ₛₗ[σ₁₂] f.range :=
of_left_inverse $ classical.some_spec h.has_left_inverse
@[simp] theorem of_injective_apply [ring_hom_inv_pair σ₁₂ σ₂₁] [ring_hom_inv_pair σ₂₁ σ₁₂]
{h : injective f} (x : M) : ↑(of_injective f h x) = f x := rfl
/-- A bijective linear map is a linear equivalence. -/
noncomputable def of_bijective [ring_hom_inv_pair σ₁₂ σ₂₁] [ring_hom_inv_pair σ₂₁ σ₁₂]
(hf₁ : injective f) (hf₂ : surjective f) : M ≃ₛₗ[σ₁₂] M₂ :=
(of_injective f hf₁).trans (of_top _ $ linear_map.range_eq_top.2 hf₂)
@[simp] theorem of_bijective_apply [ring_hom_inv_pair σ₁₂ σ₂₁] [ring_hom_inv_pair σ₂₁ σ₁₂]
{hf₁ hf₂} (x : M) : of_bijective f hf₁ hf₂ x = f x := rfl
end
end add_comm_monoid
section add_comm_group
variables [semiring R] [semiring R₂] [semiring R₃] [semiring R₄]
variables [add_comm_group M] [add_comm_group M₂] [add_comm_group M₃] [add_comm_group M₄]
variables {module_M : module R M} {module_M₂ : module R₂ M₂}
variables {module_M₃ : module R₃ M₃} {module_M₄ : module R₄ M₄}
variables {σ₁₂ : R →+* R₂} {σ₃₄ : R₃ →+* R₄}
variables {σ₂₁ : R₂ →+* R} {σ₄₃ : R₄ →+* R₃}
variables {re₁₂ : ring_hom_inv_pair σ₁₂ σ₂₁} {re₂₁ : ring_hom_inv_pair σ₂₁ σ₁₂}
variables {re₃₄ : ring_hom_inv_pair σ₃₄ σ₄₃} {re₄₃ : ring_hom_inv_pair σ₄₃ σ₃₄}
variables (e e₁ : M ≃ₛₗ[σ₁₂] M₂) (e₂ : M₃ ≃ₛₗ[σ₃₄] M₄)
@[simp] theorem map_neg (a : M) : e (-a) = -e a := e.to_linear_map.map_neg a
@[simp] theorem map_sub (a b : M) : e (a - b) = e a - e b :=
e.to_linear_map.map_sub a b
end add_comm_group
section neg
variables (R) [semiring R] [add_comm_group M] [module R M]
/-- `x ↦ -x` as a `linear_equiv` -/
def neg : M ≃ₗ[R] M := { .. equiv.neg M, .. (-linear_map.id : M →ₗ[R] M) }
variable {R}
@[simp] lemma coe_neg : ⇑(neg R : M ≃ₗ[R] M) = -id := rfl
lemma neg_apply (x : M) : neg R x = -x := by simp
@[simp] lemma symm_neg : (neg R : M ≃ₗ[R] M).symm = neg R := rfl
end neg
section comm_semiring
variables [comm_semiring R] [add_comm_monoid M] [add_comm_monoid M₂] [add_comm_monoid M₃]
variables [module R M] [module R M₂] [module R M₃]
open _root_.linear_map
/-- Multiplying by a unit `a` of the ring `R` is a linear equivalence. -/
def smul_of_unit (a : Rˣ) : M ≃ₗ[R] M :=
distrib_mul_action.to_linear_equiv R M a
/-- A linear isomorphism between the domains and codomains of two spaces of linear maps gives a
linear isomorphism between the two function spaces. -/
def arrow_congr {R M₁ M₂ M₂₁ M₂₂ : Sort*} [comm_semiring R]
[add_comm_monoid M₁] [add_comm_monoid M₂] [add_comm_monoid M₂₁] [add_comm_monoid M₂₂]
[module R M₁] [module R M₂] [module R M₂₁] [module R M₂₂]
(e₁ : M₁ ≃ₗ[R] M₂) (e₂ : M₂₁ ≃ₗ[R] M₂₂) :
(M₁ →ₗ[R] M₂₁) ≃ₗ[R] (M₂ →ₗ[R] M₂₂) :=
{ to_fun := λ f : M₁ →ₗ[R] M₂₁, (e₂ : M₂₁ →ₗ[R] M₂₂).comp $ f.comp (e₁.symm : M₂ →ₗ[R] M₁),
inv_fun := λ f, (e₂.symm : M₂₂ →ₗ[R] M₂₁).comp $ f.comp (e₁ : M₁ →ₗ[R] M₂),
left_inv := λ f, by { ext x, simp only [symm_apply_apply, comp_app, coe_comp, coe_coe]},
right_inv := λ f, by { ext x, simp only [comp_app, apply_symm_apply, coe_comp, coe_coe]},
map_add' := λ f g, by { ext x, simp only [map_add, add_apply, comp_app, coe_comp, coe_coe]},
map_smul' := λ c f, by { ext x, simp only [smul_apply, comp_app, coe_comp, map_smulₛₗ e₂,
coe_coe]} }
@[simp] lemma arrow_congr_apply {R M₁ M₂ M₂₁ M₂₂ : Sort*} [comm_semiring R]
[add_comm_monoid M₁] [add_comm_monoid M₂] [add_comm_monoid M₂₁] [add_comm_monoid M₂₂]
[module R M₁] [module R M₂] [module R M₂₁] [module R M₂₂]
(e₁ : M₁ ≃ₗ[R] M₂) (e₂ : M₂₁ ≃ₗ[R] M₂₂) (f : M₁ →ₗ[R] M₂₁) (x : M₂) :
arrow_congr e₁ e₂ f x = e₂ (f (e₁.symm x)) :=
rfl
@[simp] lemma arrow_congr_symm_apply {R M₁ M₂ M₂₁ M₂₂ : Sort*} [comm_semiring R]
[add_comm_monoid M₁] [add_comm_monoid M₂] [add_comm_monoid M₂₁] [add_comm_monoid M₂₂]
[module R M₁] [module R M₂] [module R M₂₁] [module R M₂₂]
(e₁ : M₁ ≃ₗ[R] M₂) (e₂ : M₂₁ ≃ₗ[R] M₂₂) (f : M₂ →ₗ[R] M₂₂) (x : M₁) :
(arrow_congr e₁ e₂).symm f x = e₂.symm (f (e₁ x)) :=
rfl
lemma arrow_congr_comp {N N₂ N₃ : Sort*}
[add_comm_monoid N] [add_comm_monoid N₂] [add_comm_monoid N₃]
[module R N] [module R N₂] [module R N₃]
(e₁ : M ≃ₗ[R] N) (e₂ : M₂ ≃ₗ[R] N₂) (e₃ : M₃ ≃ₗ[R] N₃) (f : M →ₗ[R] M₂) (g : M₂ →ₗ[R] M₃) :
arrow_congr e₁ e₃ (g.comp f) = (arrow_congr e₂ e₃ g).comp (arrow_congr e₁ e₂ f) :=
by { ext, simp only [symm_apply_apply, arrow_congr_apply, linear_map.comp_apply], }
lemma arrow_congr_trans {M₁ M₂ M₃ N₁ N₂ N₃ : Sort*}
[add_comm_monoid M₁] [module R M₁] [add_comm_monoid M₂] [module R M₂]
[add_comm_monoid M₃] [module R M₃] [add_comm_monoid N₁] [module R N₁]
[add_comm_monoid N₂] [module R N₂] [add_comm_monoid N₃] [module R N₃]
(e₁ : M₁ ≃ₗ[R] M₂) (e₂ : N₁ ≃ₗ[R] N₂) (e₃ : M₂ ≃ₗ[R] M₃) (e₄ : N₂ ≃ₗ[R] N₃) :
(arrow_congr e₁ e₂).trans (arrow_congr e₃ e₄) = arrow_congr (e₁.trans e₃) (e₂.trans e₄) :=
rfl
/-- If `M₂` and `M₃` are linearly isomorphic then the two spaces of linear maps from `M` into `M₂`
and `M` into `M₃` are linearly isomorphic. -/
def congr_right (f : M₂ ≃ₗ[R] M₃) : (M →ₗ[R] M₂) ≃ₗ[R] (M →ₗ[R] M₃) :=
arrow_congr (linear_equiv.refl R M) f
/-- If `M` and `M₂` are linearly isomorphic then the two spaces of linear maps from `M` and `M₂` to
themselves are linearly isomorphic. -/
def conj (e : M ≃ₗ[R] M₂) : (module.End R M) ≃ₗ[R] (module.End R M₂) := arrow_congr e e
lemma conj_apply (e : M ≃ₗ[R] M₂) (f : module.End R M) :
e.conj f = ((↑e : M →ₗ[R] M₂).comp f).comp (e.symm : M₂ →ₗ[R] M) := rfl
lemma symm_conj_apply (e : M ≃ₗ[R] M₂) (f : module.End R M₂) :
e.symm.conj f = ((↑e.symm : M₂ →ₗ[R] M).comp f).comp (e : M →ₗ[R] M₂) := rfl
lemma conj_comp (e : M ≃ₗ[R] M₂) (f g : module.End R M) :
e.conj (g.comp f) = (e.conj g).comp (e.conj f) :=
arrow_congr_comp e e e f g
lemma conj_trans (e₁ : M ≃ₗ[R] M₂) (e₂ : M₂ ≃ₗ[R] M₃) :
e₁.conj.trans e₂.conj = (e₁.trans e₂).conj :=
by { ext f x, refl, }
@[simp] lemma conj_id (e : M ≃ₗ[R] M₂) : e.conj linear_map.id = linear_map.id :=
by { ext, simp [conj_apply], }
end comm_semiring
section field
variables [field K] [add_comm_group M] [add_comm_group M₂] [add_comm_group M₃]
variables [module K M] [module K M₂] [module K M₃]
variables (K) (M)
open _root_.linear_map
/-- Multiplying by a nonzero element `a` of the field `K` is a linear equivalence. -/
@[simps] def smul_of_ne_zero (a : K) (ha : a ≠ 0) : M ≃ₗ[K] M :=
smul_of_unit $ units.mk0 a ha
end field
end linear_equiv
namespace submodule
section module
variables [semiring R] [add_comm_monoid M] [module R M]
/-- Given `p` a submodule of the module `M` and `q` a submodule of `p`, `p.equiv_subtype_map q`
is the natural `linear_equiv` between `q` and `q.map p.subtype`. -/
def equiv_subtype_map (p : submodule R M) (q : submodule R p) :
q ≃ₗ[R] q.map p.subtype :=
{ inv_fun :=
begin
rintro ⟨x, hx⟩,
refine ⟨⟨x, _⟩, _⟩;
rcases hx with ⟨⟨_, h⟩, _, rfl⟩;
assumption
end,
left_inv := λ ⟨⟨_, _⟩, _⟩, rfl,
right_inv := λ ⟨x, ⟨_, h⟩, _, rfl⟩, rfl,
.. (p.subtype.dom_restrict q).cod_restrict _
begin
rintro ⟨x, hx⟩,
refine ⟨x, hx, rfl⟩,
end }
@[simp]
lemma equiv_subtype_map_apply {p : submodule R M} {q : submodule R p} (x : q) :
(p.equiv_subtype_map q x : M) = p.subtype.dom_restrict q x :=
rfl
@[simp]
lemma equiv_subtype_map_symm_apply {p : submodule R M} {q : submodule R p} (x : q.map p.subtype) :
((p.equiv_subtype_map q).symm x : M) = x :=
by { cases x, refl }
/-- If `s ≤ t`, then we can view `s` as a submodule of `t` by taking the comap
of `t.subtype`. -/
@[simps]
def comap_subtype_equiv_of_le {p q : submodule R M} (hpq : p ≤ q) :
comap q.subtype p ≃ₗ[R] p :=
{ to_fun := λ x, ⟨x, x.2⟩,
inv_fun := λ x, ⟨⟨x, hpq x.2⟩, x.2⟩,
left_inv := λ x, by simp only [coe_mk, set_like.eta, coe_coe],
right_inv := λ x, by simp only [subtype.coe_mk, set_like.eta, coe_coe],
map_add' := λ x y, rfl,
map_smul' := λ c x, rfl }
end module
end submodule
namespace submodule
variables [comm_semiring R] [comm_semiring R₂]
variables [add_comm_monoid M] [add_comm_monoid M₂] [module R M] [module R₂ M₂]
variables [add_comm_monoid N] [add_comm_monoid N₂] [module R N] [module R N₂]
variables {τ₁₂ : R →+* R₂} {τ₂₁ : R₂ →+* R}
variables [ring_hom_inv_pair τ₁₂ τ₂₁] [ring_hom_inv_pair τ₂₁ τ₁₂]
variables (p : submodule R M) (q : submodule R₂ M₂)
variables (pₗ : submodule R N) (qₗ : submodule R N₂)
include τ₂₁
@[simp] lemma mem_map_equiv {e : M ≃ₛₗ[τ₁₂] M₂} {x : M₂} : x ∈ p.map (e : M →ₛₗ[τ₁₂] M₂) ↔
e.symm x ∈ p :=
begin
rw submodule.mem_map, split,
{ rintros ⟨y, hy, hx⟩, simp [←hx, hy], },
{ intros hx, refine ⟨e.symm x, hx, by simp⟩, },
end
omit τ₂₁
lemma map_equiv_eq_comap_symm (e : M ≃ₛₗ[τ₁₂] M₂) (K : submodule R M) :
K.map (e : M →ₛₗ[τ₁₂] M₂) = K.comap (e.symm : M₂ →ₛₗ[τ₂₁] M) :=
submodule.ext (λ _, by rw [mem_map_equiv, mem_comap, linear_equiv.coe_coe])
lemma comap_equiv_eq_map_symm (e : M ≃ₛₗ[τ₁₂] M₂) (K : submodule R₂ M₂) :
K.comap (e : M →ₛₗ[τ₁₂] M₂) = K.map (e.symm : M₂ →ₛₗ[τ₂₁] M) :=
(map_equiv_eq_comap_symm e.symm K).symm
lemma comap_le_comap_smul (fₗ : N →ₗ[R] N₂) (c : R) :
comap fₗ qₗ ≤ comap (c • fₗ) qₗ :=
begin
rw set_like.le_def,
intros m h,
change c • (fₗ m) ∈ qₗ,
change fₗ m ∈ qₗ at h,
apply qₗ.smul_mem _ h,
end
lemma inf_comap_le_comap_add (f₁ f₂ : M →ₛₗ[τ₁₂] M₂) :
comap f₁ q ⊓ comap f₂ q ≤ comap (f₁ + f₂) q :=
begin
rw set_like.le_def,
intros m h,
change f₁ m + f₂ m ∈ q,
change f₁ m ∈ q ∧ f₂ m ∈ q at h,
apply q.add_mem h.1 h.2,
end
/-- Given modules `N`, `N₂` over a commutative semiring, together with submodules `pₗ ⊆ N`,
`qₗ ⊆ N₂`, the set of maps $\{f ∈ Hom(N, N₂) | f(pₗ) ⊆ qₗ \}$ is a submodule of `Hom(N, N₂)`. -/
def compatible_maps : submodule R (N →ₗ[R] N₂) :=
{ carrier := {fₗ | pₗ ≤ comap fₗ qₗ},
zero_mem' := by { change pₗ ≤ comap (0 : N →ₗ[R] N₂) qₗ, rw comap_zero, refine le_top, },
add_mem' := λ f₁ f₂ h₁ h₂, by { apply le_trans _ (inf_comap_le_comap_add qₗ f₁ f₂),
rw le_inf_iff, exact ⟨h₁, h₂⟩, },
smul_mem' := λ c fₗ h, le_trans h (comap_le_comap_smul qₗ fₗ c), }
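/- Usage sketch (added for illustration; not part of the original file): membership in
   `compatible_maps pₗ qₗ` is, by definition, the comap condition stated in the carrier. -/
example (fₗ : N →ₗ[R] N₂) (h : pₗ ≤ comap fₗ qₗ) : fₗ ∈ compatible_maps pₗ qₗ := h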
end submodule
namespace equiv
variables [semiring R] [add_comm_monoid M] [module R M] [add_comm_monoid M₂] [module R M₂]
/-- An equivalence whose underlying function is linear is a linear equivalence. -/
def to_linear_equiv (e : M ≃ M₂) (h : is_linear_map R (e : M → M₂)) : M ≃ₗ[R] M₂ :=
{ .. e, .. h.mk' e}
end equiv
section fun_left
variables (R M) [semiring R] [add_comm_monoid M] [module R M]
variables {m n p : Type*}
namespace linear_map
/-- Given an `R`-module `M` and a function `m → n` between arbitrary types,
construct a linear map `(n → M) →ₗ[R] (m → M)` -/
def fun_left (f : m → n) : (n → M) →ₗ[R] (m → M) :=
{ to_fun := (∘ f), map_add' := λ _ _, rfl, map_smul' := λ _ _, rfl }
@[simp] theorem fun_left_apply (f : m → n) (g : n → M) (i : m) : fun_left R M f g i = g (f i) :=
rfl
@[simp] theorem fun_left_id (g : n → M) : fun_left R M _root_.id g = g :=
rfl
theorem fun_left_comp (f₁ : n → p) (f₂ : m → n) :
fun_left R M (f₁ ∘ f₂) = (fun_left R M f₂).comp (fun_left R M f₁) :=
rfl
theorem fun_left_surjective_of_injective (f : m → n) (hf : injective f) :
surjective (fun_left R M f) :=
begin
classical,
intro g,
refine ⟨λ x, if h : ∃ y, f y = x then g h.some else 0, _⟩,
{ ext,
dsimp only [fun_left_apply],
split_ifs with w,
{ congr,
exact hf w.some_spec, },
{ simpa only [not_true, exists_apply_eq_apply] using w } },
end
theorem fun_left_injective_of_surjective (f : m → n) (hf : surjective f) :
injective (fun_left R M f) :=
begin
obtain ⟨g, hg⟩ := hf.has_right_inverse,
suffices : left_inverse (fun_left R M g) (fun_left R M f),
{ exact this.injective },
intro x,
rw [←linear_map.comp_apply, ← fun_left_comp, hg.id, fun_left_id],
end
end linear_map
namespace linear_equiv
open _root_.linear_map
/-- Given an `R`-module `M` and an equivalence `m ≃ n` between arbitrary types,
construct a linear equivalence `(n → M) ≃ₗ[R] (m → M)` -/
def fun_congr_left (e : m ≃ n) : (n → M) ≃ₗ[R] (m → M) :=
linear_equiv.of_linear (fun_left R M e) (fun_left R M e.symm)
(linear_map.ext $ λ x, funext $ λ i,
by rw [id_apply, ← fun_left_comp, equiv.symm_comp_self, fun_left_id])
(linear_map.ext $ λ x, funext $ λ i,
by rw [id_apply, ← fun_left_comp, equiv.self_comp_symm, fun_left_id])
@[simp] theorem fun_congr_left_apply (e : m ≃ n) (x : n → M) :
fun_congr_left R M e x = fun_left R M e x :=
rfl
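/- Usage sketch (added for illustration; not part of the original file): evaluating
   `fun_congr_left` pointwise agrees with precomposition by `e`, definitionally. -/
example (e : m ≃ n) (x : n → M) (i : m) : fun_congr_left R M e x i = x (e i) := rfl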
@[simp] theorem fun_congr_left_id :
fun_congr_left R M (equiv.refl n) = linear_equiv.refl R (n → M) :=
rfl
@[simp] theorem fun_congr_left_comp (e₁ : m ≃ n) (e₂ : n ≃ p) :
fun_congr_left R M (equiv.trans e₁ e₂) =
linear_equiv.trans (fun_congr_left R M e₂) (fun_congr_left R M e₁) :=
rfl
@[simp] lemma fun_congr_left_symm (e : m ≃ n) :
(fun_congr_left R M e).symm = fun_congr_left R M e.symm :=
rfl
end linear_equiv
end fun_left
namespace linear_map
variables [semiring R] [add_comm_monoid M] [module R M]
variables (R M)
/-- The group of invertible linear maps from `M` to itself -/
@[reducible] def general_linear_group := (M →ₗ[R] M)ˣ
namespace general_linear_group
variables {R M}
instance : has_coe_to_fun (general_linear_group R M) (λ _, M → M) := by apply_instance
/-- An invertible linear map `f` determines an equivalence from `M` to itself. -/
def to_linear_equiv (f : general_linear_group R M) : (M ≃ₗ[R] M) :=
{ inv_fun := f.inv.to_fun,
left_inv := λ m, show (f.inv * f.val) m = m,
by erw f.inv_val; simp,
right_inv := λ m, show (f.val * f.inv) m = m,
by erw f.val_inv; simp,
..f.val }
/-- An equivalence from `M` to itself determines an invertible linear map. -/
def of_linear_equiv (f : (M ≃ₗ[R] M)) : general_linear_group R M :=
{ val := f,
inv := (f.symm : M →ₗ[R] M),
val_inv := linear_map.ext $ λ _, f.apply_symm_apply _,
inv_val := linear_map.ext $ λ _, f.symm_apply_apply _ }
variables (R M)
/-- The general linear group on `R` and `M` is multiplicatively equivalent to the type of linear
equivalences between `M` and itself. -/
def general_linear_equiv : general_linear_group R M ≃* (M ≃ₗ[R] M) :=
{ to_fun := to_linear_equiv,
inv_fun := of_linear_equiv,
left_inv := λ f, by { ext, refl },
right_inv := λ f, by { ext, refl },
map_mul' := λ x y, by {ext, refl} }
@[simp] lemma general_linear_equiv_to_linear_map (f : general_linear_group R M) :
(general_linear_equiv R M f : M →ₗ[R] M) = f :=
by {ext, refl}
@[simp] lemma coe_fn_general_linear_equiv (f : general_linear_group R M) :
⇑(general_linear_equiv R M f) = (f : M → M) :=
rfl
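/- Usage sketch (added for illustration; not part of the original file): converting an
   invertible linear map to a linear equivalence and back recovers the original element;
   the proof mirrors the `left_inv` field of `general_linear_equiv` above. -/
example (f : general_linear_group R M) : of_linear_equiv (to_linear_equiv f) = f :=
by { ext, refl }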
end general_linear_group
end linear_map
| 7e4b25ea26a4d507c43b2e9cb14f9db141d3915b | 8b9f17008684d796c8022dab552e42f0cb6fb347 | /library/logic/connectives.lean | cfce06b3e122a294d1fe367549928cd74557d0ed | ["Apache-2.0"] | permissive | chubbymaggie/lean | 0d06ae25f9dd396306fb02190e89422ea94afd7b | d2c7b5c31928c98f545b16420d37842c43b4ae9a | refs/heads/master | 1,611,313,622,901 | 1,430,266,839,000 | 1,430,267,083,000 | null | 0 | 0 | null | null | null | null | UTF-8 | Lean | false | false | 8,055 | lean |
/-
Copyright (c) 2014 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Module: logic.connectives
Authors: Jeremy Avigad, Leonardo de Moura
The propositional connectives. See also init.datatypes and init.logic.
-/
variables {a b c d : Prop}
/- false -/
theorem false.elim {c : Prop} (H : false) : c :=
false.rec c H
/- implies -/
definition imp (a b : Prop) : Prop := a → b
theorem mt (H1 : a → b) (H2 : ¬b) : ¬a :=
assume Ha : a, absurd (H1 Ha) H2
theorem imp_true (a : Prop) : (a → true) ↔ true :=
iff.intro (assume H, trivial) (assume H H1, trivial)
theorem true_imp (a : Prop) : (true → a) ↔ a :=
iff.intro (assume H, H trivial) (assume H H1, H)
theorem imp_false (a : Prop) : (a → false) ↔ ¬ a := iff.rfl
theorem false_imp (a : Prop) : (false → a) ↔ true :=
iff.intro (assume H, trivial) (assume H H1, false.elim H1)
/- not -/
theorem not.elim (H1 : ¬a) (H2 : a) : false := H1 H2
theorem not.intro (H : a → false) : ¬a := H
theorem not_not_intro (Ha : a) : ¬¬a :=
assume Hna : ¬a, absurd Ha Hna
theorem not_not_of_not_implies (H : ¬(a → b)) : ¬¬a :=
assume Hna : ¬a, absurd (assume Ha : a, absurd Ha Hna) H
theorem not_of_not_implies (H : ¬(a → b)) : ¬b :=
assume Hb : b, absurd (assume Ha : a, Hb) H
theorem not_not_em : ¬¬(a ∨ ¬a) :=
assume not_em : ¬(a ∨ ¬a),
have Hnp : ¬a, from
assume Hp : a, absurd (or.inl Hp) not_em,
absurd (or.inr Hnp) not_em
theorem not_true : ¬ true ↔ false :=
iff.intro (assume H, H trivial) (assume H, false.elim H)
theorem not_false_iff : ¬ false ↔ true :=
iff.intro (assume H, trivial) (assume H H1, H1)
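/- Usage sketch (added for illustration; not part of the original file):
   modus tollens combined with double-negation introduction. -/
example (H₁ : a → b) (H₂ : ¬¬¬b) : ¬a :=
mt H₁ (assume Hb : b, H₂ (not_not_intro Hb))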
/- and -/
definition not_and_of_not_left (b : Prop) (Hna : ¬a) : ¬(a ∧ b) :=
assume H : a ∧ b, absurd (and.elim_left H) Hna
definition not_and_of_not_right (a : Prop) {b : Prop} (Hnb : ¬b) : ¬(a ∧ b) :=
assume H : a ∧ b, absurd (and.elim_right H) Hnb
theorem and.swap (H : a ∧ b) : b ∧ a :=
and.intro (and.elim_right H) (and.elim_left H)
theorem and_of_and_of_imp_of_imp (H₁ : a ∧ b) (H₂ : a → c) (H₃ : b → d) : c ∧ d :=
and.elim H₁ (assume Ha : a, assume Hb : b, and.intro (H₂ Ha) (H₃ Hb))
theorem and_of_and_of_imp_left (H₁ : a ∧ c) (H : a → b) : b ∧ c :=
and.elim H₁ (assume Ha : a, assume Hc : c, and.intro (H Ha) Hc)
theorem and_of_and_of_imp_right (H₁ : c ∧ a) (H : a → b) : c ∧ b :=
and.elim H₁ (assume Hc : c, assume Ha : a, and.intro Hc (H Ha))
theorem and.comm : a ∧ b ↔ b ∧ a :=
iff.intro (λH, and.swap H) (λH, and.swap H)
theorem and.assoc : (a ∧ b) ∧ c ↔ a ∧ (b ∧ c) :=
iff.intro
(assume H, and.intro
(and.elim_left (and.elim_left H))
(and.intro (and.elim_right (and.elim_left H)) (and.elim_right H)))
(assume H, and.intro
(and.intro (and.elim_left H) (and.elim_left (and.elim_right H)))
(and.elim_right (and.elim_right H)))
theorem and_true (a : Prop) : a ∧ true ↔ a :=
iff.intro (assume H, and.left H) (assume H, and.intro H trivial)
theorem true_and (a : Prop) : true ∧ a ↔ a :=
iff.intro (assume H, and.right H) (assume H, and.intro trivial H)
theorem and_false (a : Prop) : a ∧ false ↔ false :=
iff.intro (assume H, and.right H) (assume H, false.elim H)
theorem false_and (a : Prop) : false ∧ a ↔ false :=
iff.intro (assume H, and.left H) (assume H, false.elim H)
theorem and_self (a : Prop) : a ∧ a ↔ a :=
iff.intro (assume H, and.left H) (assume H, and.intro H H)
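/- Usage sketch (added for illustration; not part of the original file):
   combining the `and` lemmas above. -/
example (H : (a ∧ b) ∧ c) : c ∧ (b ∧ a) :=
and.intro (and.elim_right H) (and.swap (and.elim_left H))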
/- or -/
definition not_or (Hna : ¬a) (Hnb : ¬b) : ¬(a ∨ b) :=
assume H : a ∨ b, or.rec_on H
(assume Ha, absurd Ha Hna)
(assume Hb, absurd Hb Hnb)
theorem or_of_or_of_imp_of_imp (H₁ : a ∨ b) (H₂ : a → c) (H₃ : b → d) : c ∨ d :=
or.elim H₁
(assume Ha : a, or.inl (H₂ Ha))
(assume Hb : b, or.inr (H₃ Hb))
theorem or_of_or_of_imp_left (H₁ : a ∨ c) (H : a → b) : b ∨ c :=
or.elim H₁
(assume H₂ : a, or.inl (H H₂))
(assume H₂ : c, or.inr H₂)
theorem or_of_or_of_imp_right (H₁ : c ∨ a) (H : a → b) : c ∨ b :=
or.elim H₁
(assume H₂ : c, or.inl H₂)
(assume H₂ : a, or.inr (H H₂))
theorem or.elim3 (H : a ∨ b ∨ c) (Ha : a → d) (Hb : b → d) (Hc : c → d) : d :=
or.elim H Ha (assume H₂, or.elim H₂ Hb Hc)
theorem or_resolve_right (H₁ : a ∨ b) (H₂ : ¬a) : b :=
or.elim H₁ (assume Ha, absurd Ha H₂) (assume Hb, Hb)
theorem or_resolve_left (H₁ : a ∨ b) (H₂ : ¬b) : a :=
or.elim H₁ (assume Ha, Ha) (assume Hb, absurd Hb H₂)
theorem or.swap (H : a ∨ b) : b ∨ a :=
or.elim H (assume Ha, or.inr Ha) (assume Hb, or.inl Hb)
theorem or.comm : a ∨ b ↔ b ∨ a :=
iff.intro (λH, or.swap H) (λH, or.swap H)
theorem or.assoc : (a ∨ b) ∨ c ↔ a ∨ (b ∨ c) :=
iff.intro
(assume H, or.elim H
(assume H₁, or.elim H₁
(assume Ha, or.inl Ha)
(assume Hb, or.inr (or.inl Hb)))
(assume Hc, or.inr (or.inr Hc)))
(assume H, or.elim H
(assume Ha, (or.inl (or.inl Ha)))
(assume H₁, or.elim H₁
(assume Hb, or.inl (or.inr Hb))
(assume Hc, or.inr Hc)))
theorem or_true (a : Prop) : a ∨ true ↔ true :=
iff.intro (assume H, trivial) (assume H, or.inr H)
theorem true_or (a : Prop) : true ∨ a ↔ true :=
iff.intro (assume H, trivial) (assume H, or.inl H)
theorem or_false (a : Prop) : a ∨ false ↔ a :=
iff.intro
(assume H, or.elim H (assume H1 : a, H1) (assume H1 : false, false.elim H1))
(assume H, or.inl H)
theorem false_or (a : Prop) : false ∨ a ↔ a :=
iff.intro
(assume H, or.elim H (assume H1 : false, false.elim H1) (assume H1 : a, H1))
(assume H, or.inr H)
theorem or_self (a : Prop) : a ∨ a ↔ a :=
iff.intro
(assume H, or.elim H (assume H1, H1) (assume H1, H1))
(assume H, or.inl H)
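/- Usage sketch (added for illustration; not part of the original file):
   resolving a three-way disjunction with the resolution lemmas above. -/
example (H : a ∨ b ∨ c) (Hna : ¬a) (Hnb : ¬b) : c :=
or_resolve_right (or_resolve_right H Hna) Hnb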
/- iff -/
definition iff.def : (a ↔ b) = ((a → b) ∧ (b → a)) :=
!eq.refl
theorem iff_true (a : Prop) : (a ↔ true) ↔ a :=
iff.intro
(assume H, iff.mp' H trivial)
(assume H, iff.intro (assume H1, trivial) (assume H1, H))
theorem true_iff (a : Prop) : (true ↔ a) ↔ a :=
iff.intro
(assume H, iff.mp H trivial)
(assume H, iff.intro (assume H1, H) (assume H1, trivial))
theorem iff_false (a : Prop) : (a ↔ false) ↔ ¬ a :=
iff.intro
(assume H, and.left H)
(assume H, and.intro H (assume H1, false.elim H1))
theorem false_iff (a : Prop) : (false ↔ a) ↔ ¬ a :=
iff.intro
(assume H, and.right H)
(assume H, and.intro (assume H1, false.elim H1) H)
theorem iff_true_of_self (Ha : a) : a ↔ true :=
iff.intro (assume H, trivial) (assume H, Ha)
theorem iff_self (a : Prop) : (a ↔ a) ↔ true :=
iff_true_of_self !iff.refl
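/- Usage sketch (added for illustration; not part of the original file): `iff_true_of_self`
   applies to any proved proposition, including an `iff` that was just established. -/
example (Ha : a) : (a ↔ true) ↔ true :=
iff_true_of_self (iff_true_of_self Ha)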
/- if-then-else -/
section
open eq.ops
variables {A : Type} {c₁ c₂ : Prop}
definition if_true (t e : A) : (if true then t else e) = t :=
if_pos trivial
definition if_false (t e : A) : (if false then t else e) = e :=
if_neg not_false
theorem if_congr_cond [H₁ : decidable c₁] [H₂ : decidable c₂] (Heq : c₁ ↔ c₂) (t e : A) :
(if c₁ then t else e) = (if c₂ then t else e) :=
decidable.rec_on H₁
(λ Hc₁ : c₁, decidable.rec_on H₂
(λ Hc₂ : c₂, if_pos Hc₁ ⬝ (if_pos Hc₂)⁻¹)
(λ Hnc₂ : ¬c₂, absurd (iff.elim_left Heq Hc₁) Hnc₂))
(λ Hnc₁ : ¬c₁, decidable.rec_on H₂
(λ Hc₂ : c₂, absurd (iff.elim_right Heq Hc₂) Hnc₁)
(λ Hnc₂ : ¬c₂, if_neg Hnc₁ ⬝ (if_neg Hnc₂)⁻¹))
theorem if_congr_aux [H₁ : decidable c₁] [H₂ : decidable c₂] {t₁ t₂ e₁ e₂ : A}
(Hc : c₁ ↔ c₂) (Ht : t₁ = t₂) (He : e₁ = e₂) :
(if c₁ then t₁ else e₁) = (if c₂ then t₂ else e₂) :=
Ht ▸ He ▸ (if_congr_cond Hc t₁ e₁)
theorem if_congr [H₁ : decidable c₁] {t₁ t₂ e₁ e₂ : A} (Hc : c₁ ↔ c₂) (Ht : t₁ = t₂)
(He : e₁ = e₂) :
(if c₁ then t₁ else e₁) = (@ite c₂ (decidable_of_decidable_of_iff H₁ Hc) A t₂ e₂) :=
assert H2 : decidable c₂, from (decidable_of_decidable_of_iff H₁ Hc),
if_congr_aux Hc Ht He
end
| 4e1c0beee939cf670e73e3a244217ba88b64abe4 | 46125763b4dbf50619e8846a1371029346f4c3db | /src/category_theory/functor.lean | 08941ded3c7030d6296b9add573b5bd7f396a82f | ["Apache-2.0"] | permissive | thjread/mathlib | a9d97612cedc2c3101060737233df15abcdb9eb1 | 7cffe2520a5518bba19227a107078d83fa725ddc | refs/heads/master | 1,615,637,696,376 | 1,583,953,063,000 | 1,583,953,063,000 | 246,680,271 | 0 | 0 | Apache-2.0 | 1,583,960,875,000 | 1,583,960,875,000 | null | UTF-8 | Lean | false | false | 3,490 | lean |
/-
Copyright (c) 2017 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Tim Baumann, Stephen Morgan, Scott Morrison
Defines a functor between categories.
(As it is a 'bundled' object rather than the `is_functorial` typeclass parametrised
by the underlying function on objects, the name is capitalised.)
Introduces notations
`C ⥤ D` for the type of all functors from `C` to `D`.
(I would like a better arrow here, unfortunately ⇒ (`\functor`) is taken by core.)
-/
import category_theory.category tactic.reassoc_axiom
namespace category_theory
universes v v₁ v₂ v₃ u u₁ u₂ u₃ -- declare the `v`'s first; see `category_theory.category` for an explanation
/--
`functor C D` represents a functor between categories `C` and `D`.
To apply a functor `F` to an object use `F.obj X`, and to a morphism use `F.map f`.
The field `map_id'` expresses preservation of identities, and `map_comp'` expresses
functoriality; they are restated below (via `restate_axiom`) as the simp lemmas
`map_id` and `map_comp`.
-/
structure functor (C : Type u₁) [category.{v₁} C] (D : Type u₂) [category.{v₂} D] :
Type (max v₁ v₂ u₁ u₂) :=
(obj : C → D)
(map : Π {X Y : C}, (X ⟶ Y) → ((obj X) ⟶ (obj Y)))
(map_id' : ∀ (X : C), map (𝟙 X) = 𝟙 (obj X) . obviously)
(map_comp' : ∀ {X Y Z : C} (f : X ⟶ Y) (g : Y ⟶ Z), map (f ≫ g) = (map f) ≫ (map g) . obviously)
-- A functor is basically a function, so give ⥤ a similar precedence to → (25).
-- For example, `C × D ⥤ E` should parse as `(C × D) ⥤ E` not `C × (D ⥤ E)`.
infixr ` ⥤ `:26 := functor -- type as \func --
restate_axiom functor.map_id'
attribute [simp] functor.map_id
restate_axiom functor.map_comp'
attribute [reassoc, simp] functor.map_comp
namespace functor
section
variables (C : Type u₁) [𝒞 : category.{v₁} C]
include 𝒞
/-- `𝟭 C` is the identity functor on a category `C`. -/
protected def id : C ⥤ C :=
{ obj := λ X, X,
map := λ _ _ f, f }
notation `𝟭` := functor.id
variable {C}
@[simp] lemma id_obj (X : C) : (𝟭 C).obj X = X := rfl
@[simp] lemma id_map {X Y : C} (f : X ⟶ Y) : (𝟭 C).map f = f := rfl
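-- Usage sketch (added for illustration; not part of the original file): `id_obj` and
-- `id_map` hold definitionally, so iterated applications of `𝟭 C` also reduce by `rfl`.
example {X Y : C} (f : X ⟶ Y) : (𝟭 C).map ((𝟭 C).map f) = f := rfl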
end
section
variables {C : Type u₁} [𝒞 : category.{v₁} C]
{D : Type u₂} [𝒟 : category.{v₂} D]
{E : Type u₃} [ℰ : category.{v₃} E]
include 𝒞 𝒟 ℰ
/--
`F ⋙ G` is the composition of a functor `F` and a functor `G` (`F` first, then `G`).
-/
def comp (F : C ⥤ D) (G : D ⥤ E) : C ⥤ E :=
{ obj := λ X, G.obj (F.obj X),
map := λ _ _ f, G.map (F.map f) }
infixr ` ⋙ `:80 := comp
@[simp] lemma comp_obj (F : C ⥤ D) (G : D ⥤ E) (X : C) : (F ⋙ G).obj X = G.obj (F.obj X) := rfl
@[simp] lemma comp_map (F : C ⥤ D) (G : D ⥤ E) {X Y : C} (f : X ⟶ Y) :
(F ⋙ G).map f = G.map (F.map f) := rfl
omit ℰ
-- These are not simp lemmas because rewriting along equalities between functors
-- is not necessarily a good idea.
-- Natural isomorphisms are also provided in `whiskering.lean`.
protected lemma comp_id (F : C ⥤ D) : F ⋙ (𝟭 D) = F := by cases F; refl
protected lemma id_comp (F : C ⥤ D) : (𝟭 C) ⋙ F = F := by cases F; refl
end
section
variables (C : Type u₁) [𝒞 : category.{v₁} C]
include 𝒞
@[simp] def ulift_down : (ulift.{u₂} C) ⥤ C :=
{ obj := λ X, X.down,
map := λ X Y f, f }
@[simp] def ulift_up : C ⥤ (ulift.{u₂} C) :=
{ obj := λ X, ⟨ X ⟩,
map := λ X Y f, f }
end
end functor
end category_theory
| bd003d31c6e8374ebecb44cfee1614eaf9113570 | 0845ae2ca02071debcfd4ac24be871236c01784f | /library/init/lean/compiler/inlineattrs.lean | 3a14aede8714ed8203f86b6b14af89ea36ec7d80 | ["Apache-2.0"] | permissive | GaloisInc/lean4 | 74c267eb0e900bfaa23df8de86039483ecbd60b7 | 228ddd5fdcd98dd4e9c009f425284e86917938aa | refs/heads/master | 1,643,131,356,301 | 1,562,715,572,000 | 1,562,715,572,000 | 192,390,898 | 0 | 0 | null | 1,560,792,750,000 | 1,560,792,749,000 | null | UTF-8 | Lean | false | false | 2,535 | lean |
/-
Copyright (c) 2019 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
prelude
import init.lean.attributes
import init.lean.compiler.util
namespace Lean
namespace Compiler
inductive InlineAttributeKind
| inline | noinline | macroInline | inlineIfReduce
namespace InlineAttributeKind
instance : Inhabited InlineAttributeKind := ⟨InlineAttributeKind.inline⟩
protected def beq : InlineAttributeKind → InlineAttributeKind → Bool
| inline inline := true
| noinline noinline := true
| macroInline macroInline := true
| inlineIfReduce inlineIfReduce := true
| _ _ := false
instance : HasBeq InlineAttributeKind := ⟨InlineAttributeKind.beq⟩
end InlineAttributeKind
def mkInlineAttrs : IO (EnumAttributes InlineAttributeKind) :=
registerEnumAttributes `inlineAttrs
[(`inline, "mark definition to always be inlined", InlineAttributeKind.inline),
(`inlineIfReduce, "mark definition to be inlined when resultant term after reduction is not a `cases_on` application", InlineAttributeKind.inlineIfReduce),
(`noinline, "mark definition to never be inlined", InlineAttributeKind.noinline),
(`macroInline, "mark definition to always be inlined before ANF conversion", InlineAttributeKind.macroInline)]
(fun env declName _ => checkIsDefinition env declName)
@[init mkInlineAttrs]
constant inlineAttrs : EnumAttributes InlineAttributeKind := default _
private partial def hasInlineAttrAux (env : Environment) (kind : InlineAttributeKind) : Name → Bool
| n :=
/- We never inline auxiliary declarations created by eager lambda lifting -/
if isEagerLambdaLiftingName n then false
else match inlineAttrs.getValue env n with
| some k => kind == k
| none => if n.isInternal then hasInlineAttrAux n.getPrefix else false
@[export lean.has_inline_attribute_core]
def hasInlineAttribute (env : Environment) (n : Name) : Bool :=
hasInlineAttrAux env InlineAttributeKind.inline n
@[export lean.has_inline_if_reduce_attribute_core]
def hasInlineIfReduceAttribute (env : Environment) (n : Name) : Bool :=
hasInlineAttrAux env InlineAttributeKind.inlineIfReduce n
@[export lean.has_noinline_attribute_core]
def hasNoInlineAttribute (env : Environment) (n : Name) : Bool :=
hasInlineAttrAux env InlineAttributeKind.noinline n
@[export lean.has_macro_inline_attribute_core]
def hasMacroInlineAttribute (env : Environment) (n : Name) : Bool :=
hasInlineAttrAux env InlineAttributeKind.macroInline n
end Compiler
end Lean
| a7d592511023f8f79445f2e9a63b3a0aba463a94 | 556aeb81a103e9e0ac4e1fe0ce1bc6e6161c3c5e | /src/starkware/cairo/common/cairo_secp/verification/verification/signature_recover_public_key_unreduced_mul_soundness.lean | 7c4dcf8b30572eae68b8826dc679295ab4e2f775 | [] | permissive | starkware-libs/formal-proofs | d6b731604461bf99e6ba820e68acca62a21709e8 | f5fa4ba6a471357fd171175183203d0b437f6527 | refs/heads/master | 1,691,085,444,753 | 1,690,507,386,000 | 1,690,507,386,000 | 410,476,629 | 32 | 9 | Apache-2.0 | 1,690,506,773,000 | 1,632,639,790,000 | Lean | UTF-8 | Lean | false | false | 2,684 | lean |
/-
File: signature_recover_public_key_unreduced_mul_soundness.lean
Autogenerated file.
-/
import starkware.cairo.lean.semantics.soundness.hoare
import .signature_recover_public_key_code
import ..signature_recover_public_key_spec
open tactic
open starkware.cairo.common.cairo_secp.field
open starkware.cairo.common.cairo_secp.bigint
open starkware.cairo.common.cairo_secp.constants
variables {F : Type} [field F] [decidable_eq F] [prelude_hyps F]
variable mem : F → F
variable σ : register_state F
/- starkware.cairo.common.cairo_secp.field.unreduced_mul autogenerated soundness theorem -/
theorem auto_sound_unreduced_mul
-- arguments
(a b : BigInt3 F)
-- code is in memory at σ.pc
(h_mem : mem_at mem code_unreduced_mul σ.pc)
-- input arguments on the stack
(hin_a : a = cast_BigInt3 mem (σ.fp - 8))
(hin_b : b = cast_BigInt3 mem (σ.fp - 5))
-- conclusion
: ensures_ret mem σ (λ κ τ, τ.ap = σ.ap + 17 ∧ spec_unreduced_mul mem κ a b (cast_UnreducedBigInt3 mem (τ.ap - 3))) :=
begin
apply ensures_of_ensuresb, intro νbound,
have h_mem_rec := h_mem,
unpack_memory code_unreduced_mul at h_mem with ⟨hpc0, hpc1, hpc2, hpc3, hpc4, hpc5, hpc6, hpc7, hpc8, hpc9, hpc10, hpc11, hpc12, hpc13, hpc14, hpc15, hpc16, hpc17, hpc18, hpc19⟩,
-- return
step_assert_eq hpc0 with hret0,
step_assert_eq hpc1 with hret1,
step_assert_eq hpc2 with hret2,
step_assert_eq hpc3 with hret3,
step_assert_eq hpc4 hpc5 with hret4,
step_assert_eq hpc6 with hret5,
step_assert_eq hpc7 with hret6,
step_assert_eq hpc8 with hret7,
step_assert_eq hpc9 hpc10 with hret8,
step_assert_eq hpc11 with hret9,
step_assert_eq hpc12 with hret10,
step_assert_eq hpc13 with hret11,
step_assert_eq hpc14 with hret12,
step_assert_eq hpc15 with hret13,
step_assert_eq hpc16 with hret14,
step_assert_eq hpc17 with hret15,
step_assert_eq hpc18 with hret16,
step_ret hpc19,
-- finish
step_done, use_only [rfl, rfl],
split, refl,
-- Final Proof
-- user-provided reduction
suffices auto_spec: auto_spec_unreduced_mul mem _ a b _,
{ apply sound_unreduced_mul, apply auto_spec },
-- prove the auto generated assertion
dsimp [auto_spec_unreduced_mul],
try { norm_num1 }, try { arith_simps },
try { split, linarith },
try { ensures_simps; try { simp only [add_neg_eq_sub, hin_a, hin_b] }, },
try { dsimp [cast_BigInt3, cast_UnreducedBigInt3] },
try { arith_simps }, try { simp only [hret0, hret1, hret2, hret3, hret4, hret5, hret6, hret7, hret8, hret9, hret10, hret11, hret12, hret13, hret14, hret15, hret16] },
try { arith_simps; try { split }; triv <|> refl <|> simp <|> abel; try { norm_num } },
end
| 2097559b9b2072c19e39d8d51ce55b167fec7e1f | 57c233acf9386e610d99ed20ef139c5f97504ba3 | /src/measure_theory/function/l2_space.lean | 1f500c7d371c7beb46162c7df38697fe1a01a4a7 | ["Apache-2.0"] | permissive | robertylewis/mathlib | 3d16e3e6daf5ddde182473e03a1b601d2810952c | 1d13f5b932f5e40a8308e3840f96fc882fae01f0 | refs/heads/master | 1,651,379,945,369 | 1,644,276,960,000 | 1,644,276,960,000 | 98,875,504 | 0 | 0 | Apache-2.0 | 1,644,253,514,000 | 1,501,495,700,000 | Lean | UTF-8 | Lean | false | false | 9,983 | lean |
/-
Copyright (c) 2021 Rémy Degenne. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Rémy Degenne
-/
import analysis.inner_product_space.basic
import measure_theory.integral.set_integral
/-! # `L^2` space
If `E` is an inner product space over `𝕜` (`ℝ` or `ℂ`), then `Lp E 2 μ` (defined in `lp_space.lean`)
is also an inner product space, with inner product defined as `inner f g = ∫ a, ⟪f a, g a⟫ ∂μ`.
### Main results
* `mem_L1_inner` : for `f` and `g` in `Lp E 2 μ`, the pointwise inner product `λ x, ⟪f x, g x⟫`
belongs to `Lp 𝕜 1 μ`.
* `integrable_inner` : for `f` and `g` in `Lp E 2 μ`, the pointwise inner product `λ x, ⟪f x, g x⟫`
is integrable.
* `L2.inner_product_space` : `Lp E 2 μ` is an inner product space.
-/
noncomputable theory
open topological_space measure_theory measure_theory.Lp
open_locale nnreal ennreal measure_theory
namespace measure_theory
namespace L2
variables {α E F 𝕜 : Type*} [is_R_or_C 𝕜] [measurable_space α] {μ : measure α}
[measurable_space E] [inner_product_space 𝕜 E] [borel_space E] [second_countable_topology E]
[normed_group F] [measurable_space F] [borel_space F] [second_countable_topology F]
local notation `⟪`x`, `y`⟫` := @inner 𝕜 _ _ x y
lemma snorm_rpow_two_norm_lt_top (f : Lp F 2 μ) : snorm (λ x, ∥f x∥ ^ (2 : ℝ)) 1 μ < ∞ :=
begin
have h_two : ennreal.of_real (2 : ℝ) = 2, by simp [zero_le_one],
rw [snorm_norm_rpow f zero_lt_two, one_mul, h_two],
exact ennreal.rpow_lt_top_of_nonneg zero_le_two (Lp.snorm_ne_top f),
end
lemma snorm_inner_lt_top (f g : α →₂[μ] E) : snorm (λ (x : α), ⟪f x, g x⟫) 1 μ < ∞ :=
begin
have h : ∀ x, is_R_or_C.abs ⟪f x, g x⟫ ≤ ∥f x∥ * ∥g x∥, from λ x, abs_inner_le_norm _ _,
have h' : ∀ x, is_R_or_C.abs ⟪f x, g x⟫ ≤ is_R_or_C.abs (∥f x∥^2 + ∥g x∥^2),
{ refine λ x, le_trans (h x) _,
rw [is_R_or_C.abs_to_real, abs_eq_self.mpr],
swap, { exact add_nonneg (by simp) (by simp), },
refine le_trans _ (half_le_self (add_nonneg (sq_nonneg _) (sq_nonneg _))),
refine (le_div_iff (@zero_lt_two ℝ _ _)).mpr ((le_of_eq _).trans (two_mul_le_add_sq _ _)),
ring, },
simp_rw [← is_R_or_C.norm_eq_abs, ← real.rpow_nat_cast] at h',
refine (snorm_mono_ae (ae_of_all _ h')).trans_lt ((snorm_add_le _ _ le_rfl).trans_lt _),
{ exact (Lp.ae_measurable f).norm.pow_const _ },
{ exact (Lp.ae_measurable g).norm.pow_const _ },
simp only [nat.cast_bit0, ennreal.add_lt_top, nat.cast_one],
exact ⟨snorm_rpow_two_norm_lt_top f, snorm_rpow_two_norm_lt_top g⟩,
end
section inner_product_space
open_locale complex_conjugate
include 𝕜
instance : has_inner 𝕜 (α →₂[μ] E) := ⟨λ f g, ∫ a, ⟪f a, g a⟫ ∂μ⟩
lemma inner_def (f g : α →₂[μ] E) : ⟪f, g⟫ = ∫ a : α, ⟪f a, g a⟫ ∂μ := rfl
lemma integral_inner_eq_sq_snorm (f : α →₂[μ] E) :
∫ a, ⟪f a, f a⟫ ∂μ = ennreal.to_real ∫⁻ a, (nnnorm (f a) : ℝ≥0∞) ^ (2:ℝ) ∂μ :=
begin
simp_rw inner_self_eq_norm_sq_to_K,
norm_cast,
rw integral_eq_lintegral_of_nonneg_ae,
swap, { exact filter.eventually_of_forall (λ x, sq_nonneg _), },
swap, { exact (Lp.ae_measurable f).norm.pow_const _ },
congr,
ext1 x,
have h_two : (2 : ℝ) = ((2 : ℕ) : ℝ), by simp,
rw [← real.rpow_nat_cast _ 2, ← h_two,
← ennreal.of_real_rpow_of_nonneg (norm_nonneg _) zero_le_two, of_real_norm_eq_coe_nnnorm],
norm_cast,
end
private lemma norm_sq_eq_inner' (f : α →₂[μ] E) : ∥f∥ ^ 2 = is_R_or_C.re ⟪f, f⟫ :=
begin
have h_two : (2 : ℝ≥0∞).to_real = 2 := by simp,
rw [inner_def, integral_inner_eq_sq_snorm, norm_def, ← ennreal.to_real_pow, is_R_or_C.of_real_re,
ennreal.to_real_eq_to_real (ennreal.pow_ne_top (Lp.snorm_ne_top f)) _],
{ rw [←ennreal.rpow_nat_cast, snorm_eq_snorm' ennreal.two_ne_zero ennreal.two_ne_top, snorm',
← ennreal.rpow_mul, one_div, h_two],
simp, },
{ refine (lintegral_rpow_nnnorm_lt_top_of_snorm'_lt_top zero_lt_two _).ne,
rw [← h_two, ← snorm_eq_snorm' ennreal.two_ne_zero ennreal.two_ne_top],
exact Lp.snorm_lt_top f, },
end
lemma mem_L1_inner (f g : α →₂[μ] E) :
ae_eq_fun.mk (λ x, ⟪f x, g x⟫) ((Lp.ae_measurable f).inner (Lp.ae_measurable g)) ∈ Lp 𝕜 1 μ :=
by { simp_rw [mem_Lp_iff_snorm_lt_top, snorm_ae_eq_fun], exact snorm_inner_lt_top f g, }
lemma integrable_inner (f g : α →₂[μ] E) : integrable (λ x : α, ⟪f x, g x⟫) μ :=
(integrable_congr (ae_eq_fun.coe_fn_mk (λ x, ⟪f x, g x⟫)
((Lp.ae_measurable f).inner (Lp.ae_measurable g)))).mp
(ae_eq_fun.integrable_iff_mem_L1.mpr (mem_L1_inner f g))
private lemma add_left' (f f' g : α →₂[μ] E) : ⟪f + f', g⟫ = inner f g + inner f' g :=
begin
simp_rw [inner_def, ← integral_add (integrable_inner f g) (integrable_inner f' g),
←inner_add_left],
refine integral_congr_ae ((coe_fn_add f f').mono (λ x hx, _)),
congr,
rwa pi.add_apply at hx,
end
private lemma smul_left' (f g : α →₂[μ] E) (r : 𝕜) :
⟪r • f, g⟫ = conj r * inner f g :=
begin
rw [inner_def, inner_def, ← smul_eq_mul, ← integral_smul],
refine integral_congr_ae ((coe_fn_smul r f).mono (λ x hx, _)),
rw [smul_eq_mul, ← inner_smul_left],
congr,
rwa pi.smul_apply at hx,
end
instance inner_product_space : inner_product_space 𝕜 (α →₂[μ] E) :=
{ norm_sq_eq_inner := norm_sq_eq_inner',
conj_sym := λ _ _, by simp_rw [inner_def, ← integral_conj, inner_conj_sym],
add_left := add_left',
smul_left := smul_left', }
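/- Usage sketch (added for illustration; not part of the original file): with the instance
   above, generic inner product space lemmas such as `inner_add_left` apply to `α →₂[μ] E`. -/
example (f g h : α →₂[μ] E) : ⟪f + g, h⟫ = ⟪f, h⟫ + ⟪g, h⟫ := inner_add_left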
end inner_product_space
section indicator_const_Lp
variables (𝕜) {s : set α}
/-- The inner product in `L2` of the indicator of a set `indicator_const_Lp 2 hs hμs c` and `f` is
equal to the integral of the inner product over `s`: `∫ x in s, ⟪c, f x⟫ ∂μ`. -/
lemma inner_indicator_const_Lp_eq_set_integral_inner (f : Lp E 2 μ) (hs : measurable_set s) (c : E)
(hμs : μ s ≠ ∞) :
(⟪indicator_const_Lp 2 hs hμs c, f⟫ : 𝕜) = ∫ x in s, ⟪c, f x⟫ ∂μ :=
begin
rw [inner_def, ← integral_add_compl hs (L2.integrable_inner _ f)],
have h_left : ∫ x in s, ⟪(indicator_const_Lp 2 hs hμs c) x, f x⟫ ∂μ = ∫ x in s, ⟪c, f x⟫ ∂μ,
{ suffices h_ae_eq : ∀ᵐ x ∂μ, x ∈ s → ⟪indicator_const_Lp 2 hs hμs c x, f x⟫ = ⟪c, f x⟫,
from set_integral_congr_ae hs h_ae_eq,
have h_indicator : ∀ᵐ (x : α) ∂μ, x ∈ s → (indicator_const_Lp 2 hs hμs c x) = c,
from indicator_const_Lp_coe_fn_mem,
refine h_indicator.mono (λ x hx hxs, _),
congr,
exact hx hxs, },
have h_right : ∫ x in sᶜ, ⟪(indicator_const_Lp 2 hs hμs c) x, f x⟫ ∂μ = 0,
{ suffices h_ae_eq : ∀ᵐ x ∂μ, x ∉ s → ⟪indicator_const_Lp 2 hs hμs c x, f x⟫ = 0,
{ simp_rw ← set.mem_compl_iff at h_ae_eq,
suffices h_int_zero : ∫ x in sᶜ, inner (indicator_const_Lp 2 hs hμs c x) (f x) ∂μ
= ∫ x in sᶜ, (0 : 𝕜) ∂μ,
{ rw h_int_zero,
simp, },
exact set_integral_congr_ae hs.compl h_ae_eq, },
have h_indicator : ∀ᵐ (x : α) ∂μ, x ∉ s → (indicator_const_Lp 2 hs hμs c x) = 0,
from indicator_const_Lp_coe_fn_nmem,
refine h_indicator.mono (λ x hx hxs, _),
rw hx hxs,
exact inner_zero_left, },
rw [h_left, h_right, add_zero],
end
/-- The inner product in `L2` of the indicator of a set `indicator_const_Lp 2 hs hμs c` and `f` is
equal to the inner product of the constant `c` and the integral of `f` over `s`. -/
lemma inner_indicator_const_Lp_eq_inner_set_integral [complete_space E] [normed_space ℝ E]
(hs : measurable_set s) (hμs : μ s ≠ ∞) (c : E) (f : Lp E 2 μ) :
(⟪indicator_const_Lp 2 hs hμs c, f⟫ : 𝕜) = ⟪c, ∫ x in s, f x ∂μ⟫ :=
by rw [← integral_inner (integrable_on_Lp_of_measure_ne_top f fact_one_le_two_ennreal.elim hμs),
L2.inner_indicator_const_Lp_eq_set_integral_inner]
variables {𝕜}
/-- The inner product in `L2` of the indicator of a set `indicator_const_Lp 2 hs hμs (1 : 𝕜)` and
a real or complex function `f` is equal to the integral of `f` over `s`. -/
lemma inner_indicator_const_Lp_one (hs : measurable_set s) (hμs : μ s ≠ ∞) (f : Lp 𝕜 2 μ) :
⟪indicator_const_Lp 2 hs hμs (1 : 𝕜), f⟫ = ∫ x in s, f x ∂μ :=
by { rw L2.inner_indicator_const_Lp_eq_inner_set_integral 𝕜 hs hμs (1 : 𝕜) f, simp, }
end indicator_const_Lp
end L2
section inner_continuous
variables {α : Type*} [topological_space α] [measure_space α] [borel_space α] {𝕜 : Type*}
[is_R_or_C 𝕜]
variables (μ : measure α) [is_finite_measure μ]
open_locale bounded_continuous_function complex_conjugate
local notation `⟪`x`, `y`⟫` := @inner 𝕜 (α →₂[μ] 𝕜) _ x y
/-- For bounded continuous functions `f`, `g` on a finite-measure topological space `α`, the L^2
inner product is the integral of their pointwise inner product. -/
lemma bounded_continuous_function.inner_to_Lp (f g : α →ᵇ 𝕜) :
⟪bounded_continuous_function.to_Lp 2 μ 𝕜 f, bounded_continuous_function.to_Lp 2 μ 𝕜 g⟫
= ∫ x, conj (f x) * g x ∂μ :=
begin
apply integral_congr_ae,
have hf_ae := f.coe_fn_to_Lp μ,
have hg_ae := g.coe_fn_to_Lp μ,
filter_upwards [hf_ae, hg_ae] with _ hf hg,
rw [hf, hg],
simp
end
variables [compact_space α]
/-- For continuous functions `f`, `g` on a compact, finite-measure topological space `α`, the L^2
inner product is the integral of their pointwise inner product. -/
lemma continuous_map.inner_to_Lp (f g : C(α, 𝕜)) :
⟪continuous_map.to_Lp 2 μ 𝕜 f, continuous_map.to_Lp 2 μ 𝕜 g⟫
= ∫ x, conj (f x) * g x ∂μ :=
begin
apply integral_congr_ae,
have hf_ae := f.coe_fn_to_Lp μ,
have hg_ae := g.coe_fn_to_Lp μ,
filter_upwards [hf_ae, hg_ae] with _ hf hg,
rw [hf, hg],
simp
end
end inner_continuous
end measure_theory
| 1573e7e84c9f82a70654db2d73705129776c0e37 | 49bd2218ae088932d847f9030c8dbff1c5607bb7 | /src/data/padics/padic_numbers.lean | ab356752c494abe4e58f950c151529d38b6905f9 | ["Apache-2.0"] | permissive | FredericLeRoux/mathlib | e8f696421dd3e4edc8c7edb3369421c8463d7bac | 3645bf8fb426757e0a20af110d1fdded281d286e | refs/heads/master | 1,607,062,870,732 | 1,578,513,186,000 | 1,578,513,186,000 | 231,653,181 | 0 | 0 | Apache-2.0 | 1,578,080,327,000 | 1,578,080,326,000 | null | UTF-8 | Lean | false | false | 32,491 | lean |
/-
Copyright (c) 2018 Robert Y. Lewis. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Robert Y. Lewis
-/
import data.real.cau_seq_completion
import data.padics.padic_norm algebra.archimedean analysis.normed_space.basic
import tactic.norm_cast
/-!
# p-adic numbers
This file defines the p-adic numbers (rationals) ℚ_p as the completion of ℚ with respect to the
p-adic norm. We show that the p-adic norm on ℚ extends to ℚ_p, that ℚ is embedded in ℚ_p, and that
ℚ_p is Cauchy complete.
## Important definitions
* `padic` : the type of p-adic numbers
* `padic_norm_e` : the rational valued p-adic norm on ℚ_p
## Notation
We introduce the notation ℚ_[p] for the p-adic numbers.
## Implementation notes
Much, but not all, of this file assumes that `p` is prime. This assumption is inferred automatically
by taking (prime p) as a type class argument.
We use the same concrete Cauchy sequence construction that is used to construct ℝ. ℚ_p inherits a
field structure from this construction. The extension of the norm on ℚ to ℚ_p is *not* analogous to
extending the absolute value to ℝ, and hence the proof that ℚ_p is complete is different from the
proof that ℝ is complete.
A small special-purpose simplification tactic, `padic_index_simp`, is used to manipulate sequence
indices in the proof that the norm extends.
`padic_norm_e` is the rational-valued p-adic norm on ℚ_p. To instantiate ℚ_p as a normed field, we
must cast this into a ℝ-valued norm. The ℝ-valued norm, using notation ∥ ∥ from normed spaces, is
the canonical representation of this norm.
Coercions from ℚ to ℚ_p are set up to work with the `norm_cast` tactic.
## References
* [F. Q. Gouvêa, *p-adic numbers*][gouvea1997]
* [R. Y. Lewis, *A formal proof of Hensel's lemma over the p-adic integers*][lewis2019]
* <https://en.wikipedia.org/wiki/P-adic_number>
## Tags
p-adic, p adic, padic, norm, valuation, cauchy, completion, p-adic completion
-/
noncomputable theory
open_locale classical
open nat multiplicity padic_norm cau_seq cau_seq.completion metric
/-- The type of Cauchy sequences of rationals with respect to the p-adic norm. -/
@[reducible] def padic_seq (p : ℕ) [p.prime] := cau_seq _ (padic_norm p)
namespace padic_seq
section
variables {p : ℕ} [nat.prime p]
/-- The p-adic norm of the entries of a nonzero Cauchy sequence of rationals is eventually
constant. -/
lemma stationary {f : cau_seq ℚ (padic_norm p)} (hf : ¬ f ≈ 0) :
∃ N, ∀ m n, N ≤ m → N ≤ n → padic_norm p (f n) = padic_norm p (f m) :=
have ∃ ε > 0, ∃ N1, ∀ j ≥ N1, ε ≤ padic_norm p (f j),
from cau_seq.abv_pos_of_not_lim_zero $ not_lim_zero_of_not_congr_zero hf,
let ⟨ε, hε, N1, hN1⟩ := this,
⟨N2, hN2⟩ := cau_seq.cauchy₂ f hε in
⟨ max N1 N2,
λ n m hn hm,
have padic_norm p (f n - f m) < ε, from hN2 _ _ (max_le_iff.1 hn).2 (max_le_iff.1 hm).2,
have padic_norm p (f n - f m) < padic_norm p (f n),
from lt_of_lt_of_le this $ hN1 _ (max_le_iff.1 hn).1,
have padic_norm p (f n - f m) < max (padic_norm p (f n)) (padic_norm p (f m)),
from lt_max_iff.2 (or.inl this),
begin
by_contradiction hne,
rw ←padic_norm.neg p (f m) at hne,
have hnam := add_eq_max_of_ne p hne,
rw [padic_norm.neg, max_comm] at hnam,
rw ←hnam at this,
apply _root_.lt_irrefl _ (by simp at this; exact this)
end ⟩
/-- For all n ≥ stationary_point f hf, the p-adic norm of f n is the same. -/
def stationary_point {f : padic_seq p} (hf : ¬ f ≈ 0) : ℕ :=
classical.some $ stationary hf
lemma stationary_point_spec {f : padic_seq p} (hf : ¬ f ≈ 0) :
∀ {m n}, m ≥ stationary_point hf → n ≥ stationary_point hf →
padic_norm p (f n) = padic_norm p (f m) :=
classical.some_spec $ stationary hf
/-- Since the norm of the entries of a Cauchy sequence is eventually stationary, we can lift the norm
to sequences. -/
def norm (f : padic_seq p) : ℚ :=
if hf : f ≈ 0 then 0 else padic_norm p (f (stationary_point hf))
lemma norm_zero_iff (f : padic_seq p) : f.norm = 0 ↔ f ≈ 0 :=
begin
constructor,
{ intro h,
by_contradiction hf,
unfold norm at h, split_ifs at h,
apply hf,
intros ε hε,
existsi stationary_point hf,
intros j hj,
have heq := stationary_point_spec hf (le_refl _) hj,
simpa [h, heq] },
{ intro h,
simp [norm, h] }
end
end
section embedding
open cau_seq
variables {p : ℕ} [nat.prime p]
lemma equiv_zero_of_val_eq_of_equiv_zero {f g : padic_seq p}
(h : ∀ k, padic_norm p (f k) = padic_norm p (g k)) (hf : f ≈ 0) : g ≈ 0 :=
λ ε hε, let ⟨i, hi⟩ := hf _ hε in
⟨i, λ j hj, by simpa [h] using hi _ hj⟩
lemma norm_nonzero_of_not_equiv_zero {f : padic_seq p} (hf : ¬ f ≈ 0) :
f.norm ≠ 0 :=
hf ∘ f.norm_zero_iff.1
lemma norm_eq_norm_app_of_nonzero {f : padic_seq p} (hf : ¬ f ≈ 0) :
∃ k, f.norm = padic_norm p k ∧ k ≠ 0 :=
have heq : f.norm = padic_norm p (f $ stationary_point hf), by simp [norm, hf],
⟨f $ stationary_point hf, heq,
λ h, norm_nonzero_of_not_equiv_zero hf (by simpa [h] using heq)⟩
lemma not_lim_zero_const_of_nonzero {q : ℚ} (hq : q ≠ 0) : ¬ lim_zero (const (padic_norm p) q) :=
λ h', hq $ const_lim_zero.1 h'
lemma not_equiv_zero_const_of_nonzero {q : ℚ} (hq : q ≠ 0) : ¬ (const (padic_norm p) q) ≈ 0 :=
λ h : lim_zero (const (padic_norm p) q - 0), not_lim_zero_const_of_nonzero hq $ by simpa using h
lemma norm_nonneg (f : padic_seq p) : f.norm ≥ 0 :=
if hf : f ≈ 0 then by simp [hf, norm]
else by simp [norm, hf, padic_norm.nonneg]
/-- An auxiliary lemma for manipulating sequence indices. -/
lemma lift_index_left_left {f : padic_seq p} (hf : ¬ f ≈ 0) (v2 v3 : ℕ) :
padic_norm p (f (stationary_point hf)) =
padic_norm p (f (max (stationary_point hf) (max v2 v3))) :=
let i := max (stationary_point hf) (max v2 v3) in
begin
apply stationary_point_spec hf,
{ apply le_max_left },
{ apply le_refl }
end
/-- An auxiliary lemma for manipulating sequence indices. -/
lemma lift_index_left {f : padic_seq p} (hf : ¬ f ≈ 0) (v1 v3 : ℕ) :
padic_norm p (f (stationary_point hf)) =
padic_norm p (f (max v1 (max (stationary_point hf) v3))) :=
let i := max v1 (max (stationary_point hf) v3) in
begin
apply stationary_point_spec hf,
{ apply le_trans,
{ apply le_max_left _ v3 },
{ apply le_max_right } },
{ apply le_refl }
end
/-- An auxiliary lemma for manipulating sequence indices. -/
lemma lift_index_right {f : padic_seq p} (hf : ¬ f ≈ 0) (v1 v2 : ℕ) :
padic_norm p (f (stationary_point hf)) =
padic_norm p (f (max v1 (max v2 (stationary_point hf)))) :=
let i := max v1 (max v2 (stationary_point hf)) in
begin
apply stationary_point_spec hf,
{ apply le_trans,
{ apply le_max_right v2 },
{ apply le_max_right } },
{ apply le_refl }
end
end embedding
end padic_seq
section
open padic_seq
private meta def index_simp_core (hh hf hg : expr)
(at_ : interactive.loc := interactive.loc.ns [none]) : tactic unit :=
do [v1, v2, v3] ← [hh, hf, hg].mmap
(λ n, tactic.mk_app ``stationary_point [n] <|> return n),
e1 ← tactic.mk_app ``lift_index_left_left [hh, v2, v3] <|> return `(true),
e2 ← tactic.mk_app ``lift_index_left [hf, v1, v3] <|> return `(true),
e3 ← tactic.mk_app ``lift_index_right [hg, v1, v2] <|> return `(true),
sl ← [e1, e2, e3].mfoldl (λ s e, simp_lemmas.add s e) simp_lemmas.mk,
when at_.include_goal (tactic.simp_target sl),
hs ← at_.get_locals, hs.mmap' (tactic.simp_hyp sl [])
/--
This is a special-purpose tactic that lifts padic_norm (f (stationary_point f)) to
padic_norm (f (max _ _ _)).
-/
meta def tactic.interactive.padic_index_simp (l : interactive.parse interactive.types.pexpr_list)
(at_ : interactive.parse interactive.types.location) : tactic unit :=
do [h, f, g] ← l.mmap tactic.i_to_expr,
index_simp_core h f g at_
end
namespace padic_seq
section embedding
open cau_seq
variables {p : ℕ} [hp : nat.prime p]
include hp
lemma norm_mul (f g : padic_seq p) : (f * g).norm = f.norm * g.norm :=
if hf : f ≈ 0 then
have hg : f * g ≈ 0, from mul_equiv_zero' _ hf,
by simp [hf, hg, norm]
else if hg : g ≈ 0 then
have hf : f * g ≈ 0, from mul_equiv_zero _ hg,
by simp [hf, hg, norm]
else
have hfg : ¬ f * g ≈ 0, by apply mul_not_equiv_zero; assumption,
begin
unfold norm,
split_ifs,
padic_index_simp [hfg, hf, hg],
apply padic_norm.mul
end
lemma eq_zero_iff_equiv_zero (f : padic_seq p) : mk f = 0 ↔ f ≈ 0 :=
mk_eq
lemma ne_zero_iff_nequiv_zero (f : padic_seq p) : mk f ≠ 0 ↔ ¬ f ≈ 0 :=
not_iff_not.2 (eq_zero_iff_equiv_zero _)
lemma norm_const (q : ℚ) : norm (const (padic_norm p) q) = padic_norm p q :=
if hq : q = 0 then
have (const (padic_norm p) q) ≈ 0,
by simp [hq]; apply setoid.refl (const (padic_norm p) 0),
by subst hq; simp [norm, this]
else
have ¬ (const (padic_norm p) q) ≈ 0, from not_equiv_zero_const_of_nonzero hq,
by simp [norm, this]
lemma norm_image (a : padic_seq p) (ha : ¬ a ≈ 0) :
(∃ (n : ℤ), a.norm = ↑p ^ (-n)) :=
let ⟨k, hk, hk'⟩ := norm_eq_norm_app_of_nonzero ha in
by simpa [hk] using padic_norm.image p hk'
lemma norm_one : norm (1 : padic_seq p) = 1 :=
have h1 : ¬ (1 : padic_seq p) ≈ 0, from one_not_equiv_zero _,
by simp [h1, norm, hp.one_lt]
private lemma norm_eq_of_equiv_aux {f g : padic_seq p} (hf : ¬ f ≈ 0) (hg : ¬ g ≈ 0) (hfg : f ≈ g)
(h : padic_norm p (f (stationary_point hf)) ≠ padic_norm p (g (stationary_point hg)))
(hgt : padic_norm p (f (stationary_point hf)) > padic_norm p (g (stationary_point hg))) :
false :=
begin
have hpn : padic_norm p (f (stationary_point hf)) - padic_norm p (g (stationary_point hg)) > 0,
from sub_pos_of_lt hgt,
cases hfg _ hpn with N hN,
let i := max N (max (stationary_point hf) (stationary_point hg)),
have hi : i ≥ N, from le_max_left _ _,
have hN' := hN _ hi,
padic_index_simp [N, hf, hg] at hN' h hgt,
have hpne : padic_norm p (f i) ≠ padic_norm p (-(g i)),
by rwa [ ←padic_norm.neg p (g i)] at h,
let hpnem := add_eq_max_of_ne p hpne,
have hpeq : padic_norm p ((f - g) i) = max (padic_norm p (f i)) (padic_norm p (g i)),
{ rwa padic_norm.neg at hpnem },
rw [hpeq, max_eq_left_of_lt hgt] at hN',
have : padic_norm p (f i) < padic_norm p (f i),
{ apply lt_of_lt_of_le hN', apply sub_le_self, apply padic_norm.nonneg },
exact lt_irrefl _ this
end
private lemma norm_eq_of_equiv {f g : padic_seq p} (hf : ¬ f ≈ 0) (hg : ¬ g ≈ 0) (hfg : f ≈ g) :
padic_norm p (f (stationary_point hf)) = padic_norm p (g (stationary_point hg)) :=
begin
by_contradiction h,
cases (decidable.em (padic_norm p (f (stationary_point hf)) >
padic_norm p (g (stationary_point hg))))
with hgt hngt,
{ exact norm_eq_of_equiv_aux hf hg hfg h hgt },
{ apply norm_eq_of_equiv_aux hg hf (setoid.symm hfg) (ne.symm h),
apply lt_of_le_of_ne,
apply le_of_not_gt hngt,
apply h }
end
theorem norm_equiv {f g : padic_seq p} (hfg : f ≈ g) : f.norm = g.norm :=
if hf : f ≈ 0 then
have hg : g ≈ 0, from setoid.trans (setoid.symm hfg) hf,
by simp [norm, hf, hg]
else have hg : ¬ g ≈ 0, from hf ∘ setoid.trans hfg,
by unfold norm; split_ifs; exact norm_eq_of_equiv hf hg hfg
private lemma norm_nonarchimedean_aux {f g : padic_seq p}
(hfg : ¬ f + g ≈ 0) (hf : ¬ f ≈ 0) (hg : ¬ g ≈ 0) : (f + g).norm ≤ max (f.norm) (g.norm) :=
begin
unfold norm, split_ifs,
padic_index_simp [hfg, hf, hg],
apply padic_norm.nonarchimedean
end
theorem norm_nonarchimedean (f g : padic_seq p) : (f + g).norm ≤ max (f.norm) (g.norm) :=
if hfg : f + g ≈ 0 then
have 0 ≤ max (f.norm) (g.norm), from le_max_left_of_le (norm_nonneg _),
by simpa [hfg, norm]
else if hf : f ≈ 0 then
have hfg' : f + g ≈ g,
{ change lim_zero (f - 0) at hf,
show lim_zero (f + g - g), by simpa using hf },
have hcfg : (f + g).norm = g.norm, from norm_equiv hfg',
have hcl : f.norm = 0, from (norm_zero_iff f).2 hf,
have max (f.norm) (g.norm) = g.norm,
by rw hcl; exact max_eq_right (norm_nonneg _),
by rw [this, hcfg]
else if hg : g ≈ 0 then
have hfg' : f + g ≈ f,
{ change lim_zero (g - 0) at hg,
show lim_zero (f + g - f), by simpa [add_sub_cancel'] using hg },
have hcfg : (f + g).norm = f.norm, from norm_equiv hfg',
have hcl : g.norm = 0, from (norm_zero_iff g).2 hg,
have max (f.norm) (g.norm) = f.norm,
by rw hcl; exact max_eq_left (norm_nonneg _),
by rw [this, hcfg]
else norm_nonarchimedean_aux hfg hf hg
lemma norm_eq {f g : padic_seq p} (h : ∀ k, padic_norm p (f k) = padic_norm p (g k)) :
f.norm = g.norm :=
if hf : f ≈ 0 then
have hg : g ≈ 0, from equiv_zero_of_val_eq_of_equiv_zero h hf,
by simp [hf, hg, norm]
else
have hg : ¬ g ≈ 0, from λ hg, hf $ equiv_zero_of_val_eq_of_equiv_zero (by simp [h]) hg,
begin
simp [hg, hf, norm],
let i := max (stationary_point hf) (stationary_point hg),
have hpf : padic_norm p (f (stationary_point hf)) = padic_norm p (f i),
{ apply stationary_point_spec, apply le_max_left, apply le_refl },
have hpg : padic_norm p (g (stationary_point hg)) = padic_norm p (g i),
{ apply stationary_point_spec, apply le_max_right, apply le_refl },
rw [hpf, hpg, h]
end
lemma norm_neg (a : padic_seq p) : (-a).norm = a.norm :=
norm_eq $ by simp
lemma norm_eq_of_add_equiv_zero {f g : padic_seq p} (h : f + g ≈ 0) : f.norm = g.norm :=
have lim_zero (f + g - 0), from h,
have f ≈ -g, from show lim_zero (f - (-g)), by simpa,
have f.norm = (-g).norm, from norm_equiv this,
by simpa [norm_neg] using this
lemma add_eq_max_of_ne {f g : padic_seq p} (hfgne : f.norm ≠ g.norm) :
(f + g).norm = max f.norm g.norm :=
have hfg : ¬f + g ≈ 0, from mt norm_eq_of_add_equiv_zero hfgne,
if hf : f ≈ 0 then
have lim_zero (f - 0), from hf,
have f + g ≈ g, from show lim_zero ((f + g) - g), by simpa,
have h1 : (f+g).norm = g.norm, from norm_equiv this,
have h2 : f.norm = 0, from (norm_zero_iff _).2 hf,
by rw [h1, h2]; rw max_eq_right (norm_nonneg _)
else if hg : g ≈ 0 then
have lim_zero (g - 0), from hg,
have f + g ≈ f, from show lim_zero ((f + g) - f), by rw [add_sub_cancel']; simpa,
have h1 : (f+g).norm = f.norm, from norm_equiv this,
have h2 : g.norm = 0, from (norm_zero_iff _).2 hg,
by rw [h1, h2]; rw max_eq_left (norm_nonneg _)
else
begin
unfold norm at ⊢ hfgne, split_ifs at ⊢ hfgne,
padic_index_simp [hfg, hf, hg] at ⊢ hfgne,
apply padic_norm.add_eq_max_of_ne,
simpa [hf, hg, norm] using hfgne
end
end embedding
end padic_seq
/-- The p-adic numbers `ℚ_[p]` are the Cauchy completion of `ℚ` with respect to the p-adic norm. -/
def padic (p : ℕ) [nat.prime p] := @cau_seq.completion.Cauchy _ _ _ _ (padic_norm p) _
notation `ℚ_[` p `]` := padic p
namespace padic
section completion
variables {p : ℕ} [nat.prime p]
/-- The discrete field structure on ℚ_p is inherited from the Cauchy completion construction. -/
instance discrete_field : discrete_field (ℚ_[p]) :=
cau_seq.completion.discrete_field
-- short circuits
instance : has_zero ℚ_[p] := by apply_instance
instance : has_one ℚ_[p] := by apply_instance
instance : has_add ℚ_[p] := by apply_instance
instance : has_mul ℚ_[p] := by apply_instance
instance : has_sub ℚ_[p] := by apply_instance
instance : has_neg ℚ_[p] := by apply_instance
instance : has_div ℚ_[p] := by apply_instance
instance : add_comm_group ℚ_[p] := by apply_instance
instance : comm_ring ℚ_[p] := by apply_instance
/-- Builds the equivalence class of a Cauchy sequence of rationals. -/
def mk : padic_seq p → ℚ_[p] := quotient.mk
end completion
section completion
variables (p : ℕ) [nat.prime p]
lemma mk_eq {f g : padic_seq p} : mk f = mk g ↔ f ≈ g := quotient.eq
/-- Embeds the rational numbers in the p-adic numbers. -/
def of_rat : ℚ → ℚ_[p] := cau_seq.completion.of_rat
@[simp] lemma of_rat_add : ∀ (x y : ℚ), of_rat p (x + y) = of_rat p x + of_rat p y :=
cau_seq.completion.of_rat_add
@[simp] lemma of_rat_neg : ∀ (x : ℚ), of_rat p (-x) = -of_rat p x :=
cau_seq.completion.of_rat_neg
@[simp] lemma of_rat_mul : ∀ (x y : ℚ), of_rat p (x * y) = of_rat p x * of_rat p y :=
cau_seq.completion.of_rat_mul
@[simp] lemma of_rat_sub : ∀ (x y : ℚ), of_rat p (x - y) = of_rat p x - of_rat p y :=
cau_seq.completion.of_rat_sub
@[simp] lemma of_rat_div : ∀ (x y : ℚ), of_rat p (x / y) = of_rat p x / of_rat p y :=
cau_seq.completion.of_rat_div
@[simp] lemma of_rat_one : of_rat p 1 = 1 := rfl
@[simp] lemma of_rat_zero : of_rat p 0 = 0 := rfl
@[simp] lemma cast_eq_of_rat_of_nat (n : ℕ) : (↑n : ℚ_[p]) = of_rat p n :=
begin
induction n with n ih,
{ refl },
{ simpa using ih }
end
-- without short circuits, this needs an increase of class.instance_max_depth
@[simp] lemma cast_eq_of_rat_of_int (n : ℤ) : ↑n = of_rat p n :=
by induction n; simp
lemma cast_eq_of_rat : ∀ (q : ℚ), (↑q : ℚ_[p]) = of_rat p q
| ⟨n, d, h1, h2⟩ :=
show ↑n / ↑d = _, from
have (⟨n, d, h1, h2⟩ : ℚ) = rat.mk n d, from rat.num_denom',
by simp [this, rat.mk_eq_div, of_rat_div]
@[move_cast] lemma coe_add : ∀ {x y : ℚ}, (↑(x + y) : ℚ_[p]) = ↑x + ↑y := by simp [cast_eq_of_rat]
@[move_cast] lemma coe_neg : ∀ {x : ℚ}, (↑(-x) : ℚ_[p]) = -↑x := by simp [cast_eq_of_rat]
@[move_cast] lemma coe_mul : ∀ {x y : ℚ}, (↑(x * y) : ℚ_[p]) = ↑x * ↑y := by simp [cast_eq_of_rat]
@[move_cast] lemma coe_sub : ∀ {x y : ℚ}, (↑(x - y) : ℚ_[p]) = ↑x - ↑y := by simp [cast_eq_of_rat]
@[move_cast] lemma coe_div : ∀ {x y : ℚ}, (↑(x / y) : ℚ_[p]) = ↑x / ↑y := by simp [cast_eq_of_rat]
@[squash_cast] lemma coe_one : (↑1 : ℚ_[p]) = 1 := rfl
@[squash_cast] lemma coe_zero : (↑0 : ℚ_[p]) = 0 := rfl
lemma const_equiv {q r : ℚ} : const (padic_norm p) q ≈ const (padic_norm p) r ↔ q = r :=
⟨ λ heq : lim_zero (const (padic_norm p) (q - r)),
eq_of_sub_eq_zero $ const_lim_zero.1 heq,
λ heq, by rw heq; apply setoid.refl _ ⟩
lemma of_rat_eq {q r : ℚ} : of_rat p q = of_rat p r ↔ q = r :=
⟨(const_equiv p).1 ∘ quotient.eq.1, λ h, by rw h⟩
@[elim_cast] lemma coe_inj {q r : ℚ} : (↑q : ℚ_[p]) = ↑r ↔ q = r :=
by simp [cast_eq_of_rat, of_rat_eq]
instance : char_zero ℚ_[p] :=
⟨λ m n, by { rw ← rat.cast_coe_nat, norm_cast }⟩
end completion
end padic
/-- The rational-valued p-adic norm on ℚ_p is lifted from the norm on Cauchy sequences. The
canonical form of this function is the normed space instance, with notation `∥ ∥`. -/
def padic_norm_e {p : ℕ} [hp : nat.prime p] : ℚ_[p] → ℚ :=
quotient.lift padic_seq.norm $ @padic_seq.norm_equiv _ _
namespace padic_norm_e
section embedding
open padic_seq
variables {p : ℕ} [nat.prime p]
lemma defn (f : padic_seq p) {ε : ℚ} (hε : ε > 0) : ∃ N, ∀ i ≥ N, padic_norm_e (⟦f⟧ - f i) < ε :=
begin
simp only [padic.cast_eq_of_rat],
change ∃ N, ∀ i ≥ N, (f - const _ (f i)).norm < ε,
by_contradiction h,
cases cauchy₂ f hε with N hN,
have : ∀ N, ∃ i ≥ N, (f - const _ (f i)).norm ≥ ε,
by simpa [not_forall] using h,
rcases this N with ⟨i, hi, hge⟩,
have hne : ¬ (f - const (padic_norm p) (f i)) ≈ 0,
{ intro h, unfold padic_seq.norm at hge; split_ifs at hge, exact not_lt_of_ge hge hε },
unfold padic_seq.norm at hge; split_ifs at hge,
apply not_le_of_gt _ hge,
cases decidable.em ((stationary_point hne) ≥ N) with hgen hngen,
{ apply hN; assumption },
{ have := stationary_point_spec hne (le_refl _) (le_of_not_le hngen),
rw ←this,
apply hN,
apply le_refl, assumption }
end
protected lemma nonneg (q : ℚ_[p]) : padic_norm_e q ≥ 0 :=
quotient.induction_on q $ norm_nonneg
lemma zero_def : (0 : ℚ_[p]) = ⟦0⟧ := rfl
lemma zero_iff (q : ℚ_[p]) : padic_norm_e q = 0 ↔ q = 0 :=
quotient.induction_on q $
by simpa only [zero_def, quotient.eq] using norm_zero_iff
@[simp] protected lemma zero : padic_norm_e (0 : ℚ_[p]) = 0 :=
(zero_iff _).2 rfl
/-- Theorems about `padic_norm_e` are named with a `'` so the names do not conflict with the
equivalent theorems about `norm` (`∥ ∥`). -/
@[simp] protected lemma one' : padic_norm_e (1 : ℚ_[p]) = 1 :=
norm_one
@[simp] protected lemma neg (q : ℚ_[p]) : padic_norm_e (-q) = padic_norm_e q :=
quotient.induction_on q $ norm_neg
/-- Theorems about `padic_norm_e` are named with a `'` so the names do not conflict with the
equivalent theorems about `norm` (`∥ ∥`). -/
theorem nonarchimedean' (q r : ℚ_[p]) :
padic_norm_e (q + r) ≤ max (padic_norm_e q) (padic_norm_e r) :=
quotient.induction_on₂ q r $ norm_nonarchimedean
/-- Theorems about `padic_norm_e` are named with a `'` so the names do not conflict with the
equivalent theorems about `norm` (`∥ ∥`). -/
theorem add_eq_max_of_ne' {q r : ℚ_[p]} :
padic_norm_e q ≠ padic_norm_e r → padic_norm_e (q + r) = max (padic_norm_e q) (padic_norm_e r) :=
quotient.induction_on₂ q r $ λ _ _, padic_seq.add_eq_max_of_ne
lemma triangle_ineq (x y z : ℚ_[p]) :
padic_norm_e (x - z) ≤ padic_norm_e (x - y) + padic_norm_e (y - z) :=
calc padic_norm_e (x - z) = padic_norm_e ((x - y) + (y - z)) : by rw sub_add_sub_cancel
... ≤ max (padic_norm_e (x - y)) (padic_norm_e (y - z)) : padic_norm_e.nonarchimedean' _ _
... ≤ padic_norm_e (x - y) + padic_norm_e (y - z) :
max_le_add_of_nonneg (padic_norm_e.nonneg _) (padic_norm_e.nonneg _)
protected lemma add (q r : ℚ_[p]) : padic_norm_e (q + r) ≤ (padic_norm_e q) + (padic_norm_e r) :=
calc
padic_norm_e (q + r) ≤ max (padic_norm_e q) (padic_norm_e r) : nonarchimedean' _ _
... ≤ (padic_norm_e q) + (padic_norm_e r) :
max_le_add_of_nonneg (padic_norm_e.nonneg _) (padic_norm_e.nonneg _)
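-- An illustrative corollary (a sketch, not in the original file): `padic_norm_e.add`
-- together with `padic_norm_e.neg` and the standard `sub_eq_add_neg` rewrite gives the
-- triangle inequality for differences.
example (q r : ℚ_[p]) : padic_norm_e (q - r) ≤ padic_norm_e q + padic_norm_e r :=
calc padic_norm_e (q - r) = padic_norm_e (q + -r) : by rw sub_eq_add_neg
... ≤ padic_norm_e q + padic_norm_e (-r) : padic_norm_e.add q (-r)
... = padic_norm_e q + padic_norm_e r : by rw padic_norm_e.neg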
protected lemma mul' (q r : ℚ_[p]) : padic_norm_e (q * r) = (padic_norm_e q) * (padic_norm_e r) :=
quotient.induction_on₂ q r $ norm_mul
instance : is_absolute_value (@padic_norm_e p _) :=
{ abv_nonneg := padic_norm_e.nonneg,
abv_eq_zero := zero_iff,
abv_add := padic_norm_e.add,
abv_mul := padic_norm_e.mul' }
@[simp] lemma eq_padic_norm' (q : ℚ) : padic_norm_e (padic.of_rat p q) = padic_norm p q :=
norm_const _
protected theorem image' {q : ℚ_[p]} : q ≠ 0 → ∃ n : ℤ, padic_norm_e q = p ^ (-n) :=
quotient.induction_on q $ λ f hf,
have ¬ f ≈ 0, from (ne_zero_iff_nequiv_zero f).1 hf,
norm_image f this
lemma sub_rev (q r : ℚ_[p]) : padic_norm_e (q - r) = padic_norm_e (r - q) :=
by rw ←(padic_norm_e.neg); simp
end embedding
end padic_norm_e
namespace padic
section complete
open padic_seq padic
theorem rat_dense' {p : ℕ} [nat.prime p] (q : ℚ_[p]) {ε : ℚ} (hε : ε > 0) :
∃ r : ℚ, padic_norm_e (q - r) < ε :=
quotient.induction_on q $ λ q',
have ∃ N, ∀ m n ≥ N, padic_norm p (q' m - q' n) < ε, from cauchy₂ _ hε,
let ⟨N, hN⟩ := this in
⟨q' N,
begin
simp only [padic.cast_eq_of_rat],
change padic_seq.norm (q' - const _ (q' N)) < ε,
cases decidable.em ((q' - const (padic_norm p) (q' N)) ≈ 0) with heq hne',
{ simpa only [heq, padic_seq.norm, dif_pos] },
{ simp only [padic_seq.norm, dif_neg hne'],
change padic_norm p (q' _ - q' _) < ε,
have := stationary_point_spec hne',
cases decidable.em (N ≥ stationary_point hne') with hle hle,
{ have := eq.symm (this (le_refl _) hle),
simp at this, simpa [this] },
{ apply hN,
apply le_of_lt, apply lt_of_not_ge, apply hle, apply le_refl }}
end⟩
variables {p : ℕ} [nat.prime p] (f : cau_seq _ (@padic_norm_e p _))
open classical
private lemma div_nat_pos (n : ℕ) : (1 / ((n + 1): ℚ)) > 0 :=
div_pos zero_lt_one (by exact_mod_cast succ_pos _)
def lim_seq : ℕ → ℚ := λ n, classical.some (rat_dense' (f n) (div_nat_pos n))
lemma exi_rat_seq_conv {ε : ℚ} (hε : 0 < ε) :
∃ N, ∀ i ≥ N, padic_norm_e (f i - ((lim_seq f) i : ℚ_[p])) < ε :=
begin
refine (exists_nat_gt (1/ε)).imp (λ N hN i hi, _),
have h := classical.some_spec (rat_dense' (f i) (div_nat_pos i)),
refine lt_of_lt_of_le h (div_le_of_le_mul (by exact_mod_cast succ_pos _) _),
rw right_distrib,
apply le_add_of_le_of_nonneg,
{ exact le_mul_of_div_le hε (le_trans (le_of_lt hN) (by exact_mod_cast hi)) },
{ apply le_of_lt, simpa }
end
lemma exi_rat_seq_conv_cauchy : is_cau_seq (padic_norm p) (lim_seq f) :=
assume ε hε,
have hε3 : ε / 3 > 0, from div_pos hε (by norm_num),
let ⟨N, hN⟩ := exi_rat_seq_conv f hε3,
⟨N2, hN2⟩ := f.cauchy₂ hε3 in
begin
existsi max N N2,
intros j hj,
suffices : padic_norm_e ((↑(lim_seq f j) - f (max N N2)) + (f (max N N2) - lim_seq f (max N N2))) < ε,
{ ring at this ⊢,
rw [← padic_norm_e.eq_padic_norm', ← padic.cast_eq_of_rat],
exact_mod_cast this },
{ apply lt_of_le_of_lt,
{ apply padic_norm_e.add },
{ have : (3 : ℚ) ≠ 0, by norm_num,
have : ε = ε / 3 + ε / 3 + ε / 3,
{ apply eq_of_mul_eq_mul_left this, simp [left_distrib, mul_div_cancel' _ this ], ring },
rw this,
apply add_lt_add,
{ suffices : padic_norm_e ((↑(lim_seq f j) - f j) + (f j - f (max N N2))) < ε / 3 + ε / 3,
by simpa,
apply lt_of_le_of_lt,
{ apply padic_norm_e.add },
{ apply add_lt_add,
{ rw [padic_norm_e.sub_rev],
apply_mod_cast hN,
exact le_of_max_le_left hj },
{ apply hN2,
exact le_of_max_le_right hj,
apply le_max_right }}},
{ apply_mod_cast hN,
apply le_max_left }}}
end
private def lim' : padic_seq p := ⟨_, exi_rat_seq_conv_cauchy f⟩
private def lim : ℚ_[p] := ⟦lim' f⟧
theorem complete' : ∃ q : ℚ_[p], ∀ ε > 0, ∃ N, ∀ i ≥ N, padic_norm_e (q - f i) < ε :=
⟨ lim f,
λ ε hε,
let ⟨N, hN⟩ := exi_rat_seq_conv f (show ε / 2 > 0, from div_pos hε (by norm_num)),
⟨N2, hN2⟩ := padic_norm_e.defn (lim' f) (show ε / 2 > 0, from div_pos hε (by norm_num)) in
begin
existsi max N N2,
intros i hi,
suffices : padic_norm_e ((lim f - lim' f i) + (lim' f i - f i)) < ε,
{ ring at this; exact this },
{ apply lt_of_le_of_lt,
{ apply padic_norm_e.add },
{ have : ε = ε / 2 + ε / 2, by rw ←(add_self_div_two ε); simp,
rw this,
apply add_lt_add,
{ apply hN2, exact le_of_max_le_right hi },
{ rw_mod_cast [padic_norm_e.sub_rev],
apply hN,
exact le_of_max_le_left hi }}}
end ⟩
end complete
section normed_space
variables (p : ℕ) [nat.prime p]
instance : has_dist ℚ_[p] := ⟨λ x y, padic_norm_e (x - y)⟩
instance : metric_space ℚ_[p] :=
{ dist_self := by simp [dist],
dist_comm := λ x y, by unfold dist; rw ←padic_norm_e.neg (x - y); simp,
dist_triangle :=
begin
intros, unfold dist,
exact_mod_cast padic_norm_e.triangle_ineq _ _ _,
end,
eq_of_dist_eq_zero :=
begin
unfold dist, intros _ _ h,
apply eq_of_sub_eq_zero,
apply (padic_norm_e.zero_iff _).1,
exact_mod_cast h
end }
instance : has_norm ℚ_[p] := ⟨λ x, padic_norm_e x⟩
instance : normed_field ℚ_[p] :=
{ dist_eq := λ _ _, rfl,
norm_mul' := by simp [has_norm.norm, padic_norm_e.mul'] }
instance : is_absolute_value (λ a : ℚ_[p], ∥a∥) :=
{ abv_nonneg := norm_nonneg,
abv_eq_zero := norm_eq_zero,
abv_add := norm_add_le,
abv_mul := by simp [has_norm.norm, padic_norm_e.mul'] }
theorem rat_dense {p : ℕ} {hp : p.prime} (q : ℚ_[p]) {ε : ℝ} (hε : ε > 0) :
∃ r : ℚ, ∥q - r∥ < ε :=
let ⟨ε', hε'l, hε'r⟩ := exists_rat_btwn hε,
⟨r, hr⟩ := rat_dense' q (by simpa using hε'l) in
⟨r, lt.trans (by simpa [has_norm.norm] using hr) hε'r⟩
end normed_space
end padic
namespace padic_norm_e
section normed_space
variables {p : ℕ} [hp : p.prime]
include hp
@[simp] protected lemma mul (q r : ℚ_[p]) : ∥q * r∥ = ∥q∥ * ∥r∥ :=
by simp [has_norm.norm, padic_norm_e.mul']
protected lemma is_norm (q : ℚ_[p]) : ↑(padic_norm_e q) = ∥q∥ := rfl
theorem nonarchimedean (q r : ℚ_[p]) : ∥q + r∥ ≤ max (∥q∥) (∥r∥) :=
begin
unfold has_norm.norm,
exact_mod_cast nonarchimedean' _ _
end
theorem add_eq_max_of_ne {q r : ℚ_[p]} (h : ∥q∥ ≠ ∥r∥) : ∥q+r∥ = max (∥q∥) (∥r∥) :=
begin
unfold has_norm.norm,
apply_mod_cast add_eq_max_of_ne',
intro h',
apply h,
unfold has_norm.norm,
exact_mod_cast h'
end
@[simp] lemma eq_padic_norm (q : ℚ) : ∥(↑q : ℚ_[p])∥ = padic_norm p q :=
begin
unfold has_norm.norm,
rw [← padic_norm_e.eq_padic_norm', ← padic.cast_eq_of_rat]
end
instance : nondiscrete_normed_field ℚ_[p] :=
{ non_trivial := ⟨padic.of_rat p (p⁻¹), begin
have h0 : p ≠ 0 := ne_of_gt (hp.pos),
have h1 : 1 < p := hp.one_lt,
rw [← padic.cast_eq_of_rat, eq_padic_norm],
simp only [padic_norm, inv_eq_zero],
simp only [if_neg] {discharger := `[exact_mod_cast h0]},
norm_cast,
simp only [padic_val_rat.inv] {discharger := `[exact_mod_cast h0]},
rw [neg_neg, padic_val_rat.padic_val_rat_self h1],
erw _root_.pow_one,
exact_mod_cast h1,
end⟩ }
protected theorem image {q : ℚ_[p]} : q ≠ 0 → ∃ n : ℤ, ∥q∥ = ↑((↑p : ℚ) ^ (-n)) :=
quotient.induction_on q $ λ f hf,
have ¬ f ≈ 0, from (padic_seq.ne_zero_iff_nequiv_zero f).1 hf,
let ⟨n, hn⟩ := padic_seq.norm_image f this in
⟨n, congr_arg rat.cast hn⟩
protected lemma is_rat (q : ℚ_[p]) : ∃ q' : ℚ, ∥q∥ = ↑q' :=
if h : q = 0 then ⟨0, by simp [h]⟩
else let ⟨n, hn⟩ := padic_norm_e.image h in ⟨_, hn⟩
def rat_norm (q : ℚ_[p]) : ℚ := classical.some (padic_norm_e.is_rat q)
lemma eq_rat_norm (q : ℚ_[p]) : ∥q∥ = rat_norm q := classical.some_spec (padic_norm_e.is_rat q)
theorem norm_rat_le_one : ∀ {q : ℚ} (hq : ¬ p ∣ q.denom), ∥(q : ℚ_[p])∥ ≤ 1
| ⟨n, d, hn, hd⟩ := λ hq : ¬ p ∣ d,
if hnz : n = 0 then
have (⟨n, d, hn, hd⟩ : ℚ) = 0,
from rat.zero_iff_num_zero.mpr hnz,
by norm_num [this]
else
begin
have hnz' : {rat . num := n, denom := d, pos := hn, cop := hd} ≠ 0, from mt rat.zero_iff_num_zero.1 hnz,
rw [padic_norm_e.eq_padic_norm],
norm_cast,
rw [padic_norm.eq_fpow_of_nonzero p hnz', padic_val_rat_def p hnz'],
have h : (multiplicity p d).get _ = 0, by simp [multiplicity_eq_zero_of_not_dvd, hq],
rw_mod_cast [h, sub_zero],
apply fpow_le_one_of_nonpos,
{ exact_mod_cast le_of_lt hp.one_lt, },
{ apply neg_nonpos_of_nonneg, norm_cast, simp, }
end
lemma eq_of_norm_add_lt_right {p : ℕ} {hp : p.prime} {z1 z2 : ℚ_[p]}
(h : ∥z1 + z2∥ < ∥z2∥) : ∥z1∥ = ∥z2∥ :=
by_contradiction $ λ hne,
not_lt_of_ge (by rw padic_norm_e.add_eq_max_of_ne hne; apply le_max_right) h
lemma eq_of_norm_add_lt_left {p : ℕ} {hp : p.prime} {z1 z2 : ℚ_[p]}
(h : ∥z1 + z2∥ < ∥z1∥) : ∥z1∥ = ∥z2∥ :=
by_contradiction $ λ hne,
not_lt_of_ge (by rw padic_norm_e.add_eq_max_of_ne hne; apply le_max_left) h
end normed_space
end padic_norm_e
namespace padic
variables {p : ℕ} [nat.prime p]
set_option eqn_compiler.zeta true
instance complete : cau_seq.is_complete ℚ_[p] norm :=
begin
split, intro f,
have cau_seq_norm_e : is_cau_seq padic_norm_e f,
{ intros ε hε,
let h := is_cau f ε (by exact_mod_cast hε),
unfold norm at h,
apply_mod_cast h },
cases padic.complete' ⟨f, cau_seq_norm_e⟩ with q hq,
existsi q,
intros ε hε,
cases exists_rat_btwn hε with ε' hε',
norm_cast at hε',
cases hq ε' hε'.1 with N hN, existsi N,
intros i hi, let h := hN i hi,
unfold norm,
rw_mod_cast [cau_seq.sub_apply, padic_norm_e.sub_rev],
refine lt.trans _ hε'.2,
exact_mod_cast hN i hi
end
lemma padic_norm_e_lim_le {f : cau_seq ℚ_[p] norm} {a : ℝ} (ha : a > 0)
(hf : ∀ i, ∥f i∥ ≤ a) : ∥f.lim∥ ≤ a :=
let ⟨N, hN⟩ := setoid.symm (cau_seq.equiv_lim f) _ ha in
calc ∥f.lim∥ = ∥f.lim - f N + f N∥ : by simp
... ≤ max (∥f.lim - f N∥) (∥f N∥) : padic_norm_e.nonarchimedean _ _
... ≤ a : max_le (le_of_lt (hN _ (le_refl _))) (hf _)
end padic
|
889d211716835dc3104731d40047d107023fcb4b
|
9dd3f3912f7321eb58ee9aa8f21778ad6221f87c
|
/library/init/native/default.lean
|
6f181db5d275cf267328d150e912e2c3b039a008
|
[
"Apache-2.0"
] |
permissive
|
bre7k30/lean
|
de893411bcfa7b3c5572e61b9e1c52951b310aa4
|
5a924699d076dab1bd5af23a8f910b433e598d7a
|
refs/heads/master
| 1,610,900,145,817
| 1,488,006,845,000
| 1,488,006,845,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 23,779
|
lean
|
/-
Copyright (c) 2016 Jared Roesch. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jared Roesch
-/
prelude
import init.meta.format
import init.meta.expr
import init.category.state
import init.data.string
import init.data.list.instances
import init.native.ir
import init.native.format
import init.native.internal
import init.native.anf
import init.native.cf
import init.native.pass
import init.native.util
import init.native.config
import init.native.result
namespace native
inductive error : Type
| string : string → error
| many : list error → error
meta def error.to_string : error → string
| (error.string s) := s
| (error.many es) := to_string $ list.map error.to_string es
meta def arity_map : Type :=
rb_map name nat
meta def get_arity : expr → nat
| (expr.lam _ _ _ body) := 1 + get_arity body
| _ := 0
@[reducible] def ir_result (A : Type*) :=
native.result error A
meta def mk_arity_map : list (name × expr) → arity_map
| [] := rb_map.mk name nat
| ((n, body) :: rest) := rb_map.insert (mk_arity_map rest) n (get_arity body)
@[reducible] meta def ir_compiler_state :=
(config × arity_map × nat)
@[reducible] meta def ir_compiler (A : Type) :=
native.resultT (state ir_compiler_state) error A
meta def lift {A} (action : state ir_compiler_state A) : ir_compiler A :=
⟨fmap (fun (a : A), native.result.ok a) action⟩
meta def trace_ir (s : string) : ir_compiler unit := do
(conf, map, counter) ← lift $ state.read,
if config.debug conf
then trace s (return ())
else return ()
-- An `exotic` monad combinator that accumulates errors.
meta def run {M E A} (res : native.resultT M E A) : M (native.result E A) :=
match res with
| ⟨action⟩ := action
end
meta def sequence_err : list (ir_compiler format) → ir_compiler (list format × list error)
| [] := return ([], [])
| (action :: remaining) :=
⟨ fun s,
match (run (sequence_err remaining)) s with
| (native.result.err e, s') := (native.result.err e, s)
| (native.result.ok (res, errs), s') :=
match (run action) s' with
| (native.result.err e, s'') := (native.result.ok (res, e :: errs), s'')
| (native.result.ok v, s'') := (native.result.ok (v :: res, errs), s'')
end
end
⟩
-- meta lemma sequence_err_always_ok :
-- forall xs v s s', sequence_err xs s = native.result.ok (v, s') := sorry
meta def lift_result {A} (action : ir_result A) : ir_compiler A :=
⟨fun s, (action, s)⟩
-- TODO: fix naming here
private meta def take_arguments' : expr → list name → (list name × expr)
| (expr.lam n _ _ body) ns := take_arguments' body (n :: ns)
| e' ns := (ns, e')
meta def fresh_name : ir_compiler name := do
(conf, map, counter) ← lift state.read,
let fresh := name.mk_numeral (unsigned.of_nat counter) `native._ir_compiler_,
lift $ state.write (conf, map, counter + 1),
return fresh
meta def take_arguments (e : expr) : ir_compiler (list name × expr) :=
let (arg_names, body) := take_arguments' e [] in do
fresh_names ← monad.mapm (fun x, fresh_name) arg_names,
let locals := list.map mk_local fresh_names,
return $ (fresh_names, expr.instantiate_vars body (list.reverse locals))
-- meta def lift_state {A} (action : state arity_map A) : ir_compiler A :=
-- fun (s : arity_map), match action s with
-- | (a, s) := (return a, s)
-- end
meta def mk_error {T} : string → ir_compiler T :=
fun s, do
trace_ir "CREATEDERROR",
lift_result (native.result.err $ error.string s)
meta def lookup_arity (n : name) : ir_compiler nat := do
(_, map, counter) ← lift state.read,
if n = `nat.cases_on
then pure 2
else
match rb_map.find map n with
| option.none := mk_error $ "could not find arity for: " ++ to_string n
| option.some n := return n
end
meta def mk_nat_literal (n : nat) : ir_compiler ir.expr :=
return (ir.expr.lit $ ir.literal.nat n)
def repeat {A : Type} : nat → A → list A
| 0 _ := []
| (n + 1) a := a :: repeat n a
def zip {A B : Type} : list A → list B → list (A × B)
| [] [] := []
| [] (y :: ys) := []
| (x :: xs) [] := []
| (x :: xs) (y :: ys) := (x, y) :: zip xs ys
private def upto' : ℕ → list ℕ
| 0 := []
| (n + 1) := n :: upto' n
def upto (n : ℕ) : list ℕ :=
list.reverse $ upto' n
def label {A : Type} (xs : list A) : list (nat × A) :=
zip (upto (list.length xs)) xs
-- lemma label_size_eq :
-- forall A (xs : list A),
-- list.length (label xs) = list.length xs :=
-- begin
-- intros,
-- induction xs,
-- apply sorry
-- apply sorry
-- end
-- HELPERS --
meta def assert_name : ir.expr → ir_compiler name
| (ir.expr.locl n) := lift_result $ native.result.ok n
| e := mk_error $ "expected name found: " ++ to_string (format_cpp.expr e)
meta def assert_expr : ir.stmt → ir_compiler ir.expr
| (ir.stmt.e exp) := return exp
| s := mk_error ("internal invariant violated, found: " ++ (to_string (format_cpp.stmt s)))
meta def mk_call (head : name) (args : list ir.expr) : ir_compiler ir.expr :=
let args'' := list.map assert_name args
in do
args' ← monad.sequence args'',
return (ir.expr.call head args')
meta def mk_under_sat_call (head : name) (args : list ir.expr) : ir_compiler ir.expr :=
let args'' := list.map assert_name args in do
args' ← monad.sequence args'',
return $ ir.expr.mk_native_closure head args'
meta def bind_value_with_ty (val : ir.expr) (ty : ir.ty) (body : name → ir_compiler ir.stmt) : ir_compiler ir.stmt := do
fresh ← fresh_name,
ir.stmt.letb fresh ty val <$> (body fresh)
meta def bind_value (val : ir.expr) (body : name → ir_compiler ir.stmt) : ir_compiler ir.stmt :=
bind_value_with_ty val ir.ty.object body
-- not in love with this name-mangling "solution"; it is a hack, revisit it later
meta def compile_local (n : name) : ir_compiler name :=
return $ (mk_str_name "_$local$_" (name.to_string_with_sep "_" n))
meta def mk_invoke (loc : name) (args : list ir.expr) : ir_compiler ir.expr :=
let args'' := list.map assert_name args
in do
args' ← monad.sequence args'',
loc' ← compile_local loc,
lift_result (native.result.ok $ ir.expr.invoke loc' args')
meta def mk_over_sat_call (head : name) (fst snd : list ir.expr) : ir_compiler ir.expr :=
let fst' := list.map assert_name fst,
snd' := list.map assert_name snd in do
args' ← monad.sequence fst',
args'' ← monad.sequence snd',
fresh ← fresh_name,
locl ← compile_local fresh,
invoke ← ir.stmt.e <$> (mk_invoke fresh (fmap ir.expr.locl args'')),
return $ ir.expr.block (ir.stmt.seq [
ir.stmt.letb locl ir.ty.object (ir.expr.call head args') ir.stmt.nop,
invoke
])
meta def is_return (n : name) : bool :=
`native_compiler.return = n
meta def compile_call (head : name) (arity : nat) (args : list ir.expr) : ir_compiler ir.expr := do
trace_ir $ "compile_call: " ++ (to_string head),
if list.length args = arity
then mk_call head args
else if list.length args < arity
then mk_under_sat_call head args
else mk_over_sat_call head (list.taken arity args) (list.dropn arity args)
meta def mk_object (arity : unsigned) (args : list ir.expr) : ir_compiler ir.expr :=
let args'' := list.map assert_name args
in do args' ← monad.sequence args'',
lift_result (native.result.ok $ ir.expr.mk_object (unsigned.to_nat arity) args')
meta def one_or_error (args : list expr) : ir_compiler expr :=
match args with
| ((h : expr) :: []) := lift_result $ native.result.ok h
| _ := mk_error "internal invariant violated, should only have one argument"
end
meta def panic (msg : string) : ir_compiler ir.expr :=
return $ ir.expr.panic msg
-- END HELPERS --
meta def bind_case_fields' (scrut : name) : list (nat × name) → ir.stmt → ir_compiler ir.stmt
| [] body := return body
| ((n, f) :: fs) body := do
loc ← compile_local f,
ir.stmt.letb f ir.ty.object (ir.expr.project scrut n) <$> (bind_case_fields' fs body)
meta def bind_case_fields (scrut : name) (fs : list name) (body : ir.stmt) : ir_compiler ir.stmt :=
bind_case_fields' scrut (label fs) body
meta def mk_cases_on (case_name scrut : name) (cases : list (nat × ir.stmt)) (default : ir.stmt) : ir.stmt :=
ir.stmt.seq [
ir.stmt.letb `ctor_index ir.ty.int (ir.expr.call `cidx [scrut]) ir.stmt.nop,
ir.stmt.switch `ctor_index cases default
]
meta def compile_cases (action : expr → ir_compiler ir.stmt) (scrut : name)
: list (nat × expr) → ir_compiler (list (nat × ir.stmt))
| [] := return []
| ((n, body) :: cs) := do
(fs, body') ← take_arguments body,
body'' ← action body',
cs' ← compile_cases cs,
case ← bind_case_fields scrut fs body'',
return $ (n, case) :: cs'
meta def compile_cases_on_to_ir_expr
(case_name : name)
(cases : list expr)
(action : expr → ir_compiler ir.stmt) : ir_compiler ir.expr := do
default ← panic "default case should never be reached",
match cases with
| [] := mk_error $ "found " ++ to_string case_name ++ "applied to zero arguments"
| (h :: cs) := do
ir_scrut ← action h >>= assert_expr,
ir.expr.block <$> bind_value ir_scrut (fun scrut, do
cs' ← compile_cases action scrut (label cs),
return (mk_cases_on case_name scrut cs' (ir.stmt.e default)))
end
meta def bind_builtin_case_fields' (scrut : name) : list (nat × name) → ir.stmt → ir_compiler ir.stmt
| [] body := return body
| ((n, f) :: fs) body := do
loc ← compile_local f,
ir.stmt.letb loc ir.ty.object (ir.expr.project scrut n) <$> (bind_builtin_case_fields' fs body)
meta def bind_builtin_case_fields (scrut : name) (fs : list name) (body : ir.stmt) : ir_compiler ir.stmt :=
bind_builtin_case_fields' scrut (label fs) body
meta def compile_builtin_cases (action : expr → ir_compiler ir.stmt) (scrut : name)
: list (nat × expr) → ir_compiler (list (nat × ir.stmt))
| [] := return []
| ((n, body) :: cs) := do
(fs, body') ← take_arguments body,
body'' ← action body',
cs' ← compile_builtin_cases cs,
case ← bind_builtin_case_fields scrut fs body'',
return $ (n, case) :: cs'
meta def in_lean_ns (n : name) : name :=
mk_simple_name ("lean::" ++ name.to_string_with_sep "_" n)
meta def mk_builtin_cases_on (case_name scrut : name) (cases : list (nat × ir.stmt)) (default : ir.stmt) : ir.stmt :=
-- replace `ctor_index with a generated name
ir.stmt.seq [
ir.stmt.letb `buffer ir.ty.object_buffer ir.expr.uninitialized ir.stmt.nop,
ir.stmt.letb `ctor_index ir.ty.int (ir.expr.call (in_lean_ns case_name) [scrut, `buffer]) ir.stmt.nop,
ir.stmt.switch `ctor_index cases default
]
meta def compile_builtin_cases_on_to_ir_expr
(case_name : name)
(cases : list expr)
(action : expr → ir_compiler ir.stmt) : ir_compiler ir.expr := do
default ← panic "default case should never be reached",
match cases with
| [] := mk_error $ "found " ++ to_string case_name ++ "applied to zero arguments"
| (h :: cs) := do
ir_scrut ← action h >>= assert_expr,
ir.expr.block <$> bind_value ir_scrut (fun scrut, do
cs' ← compile_builtin_cases action scrut (label cs),
return (mk_builtin_cases_on case_name scrut cs' (ir.stmt.e default)))
end
meta def mk_is_simple (scrut : name) : ir.expr :=
ir.expr.call `is_simple [scrut]
meta def mk_is_zero (n : name) : ir.expr :=
ir.expr.equals (ir.expr.raw_int 0) (ir.expr.locl n)
meta def mk_cidx (obj : name) : ir.expr :=
ir.expr.call `cidx [obj]
-- we should add applicative brackets
meta def mk_simple_nat_cases_on (scrut : name) (zero_case succ_case : ir.stmt) : ir_compiler ir.stmt :=
bind_value_with_ty (mk_cidx scrut) (ir.ty.name `int) (fun cidx,
bind_value_with_ty (mk_is_zero cidx) (ir.ty.name `bool) (fun is_zero,
pure $ ir.stmt.ite is_zero zero_case succ_case))
meta def mk_mpz_nat_cases_on (scrut : name) (zero_case succ_case : ir.stmt) : ir_compiler ir.stmt :=
ir.stmt.e <$> panic "mpz"
meta def mk_nat_cases_on (scrut : name) (zero_case succ_case : ir.stmt) : ir_compiler ir.stmt :=
bind_value_with_ty (mk_is_simple scrut) (ir.ty.name `bool) (fun is_simple,
ir.stmt.ite is_simple <$>
mk_simple_nat_cases_on scrut zero_case succ_case <*>
mk_mpz_nat_cases_on scrut zero_case succ_case)
meta def assert_two_cases (cases : list expr) : ir_compiler (expr × expr) :=
match cases with
| c1 :: c2 :: _ := return (c1, c2)
| _ := mk_error "nat.cases_on should have exactly two cases"
end
meta def mk_vm_nat (n : name) : ir.expr :=
ir.expr.call (in_lean_ns `mk_vm_simple) [n]
meta def compile_succ_case (action : expr → ir_compiler ir.stmt) (scrut : name) (succ_case : expr) : ir_compiler ir.stmt := do
(fs, body') ← take_arguments succ_case,
body'' ← action body',
match fs with
| pred :: _ := do
loc ← compile_local pred,
fresh ← fresh_name,
bind_value_with_ty (mk_cidx scrut) (ir.ty.name `int) (fun cidx,
bind_value_with_ty (ir.expr.sub (ir.expr.locl cidx) (ir.expr.raw_int 1)) (ir.ty.name `int) (fun sub,
pure $ ir.stmt.letb loc ir.ty.object (mk_vm_nat sub) body''
))
| _ := mk_error "compile_succ_case too many fields"
end
meta def compile_nat_cases_on_to_ir_expr
(case_name : name)
(cases : list expr)
(action : expr → ir_compiler ir.stmt) : ir_compiler ir.expr :=
match cases with
| [] := mk_error $ "found " ++ to_string case_name ++ "applied to zero arguments"
| (h :: cs) := do
ir_scrut ← action h >>= assert_expr,
(zero_case, succ_case) ← assert_two_cases cs,
trace_ir (to_string zero_case),
trace_ir (to_string succ_case),
ir.expr.block <$> bind_value ir_scrut (fun scrut, do
zc ← action zero_case,
sc ← compile_succ_case action scrut succ_case,
mk_nat_cases_on scrut zc sc
)
end
-- this->emit_indented("if (is_simple(");
-- action(scrutinee);
-- this->emit_string("))");
-- this->emit_block([&] () {
-- this->emit_indented("if (cidx(");
-- action(scrutinee);
-- this->emit_string(") == 0) ");
-- this->emit_block([&] () {
-- action(zero_case);
-- *this->m_output_stream << ";\n";
-- });
-- this->emit_string("else ");
-- this->emit_block([&] () {
-- action(succ_case);
-- *this->m_output_stream << ";\n";
-- });
-- });
-- this->emit_string("else ");
-- this->emit_block([&] () {
-- this->emit_indented("if (to_mpz(");
-- action(scrutinee);
-- this->emit_string(") == 0) ");
-- this->emit_block([&] () {
-- action(zero_case);
-- *this->m_output_stream << ";\n";
-- });
-- this->emit_string("else ");
-- this->emit_block([&] () {
-- action(succ_case);
-- });
-- });
-- this code isn't great; it works around the semi-functional frontend
meta def compile_expr_app_to_ir_expr
(head : expr)
(args : list expr)
(action : expr → ir_compiler ir.stmt) : ir_compiler ir.expr := do
trace_ir (to_string head ++ to_string args),
if expr.is_constant head = bool.tt
then (if is_return (expr.const_name head)
then do
rexp ← one_or_error args,
(ir.expr.block ∘ ir.stmt.return) <$> ((action rexp) >>= assert_expr)
else if is_nat_cases_on (expr.const_name head)
then compile_nat_cases_on_to_ir_expr (expr.const_name head) args action
else match is_internal_cnstr head with
| option.some n := do
args' ← monad.sequence $ list.map (fun x, action x >>= assert_expr) args,
mk_object n args'
| option.none := match is_internal_cases head with
| option.some n := compile_cases_on_to_ir_expr (expr.const_name head) args action
| option.none := match get_builtin (expr.const_name head) with
| option.some builtin :=
match builtin with
| builtin.vm n := mk_error "vm"
| builtin.cfun n arity := do
args' ← monad.sequence $ list.map (fun x, action x >>= assert_expr) args,
compile_call n arity args'
| builtin.cases n arity :=
compile_builtin_cases_on_to_ir_expr (expr.const_name head) args action
end
| option.none := do
args' ← monad.sequence $ list.map (fun x, action x >>= assert_expr) args,
arity ← lookup_arity (expr.const_name head),
compile_call (expr.const_name head) arity args'
end
end end)
else if expr.is_local_constant head
then do
args' ← monad.sequence $ list.map (fun x, action x >>= assert_expr) args,
mk_invoke (expr.local_uniq_name head) args'
else (mk_error ("unsupported call position" ++ (to_string head)))
meta def compile_expr_macro_to_ir_expr (e : expr) : ir_compiler ir.expr :=
match native.get_nat_value e with
| option.none := mk_error "unsupported macro"
| option.some n := mk_nat_literal n
end
meta def compile_expr_to_ir_expr (action : expr → ir_compiler ir.stmt): expr → ir_compiler ir.expr
| (expr.const n ls) :=
match native.is_internal_cnstr (expr.const n ls) with
| option.none :=
-- TODO, do I need to case on arity here? I should probably always emit a call
match get_builtin n with
| option.some (builtin.cfun n' arity) :=
compile_call n arity []
| _ :=
if n = "_neutral_"
then (pure $ ir.expr.mk_object 0 [])
else do
arity ← lookup_arity n,
compile_call n arity []
end
| option.some arity := pure $ ir.expr.mk_object (unsigned.to_nat arity) []
end
| (expr.var i) := mk_error "there should be no bound variables in compiled terms"
| (expr.sort _) := mk_error "found sort"
| (expr.mvar _ _) := mk_error "unexpected meta-variable in expr"
| (expr.local_const n _ _ _) := ir.expr.locl <$> compile_local n
| (expr.app f x) :=
let head := expr.get_app_fn (expr.app f x),
args := expr.get_app_args (expr.app f x)
in compile_expr_app_to_ir_expr head args action
| (expr.lam _ _ _ _) := mk_error "found lam"
| (expr.pi _ _ _ _) := mk_error "found pi"
| (expr.elet n _ v body) := mk_error "internal error: can not translate let binding into a ir_expr"
| (expr.macro d sz args) := compile_expr_macro_to_ir_expr (expr.macro d sz args)
meta def compile_expr_to_ir_stmt : expr → ir_compiler ir.stmt
| (expr.pi _ _ _ _) := mk_error "found pi, should not be translating a Pi for any reason (yet ...)"
| (expr.elet n _ v body) := do
n' ← compile_local n,
v' ← compile_expr_to_ir_expr compile_expr_to_ir_stmt v,
-- this is a scoping fail, we need to fix how we compile locals
body' ← compile_expr_to_ir_stmt (expr.instantiate_vars body [mk_local n]),
-- not the best solution; we need to think harder about how to prevent this, perhaps with more aggressive ANF
match v' with
| ir.expr.block stmt := return (ir.stmt.seq [ir.stmt.letb n' ir.ty.object ir.expr.uninitialized ir.stmt.nop, body'])
| _ := return (ir.stmt.letb n' ir.ty.object v' body')
end
| e' := ir.stmt.e <$> compile_expr_to_ir_expr compile_expr_to_ir_stmt e'
meta def compile_defn_to_ir (decl_name : name) (args : list name) (body : expr) : ir_compiler ir.defn := do
body' ← compile_expr_to_ir_stmt body,
let params := (zip args (repeat (list.length args) (ir.ty.ref ir.ty.object))),
pure (ir.defn.mk decl_name params ir.ty.object body')
def unwrap_or_else {T R : Type} : ir_result T → (T → R) → (error → R) → R
| (native.result.err e) f err := err e
| (native.result.ok t) f err := f t
meta def replace_main (n : name) : name :=
if n = `main
then "___lean__main"
else n
meta def trace_expr (e : expr) : ir_compiler unit :=
trace ("trace_expr: " ++ to_string e) (return ())
meta def compile_defn (decl_name : name) (e : expr) : ir_compiler format :=
let arity := get_arity e in do
(args, body) ← take_arguments e,
ir ← compile_defn_to_ir (replace_main decl_name) args body,
return $ format_cpp.defn ir
meta def compile' : list (name × expr) → list (ir_compiler format)
| [] := []
| ((n, e) :: rest) := do
let decl := (fun d, d ++ format.line ++ format.line) <$> compile_defn n e,
decl :: (compile' rest)
meta def format_error : error → format
| (error.string s) := to_fmt s
| (error.many es) := format_concat (list.map format_error es)
meta def mk_lean_name (n : name) : ir.expr :=
ir.expr.constructor (in_lean_ns `name) (name.components n)
meta def emit_declare_vm_builtins : list (name × expr) → ir_compiler (list ir.stmt)
| [] := return []
| ((n, body) :: es) := do
vm_name ← pure $ (mk_lean_name n),
tail ← emit_declare_vm_builtins es,
fresh ← fresh_name,
let cpp_name := in_lean_ns `name,
let single_binding := ir.stmt.seq [
ir.stmt.letb fresh (ir.ty.name cpp_name) vm_name ir.stmt.nop,
ir.stmt.e $ ir.expr.assign `env (ir.expr.call `add_native [`env, fresh, replace_main n])
],
return $ single_binding :: tail
meta def emit_main (procs : list (name × expr)) : ir_compiler ir.defn := do
builtins ← emit_declare_vm_builtins procs,
arity ← lookup_arity `main,
vm_simple_obj ← fresh_name,
call_main ← compile_call "___lean__main" arity [ir.expr.locl vm_simple_obj],
return (ir.defn.mk `main [] ir.ty.int $ ir.stmt.seq ([
ir.stmt.e $ ir.expr.call (in_lean_ns `initialize) [],
ir.stmt.letb `env (ir.ty.name (in_lean_ns `environment)) ir.expr.uninitialized ir.stmt.nop
] ++ builtins ++ [
ir.stmt.letb `ios (ir.ty.name (in_lean_ns `io_state)) (ir.expr.call (in_lean_ns `get_global_ios) []) ir.stmt.nop,
ir.stmt.letb `opts (ir.ty.name (in_lean_ns `options)) (ir.expr.call (in_lean_ns `get_options_from_ios) [`ios]) ir.stmt.nop,
ir.stmt.letb `S (ir.ty.name (in_lean_ns `vm_state)) (ir.expr.constructor (in_lean_ns `vm_state) [`env, `opts]) ir.stmt.nop,
ir.stmt.letb `scoped (ir.ty.name (in_lean_ns `scope_vm_state)) (ir.expr.constructor (in_lean_ns `scope_vm_state) [`S]) ir.stmt.nop,
ir.stmt.e $ ir.expr.assign `g_env (ir.expr.address_of `env),
ir.stmt.letb vm_simple_obj ir.ty.object (ir.expr.mk_object 0 []) ir.stmt.nop,
ir.stmt.e call_main
]))
-- -- call_mains
-- -- buffer<expr> args;
-- -- auto unit = mk_neutral_expr();
-- -- args.push_back(unit);
-- -- // Make sure to invoke the C call machinery since it is non-deterministic
-- -- // which case we enter here.
-- -- compile_to_c_call(main_fn, args, 0, name_map<unsigned>());
-- -- *this->m_output_stream << ";\n return 0;\n}" << std::endl;
-- ]
meta def unzip {A B} : list (A × B) → (list A × list B)
| [] := ([], [])
| ((x, y) :: rest) :=
let (xs, ys) := unzip rest
in (x :: xs, y :: ys)
meta def configuration : ir_compiler config := do
(conf, _, _) ← lift $ state.read,
pure conf
meta def apply_pre_ir_passes (procs : list procedure) (conf : config) : list procedure :=
run_passes conf [anf, cf] procs
meta def driver (procs : list (name × expr)) : ir_compiler (list format × list error) := do
procs' ← apply_pre_ir_passes procs <$> configuration,
(fmt_decls, errs) ← sequence_err (compile' procs'),
main ← emit_main procs',
return (format_cpp.defn main :: fmt_decls, errs)
meta def compile (conf : config) (procs : list (name × expr)) : format :=
let arities := mk_arity_map procs in
-- Put this in a combinator or something ...
match run (driver procs) (conf, arities, 0) with
| (native.result.err e, s) := error.to_string e
| (native.result.ok (decls, errs), s) :=
if list.length errs = 0
then format_concat decls
else format_error (error.many errs)
end
-- meta def compile (procs : list (name))
end native
|
dc7010880c9f7a4d0df5322c15b77c80460d4f15
|
a721fe7446524f18ba361625fc01033d9c8b7a78
|
/src/principia/mynat/basic.lean
|
63130d51bf37e1c8ce7209369c3c6975dd89a825
|
[] |
no_license
|
Sterrs/leaning
|
8fd80d1f0a6117a220bb2e57ece639b9a63deadc
|
3901cc953694b33adda86cb88ca30ba99594db31
|
refs/heads/master
| 1,627,023,822,744
| 1,616,515,221,000
| 1,616,515,221,000
| 245,512,190
| 2
| 0
| null | 1,616,429,050,000
| 1,583,527,118,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 7,592
|
lean
|
-- vim: ts=2 sw=0 sts=-1 et ai tw=70
-- Natural numbers
-- TODO:
-- shorten the pow theorems
-- work through the sporadic confused comments
-- clean everything up a bit, re naming and formatting
-- try to re-use theorems more
-- the `hidden` namespace stops name conflicts with the core library
namespace hidden
inductive mynat
| zero : mynat
| succ (n : mynat) : mynat
namespace mynat
-- these instances are what make the numeral notation 0 and 1 (and, below,
-- the arithmetic operators) available for mynat
instance: has_zero mynat := ⟨zero⟩
instance: has_one mynat := ⟨succ zero⟩
def add: mynat → mynat → mynat
| m 0 := m
| m (succ n) := succ (add m n)
instance: has_add mynat := ⟨add⟩
def mul: mynat → mynat → mynat
| m 0 := 0
| m (succ n) := m + mul m n
instance: has_mul mynat := ⟨mul⟩
-- a ^ b should be the number of functions from a b-set to an a-set. fight me
def pow: mynat → mynat → mynat
| m 0 := 1
| m (succ n) := m * pow m n
instance: has_pow mynat mynat := ⟨pow⟩
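-- Quick sanity checks of the definitions above (illustrative, not part of the
-- original file); each equation holds by definitional unfolding.
example : (2 : mynat) + 2 = 4 := rfl
example : (2 : mynat) * 3 = 6 := rfl
example : (2 : mynat) ^ 3 = 8 := rfl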
variables {m n k : mynat}
-- ADDITION
-- these lemmas are tagged @[simp] liberally, for use by simp in later proofs
@[simp] theorem add_zero (m : mynat): m + 0 = m := rfl
@[simp]
theorem add_succ (m n : mynat): m + succ n = succ (m + n) := rfl
-- with the operator instances, goals mention 0 rather than the constructor zero,
-- so proofs by induction need the zz lemma below to rewrite zero into 0
-- wherever it appears
@[simp] theorem zz: zero = 0 := rfl
@[simp]
theorem zero_add: ∀ m : mynat, 0 + m = m
| zero := rfl
| (succ m) := by rw [add_succ, zero_add]
@[simp]
theorem succ_add: ∀ m n : mynat, succ m + n = succ (m + n)
| m zero := rfl
| m (succ n) := by rw [add_succ, add_succ, succ_add]
theorem add_assoc: ∀ m n k : mynat, (m + n) + k = m + (n + k)
| m n zero := rfl
| m n (succ k) := by rw [add_succ, add_succ, add_succ, add_assoc]
instance add_is_assoc: is_associative mynat add :=
⟨assume a b c, add_assoc a b c⟩
theorem add_comm: ∀ m n : mynat, m + n = n + m
| m zero := by rw [zz, add_zero, zero_add]
| m (succ n) := by rw [add_succ, succ_add, add_comm]
instance add_is_comm: is_commutative mynat add :=
⟨assume a b, add_comm a b⟩
@[simp] theorem add_one_succ: m + 1 = succ m := rfl
theorem succ_inj: succ m = succ n → m = n :=
succ.inj
@[simp]
theorem one_eq_succ_zero: succ 0 = 1 := rfl
theorem zero_ne_one : (0: mynat) ≠ 1 :=
begin
assume h,
from mynat.no_confusion h,
end
theorem add_cancel: ∀ {m}, m + n = m + k → n = k
| zero := by simp; cc
| (succ m) := assume h, by {
rw [succ_add, succ_add] at h,
from @add_cancel m (succ_inj h)
}
theorem add_cancel_right: m + n = k + n → m = k :=
begin
repeat {rw add_comm _ n},
from add_cancel,
end
-- In the case where nothing is being added on LHS
theorem add_cancel_to_zero: m = m + k → k = 0 :=
begin
assume hmmk,
-- use conv to rewrite the left-hand m as m + 0
conv at hmmk { to_lhs, rw ←add_zero m, },
from add_cancel hmmk.symm,
end
-- again, cases closes the goal, since succ m = 0 matches no constructor
theorem succ_ne_zero: succ m ≠ 0 :=
begin
assume h,
cases h,
end
theorem nzero_iff_succ: m ≠ 0 ↔ ∃ n, m = succ n :=
begin
split; assume h, {
cases m,
contradiction,
existsi m,
refl,
}, {
cases h with n h,
rw h,
from succ_ne_zero,
},
end
theorem add_integral: ∀ {m n : mynat}, m + n = 0 → m = 0
| zero _ := by simp
| (succ m) n :=
begin
rw succ_add,
assume h,
from false.elim (succ_ne_zero h),
end
-- DECIDABILITY
instance: decidable_eq mynat :=
begin
intros m n,
induction m with m hm generalizing n, {
cases n, {
from is_true rfl,
}, {
from is_false succ_ne_zero.symm,
},
}, {
induction n with n hn generalizing m, {
from is_false succ_ne_zero,
}, {
cases (hm n) with hmnen hmeqn, {
apply is_false,
assume hsmsn,
from hmnen (succ_inj hsmsn),
}, {
rw hmeqn,
from is_true rfl,
},
},
},
end
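-- With decidable equality available, concrete (in)equalities can be settled by
-- `dec_trivial` (an illustrative check, not part of the original file):
example : (2 : mynat) ≠ 3 := dec_trivial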
-- MULTIPLICATION
@[simp] theorem mul_zero (m : mynat): m * 0 = 0 := rfl
@[simp]
theorem mul_succ (m n : mynat): m * (succ n) = m + (m * n) := rfl
@[simp] theorem mul_one (m : mynat) : m * 1 = m := rfl
@[simp]
theorem zero_mul: ∀ m : mynat, 0 * m = 0
| zero := by rw [zz, mul_zero]
| (succ m) := by rw [mul_succ, zero_mul, add_zero]
@[simp]
theorem one_mul: ∀ m : mynat, 1 * m = m
| zero := by rw [zz, mul_zero]
| (succ m) := by rw [mul_succ, one_mul, add_comm, add_one_succ]
@[simp]
theorem succ_mul: ∀ m n : mynat, (succ m) * n = m * n + n
| m zero := by rw [zz, mul_zero, mul_zero, add_zero]
| m (succ n) := by conv {
congr,
rw [mul_succ, succ_mul, ←add_assoc, ←add_comm (m*n), add_assoc],
skip,
rw [mul_succ, add_comm m, add_assoc, add_succ, ←succ_add],
}
@[simp]
theorem mul_add: ∀ m n k : mynat, m * (n + k) = m * n + m * k
| zero n k := by repeat { rw zz <|> rw zero_mul <|> rw zero_add }
| (succ m) n k := by repeat { rw succ_mul <|> rw mul_add }; ac_refl
theorem mul_assoc: ∀ m n k: mynat, (m * n) * k = m * (n * k)
| m n zero := by rw [zz, mul_zero, mul_zero, mul_zero]
| m n (succ k) := by rw [mul_succ, mul_succ, mul_add, mul_assoc]
instance mul_is_assoc: is_associative mynat mul :=
⟨assume a b c, mul_assoc a b c⟩
theorem mul_comm: ∀ m n : mynat, m * n = n * m
| m zero := by rw [zz, mul_zero, zero_mul]
| m (succ n) := by rw [mul_succ, succ_mul, mul_comm, add_comm]
instance mul_is_comm: is_commutative mynat mul :=
⟨assume a b, mul_comm a b⟩
@[simp]
theorem add_mul (m n k : mynat) : (m + n) * k = m * k + n * k :=
by rw [mul_comm (m + n), mul_comm m, mul_comm n, mul_add]
theorem mul_integral: ∀ {m n : mynat}, m ≠ 0 → m * n = 0 → n = 0
| m zero := by simp
| m (succ n) :=
begin
assume hmne0 hmn0,
rw mul_succ at hmn0,
from false.elim (hmne0 (add_integral hmn0)),
end
theorem mul_integral_symmetric: m * n = 0 → m = 0 ∨ n = 0 :=
begin
cases m, {
assume _, left, from rfl,
}, {
assume hsmn0,
right,
from mul_integral succ_ne_zero hsmn0,
},
end
theorem mul_cancel: m ≠ 0 → m * n = m * k → n = k :=
begin
induction n with n hn generalizing m k; assume hmn0 heq, {
simp at heq,
symmetry,
from mul_integral hmn0 heq.symm,
}, {
cases k, {
exfalso,
rw [zz, mul_zero] at heq,
from succ_ne_zero (mul_integral hmn0 heq),
}, {
rw [mul_succ, mul_succ] at heq,
rw hn hmn0 (add_cancel heq),
},
},
end
theorem mul_cancel_to_one: m ≠ 0 → m = m * k → k = 1 :=
begin
assume hmn0 hmmk,
rw [←mul_one m, mul_assoc, one_mul] at hmmk,
rw mul_cancel hmn0 hmmk,
end
-- POWERS
-- the explicit (0 : mynat) annotation is needed so the exponent elaborates at mynat
@[simp] theorem pow_zero (m : mynat) : m ^ (0: mynat) = 1 := rfl
@[simp]
theorem pow_succ (m n : mynat) : m ^ (succ n) = m * (m ^ n) := rfl
theorem zero_pow: ∀ {m : mynat}, m ≠ 0 → (0: mynat) ^ m = 0
| zero := assume h, by contradiction
| (succ m) := assume h, by simp
@[simp]
theorem pow_one: n ^ (1: mynat) = n := rfl
@[simp]
theorem one_pow: ∀ {n : mynat}, (1: mynat) ^ n = 1
| zero := rfl
| (succ n) := by rw [pow_succ, one_mul, one_pow]
@[simp]
theorem pow_add (m n : mynat) : ∀ k, m ^ (n + k) = (m ^ n) * (m ^ k)
| zero := by simp
| (succ k) := by simp [pow_add]; ac_refl
@[simp]
theorem pow_mul (m n : mynat): ∀ k, (m ^ n) ^ k = m ^ (n * k)
| zero := by rw [zz, pow_zero, mul_zero, pow_zero]
| (succ k) := by rw [pow_succ, mul_succ, pow_add, pow_mul]
@[simp]
theorem mul_pow (m n : mynat): ∀ k : mynat, (m * n) ^ k = m ^ k * n ^ k
| zero := by simp
| (succ k) := by rw [pow_succ, pow_succ, pow_succ, mul_pow]; ac_refl
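-- The lemmas above compose in the expected way; an illustrative use (not part of
-- the original file):
example (a b : mynat) : (a * b) ^ 2 = a ^ 2 * b ^ 2 := mul_pow a b 2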
end mynat
end hidden
|
5cf7d2871eee581b1efb90e8eab7ce57524a4149
|
83c8119e3298c0bfc53fc195c41a6afb63d01513
|
/library/init/algebra/ordered_ring.lean
|
660ad26f0d3678aced94ca100acc85ab922ae93d
|
[
"Apache-2.0"
] |
permissive
|
anfelor/lean
|
584b91c4e87a6d95f7630c2a93fb082a87319ed0
|
31cfc2b6bf7d674f3d0f73848b842c9c9869c9f1
|
refs/heads/master
| 1,610,067,141,310
| 1,585,992,232,000
| 1,585,992,232,000
| 251,683,543
| 0
| 0
|
Apache-2.0
| 1,585,676,570,000
| 1,585,676,569,000
| null |
UTF-8
|
Lean
| false
| false
| 17,036
|
lean
|
/-
Copyright (c) 2016 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Leonardo de Moura
-/
prelude
import init.algebra.ordered_group init.algebra.ring
/- Make sure instances defined in this file have lower priority than the ones
defined for concrete structures -/
set_option default_priority 100
set_option old_structure_cmd true
universe u
class ordered_semiring (α : Type u)
extends semiring α, ordered_cancel_comm_monoid α :=
(mul_lt_mul_of_pos_left: ∀ a b c : α, a < b → 0 < c → c * a < c * b)
(mul_lt_mul_of_pos_right: ∀ a b c : α, a < b → 0 < c → a * c < b * c)
lemma ordered_semiring.mul_le_mul_of_nonneg_left {α} [s : ordered_semiring α] (a b c : α) (h₁ : a ≤ b) (h₂ : 0 ≤ c) : c * a ≤ c * b :=
begin
cases classical.em (b ≤ a), { simp [le_antisymm h h₁] },
cases classical.em (c ≤ 0), { simp [le_antisymm h_1 h₂] },
exact (le_not_le_of_lt (ordered_semiring.mul_lt_mul_of_pos_left a b c (lt_of_le_not_le h₁ h) (lt_of_le_not_le h₂ h_1))).left,
end
lemma ordered_semiring.mul_le_mul_of_nonneg_right {α} [s : ordered_semiring α] (a b c : α) (h₁ : a ≤ b) (h₂ : 0 ≤ c) : a * c ≤ b * c :=
begin
cases classical.em (b ≤ a), { simp [le_antisymm h h₁] },
cases classical.em (c ≤ 0), { simp [le_antisymm h_1 h₂] },
exact (le_not_le_of_lt (ordered_semiring.mul_lt_mul_of_pos_right a b c (lt_of_le_not_le h₁ h) (lt_of_le_not_le h₂ h_1))).left,
end
variable {α : Type u}
section ordered_semiring
variable [ordered_semiring α]
lemma mul_le_mul_of_nonneg_left {a b c : α} (h₁ : a ≤ b) (h₂ : 0 ≤ c) : c * a ≤ c * b :=
ordered_semiring.mul_le_mul_of_nonneg_left a b c h₁ h₂
lemma mul_le_mul_of_nonneg_right {a b c : α} (h₁ : a ≤ b) (h₂ : 0 ≤ c) : a * c ≤ b * c :=
ordered_semiring.mul_le_mul_of_nonneg_right a b c h₁ h₂
lemma mul_lt_mul_of_pos_left {a b c : α} (h₁ : a < b) (h₂ : 0 < c) : c * a < c * b :=
ordered_semiring.mul_lt_mul_of_pos_left a b c h₁ h₂
lemma mul_lt_mul_of_pos_right {a b c : α} (h₁ : a < b) (h₂ : 0 < c) : a * c < b * c :=
ordered_semiring.mul_lt_mul_of_pos_right a b c h₁ h₂
-- TODO: there are four variations, depending on which variables we assume to be nonneg
lemma mul_le_mul {a b c d : α} (hac : a ≤ c) (hbd : b ≤ d) (nn_b : 0 ≤ b) (nn_c : 0 ≤ c) : a * b ≤ c * d :=
calc
a * b ≤ c * b : mul_le_mul_of_nonneg_right hac nn_b
... ≤ c * d : mul_le_mul_of_nonneg_left hbd nn_c
lemma mul_nonneg {a b : α} (ha : a ≥ 0) (hb : b ≥ 0) : a * b ≥ 0 :=
have h : 0 * b ≤ a * b, from mul_le_mul_of_nonneg_right ha hb,
by rwa [zero_mul] at h
lemma mul_nonpos_of_nonneg_of_nonpos {a b : α} (ha : a ≥ 0) (hb : b ≤ 0) : a * b ≤ 0 :=
have h : a * b ≤ a * 0, from mul_le_mul_of_nonneg_left hb ha,
by rwa mul_zero at h
lemma mul_nonpos_of_nonpos_of_nonneg {a b : α} (ha : a ≤ 0) (hb : b ≥ 0) : a * b ≤ 0 :=
have h : a * b ≤ 0 * b, from mul_le_mul_of_nonneg_right ha hb,
by rwa zero_mul at h
lemma mul_lt_mul {a b c d : α} (hac : a < c) (hbd : b ≤ d) (pos_b : 0 < b) (nn_c : 0 ≤ c) : a * b < c * d :=
calc
a * b < c * b : mul_lt_mul_of_pos_right hac pos_b
... ≤ c * d : mul_le_mul_of_nonneg_left hbd nn_c
lemma mul_lt_mul' {a b c d : α} (h1 : a ≤ c) (h2 : b < d) (h3 : b ≥ 0) (h4 : c > 0) :
a * b < c * d :=
calc
a * b ≤ c * b : mul_le_mul_of_nonneg_right h1 h3
... < c * d : mul_lt_mul_of_pos_left h2 h4
lemma mul_pos {a b : α} (ha : a > 0) (hb : b > 0) : a * b > 0 :=
have h : 0 * b < a * b, from mul_lt_mul_of_pos_right ha hb,
by rwa zero_mul at h
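-- An illustrative consequence (not part of the original file): squares of
-- positive elements are positive.
example {a : α} (ha : a > 0) : a * a > 0 := mul_pos ha ha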
lemma mul_neg_of_pos_of_neg {a b : α} (ha : a > 0) (hb : b < 0) : a * b < 0 :=
have h : a * b < a * 0, from mul_lt_mul_of_pos_left hb ha,
by rwa mul_zero at h
lemma mul_neg_of_neg_of_pos {a b : α} (ha : a < 0) (hb : b > 0) : a * b < 0 :=
have h : a * b < 0 * b, from mul_lt_mul_of_pos_right ha hb,
by rwa zero_mul at h
lemma mul_self_le_mul_self {a b : α} (h1 : 0 ≤ a) (h2 : a ≤ b) : a * a ≤ b * b :=
mul_le_mul h2 h2 h1 (le_trans h1 h2)
lemma mul_self_lt_mul_self {a b : α} (h1 : 0 ≤ a) (h2 : a < b) : a * a < b * b :=
mul_lt_mul' (le_of_lt h2) h2 h1 (lt_of_le_of_lt h1 h2)
end ordered_semiring
class linear_ordered_semiring (α : Type u) extends ordered_semiring α, linear_order α :=
(zero_lt_one : zero < one)
section linear_ordered_semiring
variable [linear_ordered_semiring α]
lemma zero_lt_one : 0 < (1:α) :=
linear_ordered_semiring.zero_lt_one α
lemma zero_le_one : 0 ≤ (1:α) :=
le_of_lt zero_lt_one
lemma two_pos : 0 < (2:α) := add_pos zero_lt_one zero_lt_one
lemma two_ne_zero : (2:α) ≠ 0 :=
ne.symm (ne_of_lt two_pos)
lemma two_gt_one : (2:α) > 1 :=
calc (2:α) = 1+1 : one_add_one_eq_two
... > 1+0 : add_lt_add_left zero_lt_one _
... = 1 : add_zero 1
lemma two_ge_one : (2:α) ≥ 1 :=
le_of_lt two_gt_one
lemma four_pos : (4:α) > 0 :=
add_pos two_pos two_pos
lemma lt_of_mul_lt_mul_left {a b c : α} (h : c * a < c * b) (hc : c ≥ 0) : a < b :=
lt_of_not_ge
(assume h1 : b ≤ a,
have h2 : c * b ≤ c * a, from mul_le_mul_of_nonneg_left h1 hc,
not_lt_of_ge h2 h)
lemma lt_of_mul_lt_mul_right {a b c : α} (h : a * c < b * c) (hc : c ≥ 0) : a < b :=
lt_of_not_ge
(assume h1 : b ≤ a,
have h2 : b * c ≤ a * c, from mul_le_mul_of_nonneg_right h1 hc,
not_lt_of_ge h2 h)
lemma le_of_mul_le_mul_left {a b c : α} (h : c * a ≤ c * b) (hc : c > 0) : a ≤ b :=
le_of_not_gt
(assume h1 : b < a,
have h2 : c * b < c * a, from mul_lt_mul_of_pos_left h1 hc,
not_le_of_gt h2 h)
lemma le_of_mul_le_mul_right {a b c : α} (h : a * c ≤ b * c) (hc : c > 0) : a ≤ b :=
le_of_not_gt
(assume h1 : b < a,
have h2 : b * c < a * c, from mul_lt_mul_of_pos_right h1 hc,
not_le_of_gt h2 h)
lemma pos_of_mul_pos_left {a b : α} (h : 0 < a * b) (h1 : 0 ≤ a) : 0 < b :=
lt_of_not_ge
(assume h2 : b ≤ 0,
have h3 : a * b ≤ 0, from mul_nonpos_of_nonneg_of_nonpos h1 h2,
not_lt_of_ge h3 h)
lemma pos_of_mul_pos_right {a b : α} (h : 0 < a * b) (h1 : 0 ≤ b) : 0 < a :=
lt_of_not_ge
(assume h2 : a ≤ 0,
have h3 : a * b ≤ 0, from mul_nonpos_of_nonpos_of_nonneg h2 h1,
not_lt_of_ge h3 h)
lemma nonneg_of_mul_nonneg_left {a b : α} (h : 0 ≤ a * b) (h1 : 0 < a) : 0 ≤ b :=
le_of_not_gt
(assume h2 : b < 0,
not_le_of_gt (mul_neg_of_pos_of_neg h1 h2) h)
lemma nonneg_of_mul_nonneg_right {a b : α} (h : 0 ≤ a * b) (h1 : 0 < b) : 0 ≤ a :=
le_of_not_gt
(assume h2 : a < 0,
not_le_of_gt (mul_neg_of_neg_of_pos h2 h1) h)
lemma neg_of_mul_neg_left {a b : α} (h : a * b < 0) (h1 : 0 ≤ a) : b < 0 :=
lt_of_not_ge
(assume h2 : b ≥ 0,
not_lt_of_ge (mul_nonneg h1 h2) h)
lemma neg_of_mul_neg_right {a b : α} (h : a * b < 0) (h1 : 0 ≤ b) : a < 0 :=
lt_of_not_ge
(assume h2 : a ≥ 0,
not_lt_of_ge (mul_nonneg h2 h1) h)
lemma nonpos_of_mul_nonpos_left {a b : α} (h : a * b ≤ 0) (h1 : 0 < a) : b ≤ 0 :=
le_of_not_gt
(assume h2 : b > 0,
not_le_of_gt (mul_pos h1 h2) h)
lemma nonpos_of_mul_nonpos_right {a b : α} (h : a * b ≤ 0) (h1 : 0 < b) : a ≤ 0 :=
le_of_not_gt
(assume h2 : a > 0,
not_le_of_gt (mul_pos h2 h1) h)
end linear_ordered_semiring
class decidable_linear_ordered_semiring (α : Type u) extends linear_ordered_semiring α, decidable_linear_order α
class ordered_ring (α : Type u) extends ring α, ordered_comm_group α, zero_ne_one_class α :=
(mul_pos : ∀ a b : α, 0 < a → 0 < b → 0 < a * b)
lemma ordered_ring.mul_nonneg {α} [s : ordered_ring α] (a b : α) (h₁ : 0 ≤ a) (h₂ : 0 ≤ b) : 0 ≤ a * b :=
begin
cases classical.em (a ≤ 0), { simp [le_antisymm h h₁] },
cases classical.em (b ≤ 0), { simp [le_antisymm h_1 h₂] },
exact (le_not_le_of_lt (ordered_ring.mul_pos a b (lt_of_le_not_le h₁ h) (lt_of_le_not_le h₂ h_1))).left,
end
lemma ordered_ring.mul_le_mul_of_nonneg_left [s : ordered_ring α] {a b c : α}
(h₁ : a ≤ b) (h₂ : 0 ≤ c) : c * a ≤ c * b :=
have 0 ≤ b - a, from sub_nonneg_of_le h₁,
have 0 ≤ c * (b - a), from ordered_ring.mul_nonneg c (b - a) h₂ this,
begin
rw mul_sub_left_distrib at this,
apply le_of_sub_nonneg this
end
lemma ordered_ring.mul_le_mul_of_nonneg_right [s : ordered_ring α] {a b c : α}
(h₁ : a ≤ b) (h₂ : 0 ≤ c) : a * c ≤ b * c :=
have 0 ≤ b - a, from sub_nonneg_of_le h₁,
have 0 ≤ (b - a) * c, from ordered_ring.mul_nonneg (b - a) c this h₂,
begin
rw mul_sub_right_distrib at this,
apply le_of_sub_nonneg this
end
lemma ordered_ring.mul_lt_mul_of_pos_left [s : ordered_ring α] {a b c : α}
(h₁ : a < b) (h₂ : 0 < c) : c * a < c * b :=
have 0 < b - a, from sub_pos_of_lt h₁,
have 0 < c * (b - a), from ordered_ring.mul_pos c (b - a) h₂ this,
begin
rw mul_sub_left_distrib at this,
apply lt_of_sub_pos this
end
lemma ordered_ring.mul_lt_mul_of_pos_right [s : ordered_ring α] {a b c : α}
(h₁ : a < b) (h₂ : 0 < c) : a * c < b * c :=
have 0 < b - a, from sub_pos_of_lt h₁,
have 0 < (b - a) * c, from ordered_ring.mul_pos (b - a) c this h₂,
begin
rw mul_sub_right_distrib at this,
apply lt_of_sub_pos this
end
instance ordered_ring.to_ordered_semiring [s : ordered_ring α] : ordered_semiring α :=
{ mul_zero := mul_zero,
zero_mul := zero_mul,
add_left_cancel := @add_left_cancel α _,
add_right_cancel := @add_right_cancel α _,
le_of_add_le_add_left := @le_of_add_le_add_left α _,
mul_lt_mul_of_pos_left := @ordered_ring.mul_lt_mul_of_pos_left α _,
mul_lt_mul_of_pos_right := @ordered_ring.mul_lt_mul_of_pos_right α _,
..s }
section ordered_ring
variable [ordered_ring α]
lemma mul_le_mul_of_nonpos_left {a b c : α} (h : b ≤ a) (hc : c ≤ 0) : c * a ≤ c * b :=
have -c ≥ 0, from neg_nonneg_of_nonpos hc,
have -c * b ≤ -c * a, from mul_le_mul_of_nonneg_left h this,
have -(c * b) ≤ -(c * a), by rwa [← neg_mul_eq_neg_mul, ← neg_mul_eq_neg_mul] at this,
le_of_neg_le_neg this
lemma mul_le_mul_of_nonpos_right {a b c : α} (h : b ≤ a) (hc : c ≤ 0) : a * c ≤ b * c :=
have -c ≥ 0, from neg_nonneg_of_nonpos hc,
have b * -c ≤ a * -c, from mul_le_mul_of_nonneg_right h this,
have -(b * c) ≤ -(a * c), by rwa [← neg_mul_eq_mul_neg, ← neg_mul_eq_mul_neg] at this,
le_of_neg_le_neg this
lemma mul_nonneg_of_nonpos_of_nonpos {a b : α} (ha : a ≤ 0) (hb : b ≤ 0) : 0 ≤ a * b :=
have 0 * b ≤ a * b, from mul_le_mul_of_nonpos_right ha hb,
by rwa zero_mul at this
lemma mul_lt_mul_of_neg_left {a b c : α} (h : b < a) (hc : c < 0) : c * a < c * b :=
have -c > 0, from neg_pos_of_neg hc,
have -c * b < -c * a, from mul_lt_mul_of_pos_left h this,
have -(c * b) < -(c * a), by rwa [← neg_mul_eq_neg_mul, ← neg_mul_eq_neg_mul] at this,
lt_of_neg_lt_neg this
lemma mul_lt_mul_of_neg_right {a b c : α} (h : b < a) (hc : c < 0) : a * c < b * c :=
have -c > 0, from neg_pos_of_neg hc,
have b * -c < a * -c, from mul_lt_mul_of_pos_right h this,
have -(b * c) < -(a * c), by rwa [← neg_mul_eq_mul_neg, ← neg_mul_eq_mul_neg] at this,
lt_of_neg_lt_neg this
lemma mul_pos_of_neg_of_neg {a b : α} (ha : a < 0) (hb : b < 0) : 0 < a * b :=
have 0 * b < a * b, from mul_lt_mul_of_neg_right ha hb,
by rwa zero_mul at this
end ordered_ring
class linear_ordered_ring (α : Type u) extends ordered_ring α, linear_order α :=
(zero_lt_one : zero < one)
instance linear_ordered_ring.to_linear_ordered_semiring [s : linear_ordered_ring α] : linear_ordered_semiring α :=
{ mul_zero := mul_zero,
zero_mul := zero_mul,
add_left_cancel := @add_left_cancel α _,
add_right_cancel := @add_right_cancel α _,
le_of_add_le_add_left := @le_of_add_le_add_left α _,
mul_lt_mul_of_pos_left := @mul_lt_mul_of_pos_left α _,
mul_lt_mul_of_pos_right := @mul_lt_mul_of_pos_right α _,
le_total := linear_ordered_ring.le_total,
..s }
section linear_ordered_ring
variable [linear_ordered_ring α]
lemma mul_self_nonneg (a : α) : a * a ≥ 0 :=
or.elim (le_total 0 a)
(assume h : a ≥ 0, mul_nonneg h h)
(assume h : a ≤ 0, mul_nonneg_of_nonpos_of_nonpos h h)
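-- For instance (a sketch, not part of the original file): a * a + 1 is nonnegative
-- in any linear ordered ring, combining mul_self_nonneg with zero_le_one.
example (a : α) : 0 ≤ a * a + 1 := add_nonneg (mul_self_nonneg a) zero_le_one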
lemma pos_and_pos_or_neg_and_neg_of_mul_pos {a b : α} (hab : a * b > 0) :
(a > 0 ∧ b > 0) ∨ (a < 0 ∧ b < 0) :=
match lt_trichotomy 0 a with
| or.inl hlt₁ :=
match lt_trichotomy 0 b with
| or.inl hlt₂ := or.inl ⟨hlt₁, hlt₂⟩
| or.inr (or.inl heq₂) := begin rw [← heq₂, mul_zero] at hab, exact absurd hab (lt_irrefl _) end
| or.inr (or.inr hgt₂) := absurd hab (lt_asymm (mul_neg_of_pos_of_neg hlt₁ hgt₂))
end
| or.inr (or.inl heq₁) := begin rw [← heq₁, zero_mul] at hab, exact absurd hab (lt_irrefl _) end
| or.inr (or.inr hgt₁) :=
match lt_trichotomy 0 b with
| or.inl hlt₂ := absurd hab (lt_asymm (mul_neg_of_neg_of_pos hgt₁ hlt₂))
| or.inr (or.inl heq₂) := begin rw [← heq₂, mul_zero] at hab, exact absurd hab (lt_irrefl _) end
| or.inr (or.inr hgt₂) := or.inr ⟨hgt₁, hgt₂⟩
end
end
lemma gt_of_mul_lt_mul_neg_left {a b c : α} (h : c * a < c * b) (hc : c ≤ 0) : a > b :=
have nhc : -c ≥ 0, from neg_nonneg_of_nonpos hc,
have h2 : -(c * b) < -(c * a), from neg_lt_neg h,
have h3 : (-c) * b < (-c) * a, from calc
(-c) * b = - (c * b) : by rewrite neg_mul_eq_neg_mul
... < -(c * a) : h2
... = (-c) * a : by rewrite neg_mul_eq_neg_mul,
lt_of_mul_lt_mul_left h3 nhc
lemma zero_gt_neg_one : -1 < (0:α) :=
begin
have this := neg_lt_neg (@zero_lt_one α _),
rwa neg_zero at this
end
lemma le_of_mul_le_of_ge_one {a b c : α} (h : a * c ≤ b) (hb : b ≥ 0) (hc : c ≥ 1) : a ≤ b :=
have h' : a * c ≤ b * c, from calc
a * c ≤ b : h
... = b * 1 : by rewrite mul_one
... ≤ b * c : mul_le_mul_of_nonneg_left hc hb,
le_of_mul_le_mul_right h' (lt_of_lt_of_le zero_lt_one hc)
lemma nonneg_le_nonneg_of_squares_le {a b : α} (hb : b ≥ 0) (h : a * a ≤ b * b) : a ≤ b :=
le_of_not_gt (λhab, not_le_of_gt (mul_self_lt_mul_self hb hab) h)
lemma mul_self_le_mul_self_iff {a b : α} (h1 : 0 ≤ a) (h2 : 0 ≤ b) : a ≤ b ↔ a * a ≤ b * b :=
⟨mul_self_le_mul_self h1, nonneg_le_nonneg_of_squares_le h2⟩
lemma mul_self_lt_mul_self_iff {a b : α} (h1 : 0 ≤ a) (h2 : 0 ≤ b) : a < b ↔ a * a < b * b :=
iff.trans (lt_iff_not_ge _ _) $ iff.trans (not_iff_not_of_iff $ mul_self_le_mul_self_iff h2 h1) $
iff.symm (lt_iff_not_ge _ _)
lemma linear_ordered_ring.eq_zero_or_eq_zero_of_mul_eq_zero
{a b : α} (h : a * b = 0) : a = 0 ∨ b = 0 :=
match lt_trichotomy 0 a with
| or.inl hlt₁ :=
match lt_trichotomy 0 b with
| or.inl hlt₂ :=
have 0 < a * b, from mul_pos hlt₁ hlt₂,
begin rw h at this, exact absurd this (lt_irrefl _) end
| or.inr (or.inl heq₂) := or.inr heq₂.symm
| or.inr (or.inr hgt₂) :=
have 0 > a * b, from mul_neg_of_pos_of_neg hlt₁ hgt₂,
begin rw h at this, exact absurd this (lt_irrefl _) end
end
| or.inr (or.inl heq₁) := or.inl heq₁.symm
| or.inr (or.inr hgt₁) :=
match lt_trichotomy 0 b with
| or.inl hlt₂ :=
have 0 > a * b, from mul_neg_of_neg_of_pos hgt₁ hlt₂,
begin rw h at this, exact absurd this (lt_irrefl _) end
| or.inr (or.inl heq₂) := or.inr heq₂.symm
| or.inr (or.inr hgt₂) :=
have 0 < a * b, from mul_pos_of_neg_of_neg hgt₁ hgt₂,
begin rw h at this, exact absurd this (lt_irrefl _) end
end
end
end linear_ordered_ring
class linear_ordered_comm_ring (α : Type u) extends linear_ordered_ring α, comm_monoid α
instance linear_ordered_comm_ring.to_integral_domain [s: linear_ordered_comm_ring α] : integral_domain α :=
{ eq_zero_or_eq_zero_of_mul_eq_zero := @linear_ordered_ring.eq_zero_or_eq_zero_of_mul_eq_zero α _,
..s }
class decidable_linear_ordered_comm_ring (α : Type u) extends linear_ordered_comm_ring α,
decidable_linear_ordered_comm_group α
instance decidable_linear_ordered_comm_ring.to_decidable_linear_ordered_semiring [d : decidable_linear_ordered_comm_ring α] :
decidable_linear_ordered_semiring α :=
let s : linear_ordered_semiring α := @linear_ordered_ring.to_linear_ordered_semiring α _ in
{ zero_mul := @linear_ordered_semiring.zero_mul α s,
mul_zero := @linear_ordered_semiring.mul_zero α s,
add_left_cancel := @linear_ordered_semiring.add_left_cancel α s,
add_right_cancel := @linear_ordered_semiring.add_right_cancel α s,
le_of_add_le_add_left := @linear_ordered_semiring.le_of_add_le_add_left α s,
mul_lt_mul_of_pos_left := @linear_ordered_semiring.mul_lt_mul_of_pos_left α s,
mul_lt_mul_of_pos_right := @linear_ordered_semiring.mul_lt_mul_of_pos_right α s,
..d }
|
7636a3f2a712cf00f903b319255af279bf2a0f5c
|
6b2a480f27775cba4f3ae191b1c1387a29de586e
|
/group_rep_2/orthoganality_of_character/orthogonality.lean
|
fdd767ba3ea6c38af7f5bf03f88f7ca4868246e6
|
[] |
no_license
|
Or7ando/group_representation
|
a681de2e19d1930a1e1be573d6735a2f0b8356cb
|
9b576984f17764ebf26c8caa2a542d248f1b50d2
|
refs/heads/master
| 1,662,413,107,324
| 1,590,302,389,000
| 1,590,302,389,000
| 258,130,829
| 0
| 1
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 7,150
|
lean
|
import schur_theorem.schur
import data.complex.basic
import Tools.tools
import Tools.eigen_value
import linear_algebra.caracteristic_polynomial
import basic_definitions.matrix_representation
import basic_definitions.equiv
import permutation_representation.regular_representation
import Reynold_operator.reynold_scalar_product
set_option pp.generalized_field_notation false
open_locale big_operators
universes u v w w'
open car_pol
variables {G : Type u} [group G]
{X : Type v} [fintype X][decidable_eq X]
{ρ : group_representation G ℂ (X → ℂ)}
open Schur₂ morphism.from_irreductible equiv_morphism shur₁_comm_ring stability pequiv
open Reynold
/--
Every `ℂ`-linear endomorphism `f` of `X → ℂ` (with `X` a nonempty finite type) admits an
eigenvalue: there are `t : ℂ` and a nonzero `φ` with `f φ + t • φ = 0`, i.e. `-t` is an
eigenvalue of `f`.
-/
theorem eigen_values_exist (hyp : 0 < fintype.card X ) : ∀ f : (X → ℂ ) →ₗ[ℂ] (X → ℂ), ∃ t : ℂ, ∃ φ : (X → ℂ ),
(φ ≠ 0) ∧ (f φ + t • φ =0) := begin
intro f,
rcases eigen_values_exist_mat hyp (linear_map.to_matrix f) with ⟨w, hyp_w⟩ ,
let t := exist_ker_not_trivial (f+ w • 1),
use w, apply t,
have : linear_map.to_matrix f + w • 1 = linear_map.to_matrix (f + w • 1),
rw to_matrix_add, rw to_matrix_smul, rw to_matrix_one,
rw ← this, assumption,
end
theorem Schur_on (hyp : 0 < fintype.card X ) [Irreductible ρ] (f : ρ ⟶ᵣ ρ) :
∃ t : ℂ, f + t • 1 = 0 :=
begin
rcases eigen_values_exist hyp f.ℓ with ⟨t,⟨ φ, hyp_t⟩ ⟩,
use t,
apply morphism.ext, apply linear_map.ext,
exact Schur₂ f t φ hyp_t,
end
/--
Note that `Y` lives in the same universe `v` as `X`; here we use the same universe, as
required for the matrix constructions.
-/
variables {Y : Type v} [fintype Y][decidable_eq Y] {ρ' : group_representation G ℂ (Y → ℂ)}
variables [fintype G][decidable_eq G]
open matrix linear_map character
open_locale matrix
/--
The theorem holds without any additional hypothesis.
-/
theorem 𝒪ℛ𝒯ℋ𝒪 (F : not_isomorphic ρ ρ')[Irreductible ρ ][Irreductible ρ'] :
scalar_product G ℂ (χ ρ ) (χ ρ' ) = 0 :=
begin
rw interpretation_produit_scalaire ρ ρ',
apply finset.sum_eq_zero,
intros,
rw Reynold_is_zero F, exact rfl,
end
open Ring
lemma proof_strategy₃ (a b: ℂ ) (hyp : b ≠ 0) (M N : matrix X X ℂ ): a • M = b • N → N = (b ⁻¹ * a) • M :=
begin
intros, rw mul_smul_mat, rw a_1,
erw ← mul_smul_mat, rw inv_mul_cancel, rw one_smul, exact hyp,
end
/--
TODO: improve this — the hypotheses `hyp` and `hyp'` are redundant (each implies the other).
-/
lemma Schur_grallr [Irreductible ρ] (f : ρ ⟶ᵣ ρ ) (hyp' : 0 < fintype.card X ) (hyp : fintype.card X ≠ 0) :
(to_matrix f.ℓ) = ((fintype.card X : ℂ)⁻¹ * (matrix.trace X ℂ ℂ (linear_map.to_matrix f.ℓ))) • 1
:= begin
apply proof_strategy₃,
refine function.injective.ne (nat.cast_injective) hyp,
rcases Schur_on hyp' f with ⟨ζ, hyp⟩,
have : f.ℓ + ζ • (1 : ρ ⟶ᵣ ρ).ℓ = 0,
erw ← morphism_module.coe_smul,
rw ← morphism_module.add_coe, rw hyp, exact rfl,
rw homo_eq_diag f.ℓ ζ this, rw map_smul, rw trace_one,
erw ← mul_smul_mat,
rw mul_comm,
exact rfl,
end
lemma proof_strategy₂ (a : ℂ ) { c d : ℂ } : c = d → a * c = a * d := congr_arg (λ x, a * x)
lemma Reynold_E (ρ : group_representation G ℂ (X → ℂ)) [Irreductible ρ ](hyp' : 0 < fintype.card X ) (hyp : fintype.card X ≠ 0)(x : X × X) :
to_matrix ((Re ρ ρ) (to_lin (ℰ x.snd x.fst))).ℓ x.snd x.fst = ↑(fintype.card G) * (↑(fintype.card X))⁻¹ * (trace X ℂ ℂ) (ℰ x.snd x.fst) :=
begin
rw Schur_grallr ((Re ρ ρ) (to_lin (ℰ x.snd x.fst)))hyp' hyp,
-- clean up
rw [ smul_val, one_val, mul_assoc, mul_comm ↑(fintype.card G), mul_assoc],
apply proof_strategy₂ (↑(fintype.card X))⁻¹,
rw to_matrix_Reynold_E,
rw trace_E, split_ifs,
-- (trace ∑ = ∑ trace) and (trace conjugate = trace) and (trace ℰ x x = 1)
{rw h,rw trace_Reynold_E,},
-- trivial
{repeat {rw mul_zero}},
end
lemma 𝒪𝓇𝓉𝒽ℴ (ρ : group_representation G ℂ (X → ℂ )) [Irreductible ρ ] (hyp' : 0 < fintype.card X )(hyp : fintype.card X ≠ 0) :
scalar_product G ℂ (χ ρ ) (χ ρ ) = (fintype.card G : ℂ ) :=
begin
rw interpretation_produit_scalaire,
conv_lhs{
apply_congr, skip,
rw Reynold_E ρ hyp' hyp,
rw trace_E,
},
rw ← finset.mul_sum,
-- ∑ x : X × X, ite (x.fst = x.snd) 1 0 = card X ... the diagonal equivalence
erw sum_diagonal_one_eq_cardinal X ℂ,
rw mul_assoc,rw inv_mul_cancel,rw mul_one,
exact function.injective.ne (nat.cast_injective) hyp,
end
theorem scalar_product_equiv (ρ : group_representation G ℂ (X → ℂ )) [Irreductible ρ ] (ρ' : group_representation G ℂ (Y → ℂ ))
(iso : is_isomorphic ρ ρ' ) [Irreductible ρ ] (hyp' : 0 < fintype.card X )
(hyp : fintype.card X ≠ 0) : scalar_product G ℂ (χ ρ ) (χ ρ' ) = (fintype.card G : ℂ ) := begin
rw ← equivalence.carac_eq' ρ ρ' iso,
erw 𝒪𝓇𝓉𝒽ℴ ρ,exact hyp', exact hyp,
end
noncomputable instance : decidable (is_isomorphic ρ ρ' ) := begin
exact classical.dec (is_isomorphic ρ ρ'),
end
theorem scalar_product_ite (ρ : group_representation G ℂ (X → ℂ )) [Irreductible ρ ] (ρ' : group_representation G ℂ (Y → ℂ ))
[Irreductible ρ' ](hyp' : 0 < fintype.card X )
(hyp : fintype.card X ≠ 0) : scalar_product G ℂ (χ ρ ) (χ ρ' ) = if (is_isomorphic ρ ρ') then (fintype.card G : ℂ ) else 0
:= begin
split_ifs, apply scalar_product_equiv (by assumption), exact h, exact hyp', exact hyp,
let := non_is_isomorphic_eq_not_isomorphic ρ ρ' h,
exact 𝒪ℛ𝒯ℋ𝒪 this ,
end
example (ρ : group_representation G ℂ (X → ℂ )) [Irreductible ρ ]
(hyp : fintype.card X ≠ 0) (hyp' : 0 < fintype.card X ) :
(↑(fintype.card G))⁻¹ * ∑ (t : G), χ ρ t * χ ρ t⁻¹ = 1 :=
begin
let g := 𝒪𝓇𝓉𝒽ℴ ρ hyp' hyp,
rw scalar_product_ext at g,
rw g, apply inv_mul_cancel,rw coe_to_lift, simp,
intro,
 let r := finset.card_eq_zero.mp a, have : ( 1 : G) ∈ finset.univ, -- `finset.univ` is nonempty: it contains `1`
refine finset.mem_univ 1,
finish,
end
example (F : not_isomorphic ρ ρ')[Irreductible ρ ][Irreductible ρ'] :
(↑(fintype.card G))⁻¹ * ∑ (t : G), χ ρ t * χ ρ' t⁻¹ = 0 :=
begin
let g := 𝒪ℛ𝒯ℋ𝒪 F,
rw scalar_product_ext at g,
rw g, erw mul_zero,
end
#check Regular.Regular_representation
/--
This also works over rings other than `ℂ`.
-/
theorem scalar_product_with_regular (ρ : group_representation G ℂ (X → ℂ )) :
scalar_product G ℂ (χ ρ) (χ (Regular.Regular_representation G ℂ )) = (χ ρ 1) * (fintype.card G) :=
begin
rw scalar_product_ext,
conv_lhs{
apply_congr,skip,
rw Regular.character_of_regular_representation, rw mul_ite, rw mul_zero,
},
rw finset.sum_ite,
rw finset.sum_const_zero, rw add_zero,simp,rw finset.sum_filter,
rw finset.sum_ite_eq',split_ifs,
rw χ_one,
let t := (finset.mem_univ (1 : G)),trivial,
end
|
bfb2cd7a419575daeaf9ba4717479dec26b7770b
|
5df84495ec6c281df6d26411cc20aac5c941e745
|
/src/formal_ml/complete_lattice.lean
|
1aba515f672676ca6ebb41544303244efa9ed481
|
[
"Apache-2.0"
] |
permissive
|
eric-wieser/formal-ml
|
e278df5a8df78aa3947bc8376650419e1b2b0a14
|
630011d19fdd9539c8d6493a69fe70af5d193590
|
refs/heads/master
| 1,681,491,589,256
| 1,612,642,743,000
| 1,612,642,743,000
| 360,114,136
| 0
| 0
|
Apache-2.0
| 1,618,998,189,000
| 1,618,998,188,000
| null |
UTF-8
|
Lean
| false
| false
| 4,098
|
lean
|
/-
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-/
import order.complete_lattice
import order.basic
lemma Sup_image_le {α β:Type*} [complete_lattice β] {f:α → β} {S:set α} {x:β}:
(∀ s∈ S, f s ≤ x) → (Sup (f '' S)≤ x) :=
begin
intro A1,
apply Sup_le,
intros b A2,
cases A2 with a A2,
rw ← A2.right,
apply (A1 a A2.left),
end
lemma le_Sup_image {α β:Type*} [complete_lattice β] {f:α → β} {S:set α} {a:α}:
(a∈ S) →
(f a) ≤ Sup (f '' S) :=
begin
intro A1,
apply le_Sup,
simp,
apply exists.intro a,
split,
apply A1,
refl,
end
lemma Sup_Sup_image_image_le {α β γ:Type*} [complete_lattice γ] {f:α → β → γ} {S:set α}
{T:set β} {x:γ}:
(∀ a∈ S, ∀ b∈T, f a b ≤ x) →
(Sup ((λ a:α, Sup ((f a) '' T)) '' S) ≤ x) :=
begin
intro A1,
apply Sup_image_le,
intros a A2,
apply Sup_image_le,
intros b A3,
apply A1 a A2 b A3,
end
lemma le_Sup_Sup_image_image {α β γ:Type*} [complete_lattice γ] {f:α → β → γ}
{S:set α} {T:set β} {a:α} {b:β}:
(a∈ S) → (b∈ T) →
(f a b) ≤ (Sup ((λ a:α, Sup ((f a) '' T)) '' S)) :=
begin
intros A1 A2,
apply le_trans,
apply @le_Sup_image β γ _ (f a) T b A2,
apply @le_Sup_image α γ _ ((λ a:α, Sup ((f a) '' T))) S a A1,
end
lemma Sup_le_Sup_of_monotone {α β:Type*} [complete_lattice α]
[complete_lattice β]
{f:α → β} {s:set α}:
(monotone f) →
Sup (f '' s) ≤ f (Sup s) :=
begin
intro A1,
apply Sup_le,
intros b A2,
simp at A2,
cases A2 with x A2,
cases A2 with A2 A3,
subst b,
apply A1,
apply le_Sup,
apply A2,
end
lemma supr_le_supr_of_monotone {α β γ:Type*} [complete_lattice α]
[complete_lattice β]
{f:α → β} {g:γ → α}:
(monotone f) →
supr (f ∘ g) ≤ f (supr g) :=
begin
intro A1,
apply Sup_le,
intros b A2,
simp at A2,
cases A2 with x A2,
subst b,
apply A1,
apply le_Sup,
simp,
end
lemma infi_le_infi_of_monotone {α β γ:Type*} [complete_lattice α]
[complete_lattice β]
{f:α → β} {g:γ → α}:
(monotone f) →
f (infi g) ≤ infi (f ∘ g) :=
begin
intro A1,
apply le_infi,
intros b,
apply A1,
apply infi_le,
end
lemma Inf_le_Inf_of_monotone {α β:Type*} [complete_lattice α]
[complete_lattice β]
{f:α → β} {s:set α}:
(monotone f) →
f (Inf s) ≤ Inf (f '' s) :=
begin
intro A1,
apply le_Inf,
intros b A2,
simp at A2,
cases A2 with b' A2,
cases A2 with A2 A3,
subst b,
apply A1,
apply Inf_le,
apply A2,
end
lemma supr_prop_def {α:Type*} [complete_lattice α]
{v:α} {P:Prop} (H:P):(⨆ (H2:P), v) = v :=
begin
apply le_antisymm,
{
apply supr_le,
intro A1,
apply le_refl v,
},
{
apply @le_supr α P _ (λ H2:P, v),
apply H,
},
end
lemma infi_prop_def {α:Type*} [complete_lattice α]
{v:α} {P:Prop} (H:P):(⨅ (H2:P), v) = v :=
begin
apply le_antisymm,
{
apply infi_le,
apply H,
},
{
apply @le_infi,
intro A1,
apply le_refl v,
},
end
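/- Usage sketch for the two lemmas above: once a proof of the proposition is supplied, the
   supremum (resp. infimum) over it collapses to the value itself. The instantiation at
   `true` below is only meant as an illustration. -/
example {α:Type*} [complete_lattice α] (v:α):(⨆ (H:true), v) = v :=
supr_prop_def trivial
example {α:Type*} [complete_lattice α] (v:α):(⨅ (H:true), v) = v :=
infi_prop_def trivial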
lemma supr_prop_false {α:Type*} [complete_lattice α]
{v:α} {P:Prop} (H:¬P):(⨆ (H2:P), v) = ⊥ :=
begin
apply le_antisymm,
{
apply supr_le,
intro A1,
exfalso,
apply H,
apply A1,
},
{
apply bot_le,
},
end
lemma infi_prop_false {α:Type*} [complete_lattice α]
{v:α} {P:Prop} (H:¬P):(⨅ (H2:P), v) = ⊤ :=
begin
apply le_antisymm,
{
apply le_top,
},
{
apply le_infi,
intro A1,
exfalso,
apply H,
apply A1,
},
end
|
84e6738741ae9e345a756ecd3a11fa6f8543471b
|
6432ea7a083ff6ba21ea17af9ee47b9c371760f7
|
/tests/lean/run/simp_all_contextual.lean
|
10ba18b9b2fe90d6b5f9dd10a02f17af9951713c
|
[
"Apache-2.0",
"LLVM-exception",
"NCSA",
"LGPL-3.0-only",
"LicenseRef-scancode-inner-net-2.0",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"Spencer-94",
"LGPL-2.1-or-later",
"HPND",
"LicenseRef-scancode-pcre",
"ISC",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"SunPro",
"CMU-Mach"
] |
permissive
|
leanprover/lean4
|
4bdf9790294964627eb9be79f5e8f6157780b4cc
|
f1f9dc0f2f531af3312398999d8b8303fa5f096b
|
refs/heads/master
| 1,693,360,665,786
| 1,693,350,868,000
| 1,693,350,868,000
| 129,571,436
| 2,827
| 311
|
Apache-2.0
| 1,694,716,156,000
| 1,523,760,560,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 73
|
lean
|
theorem ex (h : a = 0) (p : Nat → Prop) : p a → p 0 := by
simp_all
|
52f30a795d2d2fd74962a624efb6c17550bca361
|
367134ba5a65885e863bdc4507601606690974c1
|
/src/topology/algebra/group.lean
|
7eedd9e06e15118a3241575352d0d4c11f8f78d9
|
[
"Apache-2.0"
] |
permissive
|
kodyvajjha/mathlib
|
9bead00e90f68269a313f45f5561766cfd8d5cad
|
b98af5dd79e13a38d84438b850a2e8858ec21284
|
refs/heads/master
| 1,624,350,366,310
| 1,615,563,062,000
| 1,615,563,062,000
| 162,666,963
| 0
| 0
|
Apache-2.0
| 1,545,367,651,000
| 1,545,367,651,000
| null |
UTF-8
|
Lean
| false
| false
| 26,466
|
lean
|
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Mario Carneiro, Patrick Massot
-/
import order.filter.pointwise
import group_theory.quotient_group
import topology.algebra.monoid
import topology.homeomorph
/-!
# Theory of topological groups
This file defines the following typeclasses:
* `topological_group`, `topological_add_group`: multiplicative and additive topological groups,
i.e., groups with continuous `(*)` and `(⁻¹)` / `(+)` and `(-)`;
* `has_continuous_sub G` means that `G` has a continuous subtraction operation.
There is an instance deducing `has_continuous_sub` from `topological_group` but we use a separate
typeclass because, e.g., `ℕ` and `ℝ≥0` have continuous subtraction but are not additive groups.
We also define `homeomorph` versions of several `equiv`s: `homeomorph.mul_left`,
`homeomorph.mul_right`, `homeomorph.inv`, and prove a few facts about neighbourhood filters in
groups.
## Tags
topological space, group, topological group
-/
open classical set filter topological_space function
open_locale classical topological_space filter
universes u v w x
variables {α : Type u} {β : Type v} {G : Type w} {H : Type x}
section continuous_mul_group
/-!
### Groups with continuous multiplication
In this section we prove a few statements about groups with continuous `(*)`.
-/
variables [topological_space G] [group G] [has_continuous_mul G]
/-- Multiplication from the left in a topological group as a homeomorphism. -/
@[to_additive "Addition from the left in a topological additive group as a homeomorphism."]
protected def homeomorph.mul_left (a : G) : G ≃ₜ G :=
{ continuous_to_fun := continuous_const.mul continuous_id,
continuous_inv_fun := continuous_const.mul continuous_id,
.. equiv.mul_left a }
@[simp, to_additive]
lemma homeomorph.coe_mul_left (a : G) : ⇑(homeomorph.mul_left a) = (*) a := rfl
@[to_additive]
lemma homeomorph.mul_left_symm (a : G) : (homeomorph.mul_left a).symm = homeomorph.mul_left a⁻¹ :=
by { ext, refl }
@[to_additive]
lemma is_open_map_mul_left (a : G) : is_open_map (λ x, a * x) :=
(homeomorph.mul_left a).is_open_map
@[to_additive]
lemma is_closed_map_mul_left (a : G) : is_closed_map (λ x, a * x) :=
(homeomorph.mul_left a).is_closed_map
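/- A small usage sketch, relying only on the definition of `is_open_map` from the imports:
   left translation sends any open set to an open set. -/
example (a : G) {s : set G} (hs : is_open s) : is_open ((λ x, a * x) '' s) :=
is_open_map_mul_left a s hs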
/-- Multiplication from the right in a topological group as a homeomorphism. -/
@[to_additive "Addition from the right in a topological additive group as a homeomorphism."]
protected def homeomorph.mul_right (a : G) :
G ≃ₜ G :=
{ continuous_to_fun := continuous_id.mul continuous_const,
continuous_inv_fun := continuous_id.mul continuous_const,
.. equiv.mul_right a }
@[to_additive]
lemma is_open_map_mul_right (a : G) : is_open_map (λ x, x * a) :=
(homeomorph.mul_right a).is_open_map
@[to_additive]
lemma is_closed_map_mul_right (a : G) : is_closed_map (λ x, x * a) :=
(homeomorph.mul_right a).is_closed_map
@[to_additive]
lemma is_open_map_div_right (a : G) : is_open_map (λ x, x / a) :=
by simpa only [div_eq_mul_inv] using is_open_map_mul_right (a⁻¹)
@[to_additive]
lemma is_closed_map_div_right (a : G) : is_closed_map (λ x, x / a) :=
by simpa only [div_eq_mul_inv] using is_closed_map_mul_right (a⁻¹)
end continuous_mul_group
section topological_group
/-!
### Topological groups
A topological group is a group in which the multiplication and inversion operations are
continuous. Topological additive groups are defined in the same way. Equivalently, we can require
that the division operation `λ x y, x * y⁻¹` (resp., subtraction) is continuous.
-/
/-- A topological (additive) group is a group in which the addition and negation operations are
continuous. -/
class topological_add_group (G : Type u) [topological_space G] [add_group G]
extends has_continuous_add G : Prop :=
(continuous_neg : continuous (λa:G, -a))
/-- A topological group is a group in which the multiplication and inversion operations are
continuous. -/
@[to_additive]
class topological_group (G : Type*) [topological_space G] [group G]
extends has_continuous_mul G : Prop :=
(continuous_inv : continuous (has_inv.inv : G → G))
variables [topological_space G] [group G] [topological_group G]
export topological_group (continuous_inv)
export topological_add_group (continuous_neg)
@[to_additive]
lemma continuous_on_inv {s : set G} : continuous_on has_inv.inv s :=
continuous_inv.continuous_on
@[to_additive]
lemma continuous_within_at_inv {s : set G} {x : G} : continuous_within_at has_inv.inv s x :=
continuous_inv.continuous_within_at
@[to_additive]
lemma continuous_at_inv {x : G} : continuous_at has_inv.inv x :=
continuous_inv.continuous_at
@[to_additive]
lemma tendsto_inv (a : G) : tendsto has_inv.inv (𝓝 a) (𝓝 (a⁻¹)) :=
continuous_at_inv
/-- If a function converges to a value in a multiplicative topological group, then its inverse
converges to the inverse of this value. For the version in normed fields assuming additionally
that the limit is nonzero, use `tendsto.inv'`. -/
@[to_additive]
lemma filter.tendsto.inv {f : α → G} {l : filter α} {y : G} (h : tendsto f l (𝓝 y)) :
tendsto (λ x, (f x)⁻¹) l (𝓝 y⁻¹) :=
(continuous_inv.tendsto y).comp h
variables [topological_space α] {f : α → G} {s : set α} {x : α}
@[continuity, to_additive]
lemma continuous.inv (hf : continuous f) : continuous (λx, (f x)⁻¹) :=
continuous_inv.comp hf
attribute [continuity] continuous.neg -- TODO
@[to_additive]
lemma continuous_on.inv (hf : continuous_on f s) : continuous_on (λx, (f x)⁻¹) s :=
continuous_inv.comp_continuous_on hf
@[to_additive]
lemma continuous_within_at.inv (hf : continuous_within_at f s x) :
continuous_within_at (λ x, (f x)⁻¹) s x :=
hf.inv
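/- A small usage sketch: the continuity lemma for `(*)` (from the imported
   `topology.algebra.monoid`) composes with `continuous_inv`, e.g. for `x ↦ a * x⁻¹`. -/
example (a : G) : continuous (λ x : G, a * x⁻¹) :=
continuous_const.mul continuous_inv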
@[instance, to_additive]
instance [topological_space H] [group H] [topological_group H] :
topological_group (G × H) :=
{ continuous_inv := continuous_inv.prod_map continuous_inv }
variable (G)
/-- Inversion in a topological group as a homeomorphism. -/
@[to_additive "Negation in a topological group as a homeomorphism."]
protected def homeomorph.inv : G ≃ₜ G :=
{ continuous_to_fun := continuous_inv,
continuous_inv_fun := continuous_inv,
.. equiv.inv G }
@[to_additive]
lemma nhds_one_symm : comap has_inv.inv (𝓝 (1 : G)) = 𝓝 (1 : G) :=
((homeomorph.inv G).comap_nhds_eq _).trans (congr_arg nhds one_inv)
/-- The map `(x, y) ↦ (x, xy)` as a homeomorphism. This is a shear mapping. -/
@[to_additive "The map `(x, y) ↦ (x, x + y)` as a homeomorphism.
This is a shear mapping."]
protected def homeomorph.shear_mul_right : G × G ≃ₜ G × G :=
{ continuous_to_fun := continuous_fst.prod_mk continuous_mul,
continuous_inv_fun := continuous_fst.prod_mk $ continuous_fst.inv.mul continuous_snd,
.. equiv.prod_shear (equiv.refl _) equiv.mul_left }
@[simp, to_additive]
lemma homeomorph.shear_mul_right_coe :
⇑(homeomorph.shear_mul_right G) = λ z : G × G, (z.1, z.1 * z.2) :=
rfl
@[simp, to_additive]
lemma homeomorph.shear_mul_right_symm_coe :
⇑(homeomorph.shear_mul_right G).symm = λ z : G × G, (z.1, z.1⁻¹ * z.2) :=
rfl
variable {G}
@[to_additive]
lemma inv_closure (s : set G) : (closure s)⁻¹ = closure s⁻¹ :=
(homeomorph.inv G).preimage_closure s
@[to_additive exists_nhds_half_neg]
lemma exists_nhds_split_inv {s : set G} (hs : s ∈ 𝓝 (1 : G)) :
∃ V ∈ 𝓝 (1 : G), ∀ (v ∈ V) (w ∈ V), v / w ∈ s :=
have ((λp : G × G, p.1 * p.2⁻¹) ⁻¹' s) ∈ 𝓝 ((1, 1) : G × G),
from continuous_at_fst.mul continuous_at_snd.inv (by simpa),
by simpa only [div_eq_mul_inv, nhds_prod_eq, mem_prod_self_iff, prod_subset_iff, mem_preimage]
using this
@[to_additive]
lemma nhds_translation_mul_inv (x : G) : comap (λ y : G, y * x⁻¹) (𝓝 1) = 𝓝 x :=
((homeomorph.mul_right x⁻¹).comap_nhds_eq 1).trans $ show 𝓝 (1 * x⁻¹⁻¹) = 𝓝 x, by simp
@[simp, to_additive] lemma map_mul_left_nhds (x y : G) : map ((*) x) (𝓝 y) = 𝓝 (x * y) :=
(homeomorph.mul_left x).map_nhds_eq y
@[to_additive] lemma map_mul_left_nhds_one (x : G) : map ((*) x) (𝓝 1) = 𝓝 x := by simp
@[to_additive]
lemma topological_group.ext {G : Type*} [group G] {t t' : topological_space G}
(tg : @topological_group G t _) (tg' : @topological_group G t' _)
(h : @nhds G t 1 = @nhds G t' 1) : t = t' :=
eq_of_nhds_eq_nhds $ λ x, by
rw [← @nhds_translation_mul_inv G t _ _ x , ← @nhds_translation_mul_inv G t' _ _ x , ← h]
@[to_additive]
lemma topological_group.of_nhds_aux {G : Type*} [group G] [topological_space G]
(hinv : tendsto (λ (x : G), x⁻¹) (𝓝 1) (𝓝 1))
(hleft : ∀ (x₀ : G), 𝓝 x₀ = map (λ (x : G), x₀ * x) (𝓝 1))
(hconj : ∀ (x₀ : G), map (λ (x : G), x₀ * x * x₀⁻¹) (𝓝 1) ≤ 𝓝 1) : continuous (λ x : G, x⁻¹) :=
begin
rw continuous_iff_continuous_at,
rintros x₀,
have key : (λ x, (x₀*x)⁻¹) = (λ x, x₀⁻¹*x) ∘ (λ x, x₀*x*x₀⁻¹) ∘ (λ x, x⁻¹),
by {ext ; simp[mul_assoc] },
calc map (λ x, x⁻¹) (𝓝 x₀)
= map (λ x, x⁻¹) (map (λ x, x₀*x) $ 𝓝 1) : by rw hleft
... = map (λ x, (x₀*x)⁻¹) (𝓝 1) : by rw filter.map_map
... = map (((λ x, x₀⁻¹*x) ∘ (λ x, x₀*x*x₀⁻¹)) ∘ (λ x, x⁻¹)) (𝓝 1) : by rw key
... = map ((λ x, x₀⁻¹*x) ∘ (λ x, x₀*x*x₀⁻¹)) _ : by rw ← filter.map_map
... ≤ map ((λ x, x₀⁻¹ * x) ∘ λ x, x₀ * x * x₀⁻¹) (𝓝 1) : map_mono hinv
... = map (λ x, x₀⁻¹ * x) (map (λ x, x₀ * x * x₀⁻¹) (𝓝 1)) : filter.map_map
... ≤ map (λ x, x₀⁻¹ * x) (𝓝 1) : map_mono (hconj x₀)
... = 𝓝 x₀⁻¹ : (hleft _).symm
end
@[to_additive]
lemma topological_group.of_nhds_one' {G : Type*} [group G] [topological_space G]
(hmul : tendsto (uncurry ((*) : G → G → G)) ((𝓝 1) ×ᶠ 𝓝 1) (𝓝 1))
(hinv : tendsto (λ x : G, x⁻¹) (𝓝 1) (𝓝 1))
(hleft : ∀ x₀ : G, 𝓝 x₀ = map (λ x, x₀*x) (𝓝 1))
(hright : ∀ x₀ : G, 𝓝 x₀ = map (λ x, x*x₀) (𝓝 1)) : topological_group G :=
begin
refine { continuous_mul := (has_continuous_mul.of_nhds_one hmul hleft hright).continuous_mul,
continuous_inv := topological_group.of_nhds_aux hinv hleft _ },
intros x₀,
suffices : map (λ (x : G), x₀ * x * x₀⁻¹) (𝓝 1) = 𝓝 1, by simp [this, le_refl],
rw [show (λ x, x₀ * x * x₀⁻¹) = (λ x, x₀ * x) ∘ λ x, x*x₀⁻¹, by {ext, simp [mul_assoc] },
← filter.map_map, ← hright, hleft x₀⁻¹, filter.map_map],
convert map_id,
ext,
simp
end
@[to_additive]
lemma topological_group.of_nhds_one {G : Type u} [group G] [topological_space G]
(hmul : tendsto (uncurry ((*) : G → G → G)) ((𝓝 1) ×ᶠ 𝓝 1) (𝓝 1))
(hinv : tendsto (λ x : G, x⁻¹) (𝓝 1) (𝓝 1))
(hleft : ∀ x₀ : G, 𝓝 x₀ = map (λ x, x₀*x) (𝓝 1))
(hconj : ∀ x₀ : G, tendsto (λ x, x₀*x*x₀⁻¹) (𝓝 1) (𝓝 1)) : topological_group G :=
{ continuous_mul := begin
rw continuous_iff_continuous_at,
rintros ⟨x₀, y₀⟩,
have key : (λ (p : G × G), x₀ * p.1 * (y₀ * p.2)) =
((λ x, x₀*y₀*x) ∘ (uncurry (*)) ∘ (prod.map (λ x, y₀⁻¹*x*y₀) id)),
by { ext, simp [uncurry, prod.map, mul_assoc] },
specialize hconj y₀⁻¹, rw inv_inv at hconj,
calc map (λ (p : G × G), p.1 * p.2) (𝓝 (x₀, y₀))
= map (λ (p : G × G), p.1 * p.2) ((𝓝 x₀) ×ᶠ 𝓝 y₀)
: by rw nhds_prod_eq
... = map (λ (p : G × G), x₀ * p.1 * (y₀ * p.2)) ((𝓝 1) ×ᶠ (𝓝 1))
: by rw [hleft x₀, hleft y₀, prod_map_map_eq, filter.map_map]
... = map (((λ x, x₀*y₀*x) ∘ (uncurry (*))) ∘ (prod.map (λ x, y₀⁻¹*x*y₀) id))((𝓝 1) ×ᶠ (𝓝 1))
: by rw key
... = map ((λ x, x₀*y₀*x) ∘ (uncurry (*))) ((map (λ x, y₀⁻¹*x*y₀) $ 𝓝 1) ×ᶠ (𝓝 1))
: by rw [← filter.map_map, ← prod_map_map_eq', map_id]
... ≤ map ((λ x, x₀*y₀*x) ∘ (uncurry (*))) ((𝓝 1) ×ᶠ (𝓝 1))
: map_mono (filter.prod_mono hconj $ le_refl _)
... = map (λ x, x₀*y₀*x) (map (uncurry (*)) ((𝓝 1) ×ᶠ (𝓝 1))) : by rw filter.map_map
... ≤ map (λ x, x₀*y₀*x) (𝓝 1) : map_mono hmul
... = 𝓝 (x₀*y₀) : (hleft _).symm
end,
continuous_inv := topological_group.of_nhds_aux hinv hleft hconj}
@[to_additive]
lemma topological_group.of_comm_of_nhds_one {G : Type*} [comm_group G] [topological_space G]
(hmul : tendsto (uncurry ((*) : G → G → G)) ((𝓝 1) ×ᶠ 𝓝 1) (𝓝 1))
(hinv : tendsto (λ x : G, x⁻¹) (𝓝 1) (𝓝 1))
(hleft : ∀ x₀ : G, 𝓝 x₀ = map (λ x, x₀*x) (𝓝 1)) : topological_group G :=
topological_group.of_nhds_one hmul hinv hleft (by simpa using tendsto_id)
end topological_group
section quotient_topological_group
variables [topological_space G] [group G] [topological_group G] (N : subgroup G) (n : N.normal)
@[to_additive]
instance {G : Type*} [group G] [topological_space G] (N : subgroup G) :
topological_space (quotient_group.quotient N) :=
quotient.topological_space
open quotient_group
@[to_additive]
lemma quotient_group.is_open_map_coe : is_open_map (coe : G → quotient N) :=
begin
intros s s_op,
change is_open ((coe : G → quotient N) ⁻¹' (coe '' s)),
rw quotient_group.preimage_image_coe N s,
exact is_open_Union (λ n, is_open_map_mul_right n s s_op)
end
@[to_additive]
instance topological_group_quotient [N.normal] : topological_group (quotient N) :=
{ continuous_mul := begin
have cont : continuous ((coe : G → quotient N) ∘ (λ (p : G × G), p.fst * p.snd)) :=
continuous_quot_mk.comp continuous_mul,
have quot : quotient_map (λ p : G × G, ((p.1:quotient N), (p.2:quotient N))),
{ apply is_open_map.to_quotient_map,
{ exact (quotient_group.is_open_map_coe N).prod (quotient_group.is_open_map_coe N) },
{ exact continuous_quot_mk.prod_map continuous_quot_mk },
{ exact (surjective_quot_mk _).prod_map (surjective_quot_mk _) } },
exact (quotient_map.continuous_iff quot).2 cont,
end,
continuous_inv := begin
have : continuous ((coe : G → quotient N) ∘ (λ (a : G), a⁻¹)) :=
continuous_quot_mk.comp continuous_inv,
convert continuous_quotient_lift _ this,
end }
attribute [instance] topological_add_group_quotient
end quotient_topological_group
/-- A typeclass saying that `λ p : G × G, p.1 - p.2` is a continuous function. This property
automatically holds for topological additive groups but it also holds, e.g., for `ℝ≥0`. -/
class has_continuous_sub (G : Type*) [topological_space G] [has_sub G] : Prop :=
(continuous_sub : continuous (λ p : G × G, p.1 - p.2))
@[priority 100] -- see Note [lower instance priority]
instance topological_add_group.to_has_continuous_sub [topological_space G] [add_group G]
[topological_add_group G] :
has_continuous_sub G :=
⟨by { simp only [sub_eq_add_neg], exact continuous_fst.add continuous_snd.neg }⟩
export has_continuous_sub (continuous_sub)
section has_continuous_sub
variables [topological_space G] [has_sub G] [has_continuous_sub G]
lemma filter.tendsto.sub {f g : α → G} {l : filter α} {a b : G} (hf : tendsto f l (𝓝 a))
(hg : tendsto g l (𝓝 b)) :
tendsto (λx, f x - g x) l (𝓝 (a - b)) :=
(continuous_sub.tendsto (a, b)).comp (hf.prod_mk_nhds hg)
variables [topological_space α] {f g : α → G} {s : set α} {x : α}
@[continuity] lemma continuous.sub (hf : continuous f) (hg : continuous g) :
continuous (λ x, f x - g x) :=
continuous_sub.comp (hf.prod_mk hg : _)
lemma continuous_within_at.sub (hf : continuous_within_at f s x) (hg : continuous_within_at g s x) :
continuous_within_at (λ x, f x - g x) s x :=
hf.sub hg
lemma continuous_on.sub (hf : continuous_on f s) (hg : continuous_on g s) :
continuous_on (λx, f x - g x) s :=
λ x hx, (hf x hx).sub (hg x hx)
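/- A small usage sketch: with a continuous subtraction, translation-like maps such as
   `x ↦ x - a` are continuous. -/
example (a : G) : continuous (λ x : G, x - a) :=
continuous_id.sub continuous_const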
end has_continuous_sub
lemma nhds_translation [topological_space G] [add_group G] [topological_add_group G] (x : G) :
comap (λy:G, y - x) (𝓝 0) = 𝓝 x :=
by simpa only [sub_eq_add_neg] using nhds_translation_add_neg x
/-- An additive group with a neighbourhood around 0.
Only used to construct a topology and uniform space.
This is currently only available for commutative groups, but it can be extended to
non-commutative groups too.
-/
class add_group_with_zero_nhd (G : Type u) extends add_comm_group G :=
(Z [] : filter G)
(zero_Z : pure 0 ≤ Z)
(sub_Z : tendsto (λp:G×G, p.1 - p.2) (Z ×ᶠ Z) Z)
namespace add_group_with_zero_nhd
variables (G) [add_group_with_zero_nhd G]
local notation `Z` := add_group_with_zero_nhd.Z
@[priority 100] -- see Note [lower instance priority]
instance : topological_space G :=
topological_space.mk_of_nhds $ λa, map (λx, x + a) (Z G)
variables {G}
lemma neg_Z : tendsto (λa:G, - a) (Z G) (Z G) :=
have tendsto (λa, (0:G)) (Z G) (Z G),
by refine le_trans (assume h, _) zero_Z; simp [univ_mem_sets'] {contextual := tt},
have tendsto (λa:G, 0 - a) (Z G) (Z G), from
sub_Z.comp (tendsto.prod_mk this tendsto_id),
by simpa
lemma add_Z : tendsto (λp:G×G, p.1 + p.2) (Z G ×ᶠ Z G) (Z G) :=
suffices tendsto (λp:G×G, p.1 - -p.2) (Z G ×ᶠ Z G) (Z G),
by simpa [sub_eq_add_neg],
sub_Z.comp (tendsto.prod_mk tendsto_fst (neg_Z.comp tendsto_snd))
lemma exists_Z_half {s : set G} (hs : s ∈ Z G) : ∃ V ∈ Z G, ∀ (v ∈ V) (w ∈ V), v + w ∈ s :=
begin
have : ((λa:G×G, a.1 + a.2) ⁻¹' s) ∈ Z G ×ᶠ Z G := add_Z (by simpa using hs),
rcases mem_prod_self_iff.1 this with ⟨V, H, H'⟩,
exact ⟨V, H, prod_subset_iff.1 H'⟩
end
lemma nhds_eq (a : G) : 𝓝 a = map (λx, x + a) (Z G) :=
topological_space.nhds_mk_of_nhds _ _
(assume a, calc pure a = map (λx, x + a) (pure 0) : by simp
... ≤ _ : map_mono zero_Z)
(assume b s hs,
let ⟨t, ht, eqt⟩ := exists_Z_half hs in
have t0 : (0:G) ∈ t, by simpa using zero_Z ht,
begin
refine ⟨(λx:G, x + b) '' t, image_mem_map ht, _, _⟩,
{ refine set.image_subset_iff.2 (assume b hbt, _),
simpa using eqt 0 t0 b hbt },
{ rintros _ ⟨c, hb, rfl⟩,
refine (Z G).sets_of_superset ht (assume x hxt, _),
simpa [add_assoc] using eqt _ hxt _ hb }
end)
lemma nhds_zero_eq_Z : 𝓝 0 = Z G := by simp [nhds_eq]; exact filter.map_id
@[priority 100] -- see Note [lower instance priority]
instance : has_continuous_add G :=
⟨ continuous_iff_continuous_at.2 $ assume ⟨a, b⟩,
begin
rw [continuous_at, nhds_prod_eq, nhds_eq, nhds_eq, nhds_eq, filter.prod_map_map_eq,
tendsto_map'_iff],
suffices : tendsto ((λx:G, (a + b) + x) ∘ (λp:G×G,p.1 + p.2)) (Z G ×ᶠ Z G)
(map (λx:G, (a + b) + x) (Z G)),
{ simpa [(∘), add_comm, add_left_comm] },
exact tendsto_map.comp add_Z
end ⟩
@[priority 100] -- see Note [lower instance priority]
instance : topological_add_group G :=
⟨continuous_iff_continuous_at.2 $ assume a,
begin
rw [continuous_at, nhds_eq, nhds_eq, tendsto_map'_iff],
suffices : tendsto ((λx:G, x - a) ∘ (λx:G, -x)) (Z G) (map (λx:G, x - a) (Z G)),
{ simpa [(∘), add_comm, sub_eq_add_neg] using this },
exact tendsto_map.comp neg_Z
end⟩
end add_group_with_zero_nhd
section filter_mul
section
variables [topological_space G] [group G] [topological_group G]
@[to_additive]
lemma is_open.mul_left {s t : set G} : is_open t → is_open (s * t) := λ ht,
begin
have : ∀a, is_open ((λ (x : G), a * x) '' t) :=
assume a, is_open_map_mul_left a t ht,
rw ← Union_mul_left_image,
exact is_open_Union (λa, is_open_Union $ λha, this _),
end
@[to_additive]
lemma is_open.mul_right {s t : set G} : is_open s → is_open (s * t) := λ hs,
begin
have : ∀a, is_open ((λ (x : G), x * a) '' s),
assume a, apply is_open_map_mul_right, exact hs,
rw ← Union_mul_right_image,
exact is_open_Union (λa, is_open_Union $ λha, this _),
end
variables (G)
lemma topological_group.t1_space (h : @is_closed G _ {1}) : t1_space G :=
⟨assume x, by { convert is_closed_map_mul_right x _ h, simp }⟩
lemma topological_group.regular_space [t1_space G] : regular_space G :=
⟨assume s a hs ha,
let f := λ p : G × G, p.1 * (p.2)⁻¹ in
have hf : continuous f := continuous_fst.mul continuous_snd.inv,
-- a ∈ -s implies f (a, 1) ∈ -s, and so (a, 1) ∈ f⁻¹' (-s);
-- and so can find t₁ t₂ open such that a ∈ t₁ × t₂ ⊆ f⁻¹' (-s)
let ⟨t₁, t₂, ht₁, ht₂, a_mem_t₁, one_mem_t₂, t_subset⟩ :=
is_open_prod_iff.1 ((is_open_compl_iff.2 hs).preimage hf) a (1:G) (by simpa [f]) in
begin
use [s * t₂, ht₂.mul_left, λ x hx, ⟨x, 1, hx, one_mem_t₂, mul_one _⟩],
rw [nhds_within, inf_principal_eq_bot, mem_nhds_sets_iff],
refine ⟨t₁, _, ht₁, a_mem_t₁⟩,
rintros x hx ⟨y, z, hy, hz, yz⟩,
have : x * z⁻¹ ∈ sᶜ := (prod_subset_iff.1 t_subset) x hx z hz,
have : x * z⁻¹ ∈ s, rw ← yz, simpa,
contradiction
end⟩
local attribute [instance] topological_group.regular_space
lemma topological_group.t2_space [t1_space G] : t2_space G := regular_space.t2_space G
end
section
/-! Some results about an open set containing the product of two sets in a topological group. -/
variables [topological_space G] [group G] [topological_group G]
/-- Given a compact set `K` inside an open set `U`, there is an open neighborhood `V` of `1`
such that `KV ⊆ U`. -/
@[to_additive "Given a compact set `K` inside an open set `U`, there is an open neighborhood
`V` of `0` such that `K + V ⊆ U`."]
lemma compact_open_separated_mul {K U : set G} (hK : is_compact K) (hU : is_open U) (hKU : K ⊆ U) :
∃ V : set G, is_open V ∧ (1 : G) ∈ V ∧ K * V ⊆ U :=
begin
let W : G → set G := λ x, (λ y, x * y) ⁻¹' U,
have h1W : ∀ x, is_open (W x) := λ x, hU.preimage (continuous_mul_left x),
have h2W : ∀ x ∈ K, (1 : G) ∈ W x := λ x hx, by simp only [mem_preimage, mul_one, hKU hx],
choose V hV using λ x : K, exists_open_nhds_one_mul_subset (mem_nhds_sets (h1W x) (h2W x.1 x.2)),
let X : K → set G := λ x, (λ y, (x : G)⁻¹ * y) ⁻¹' (V x),
obtain ⟨t, ht⟩ : ∃ t : finset ↥K, K ⊆ ⋃ i ∈ t, X i,
{ refine hK.elim_finite_subcover X (λ x, (hV x).1.preimage (continuous_mul_left x⁻¹)) _,
intros x hx, rw [mem_Union], use ⟨x, hx⟩, rw [mem_preimage], convert (hV _).2.1,
simp only [mul_left_inv, subtype.coe_mk] },
refine ⟨⋂ x ∈ t, V x, is_open_bInter (finite_mem_finset _) (λ x hx, (hV x).1), _, _⟩,
{ simp only [mem_Inter], intros x hx, exact (hV x).2.1 },
rintro _ ⟨x, y, hx, hy, rfl⟩, simp only [mem_Inter] at hy,
have := ht hx, simp only [mem_Union, mem_preimage] at this, rcases this with ⟨z, h1z, h2z⟩,
have : (z : G)⁻¹ * x * y ∈ W z := (hV z).2.2 (mul_mem_mul h2z (hy z h1z)),
rw [mem_preimage] at this, convert this using 1, simp only [mul_assoc, mul_inv_cancel_left]
end
/-- A compact set is covered by finitely many left multiplicative translates of a set
with non-empty interior. -/
@[to_additive "A compact set is covered by finitely many left additive translates of a set
with non-empty interior."]
lemma compact_covered_by_mul_left_translates {K V : set G} (hK : is_compact K)
(hV : (interior V).nonempty) : ∃ t : finset G, K ⊆ ⋃ g ∈ t, (λ h, g * h) ⁻¹' V :=
begin
obtain ⟨t, ht⟩ : ∃ t : finset G, K ⊆ ⋃ x ∈ t, interior (((*) x) ⁻¹' V),
{ refine hK.elim_finite_subcover (λ x, interior $ ((*) x) ⁻¹' V) (λ x, is_open_interior) _,
cases hV with g₀ hg₀,
refine λ g hg, mem_Union.2 ⟨g₀ * g⁻¹, _⟩,
refine preimage_interior_subset_interior_preimage (continuous_const.mul continuous_id) _,
rwa [mem_preimage, inv_mul_cancel_right] },
exact ⟨t, subset.trans ht $ bUnion_subset_bUnion_right $ λ g hg, interior_subset⟩
end
/-- Every locally compact separable topological group is σ-compact.
Note: this is not true if we drop the topological group hypothesis. -/
@[priority 100] instance separable_locally_compact_group.sigma_compact_space
[separable_space G] [locally_compact_space G] : sigma_compact_space G :=
begin
obtain ⟨L, hLc, hL1⟩ := exists_compact_mem_nhds (1 : G),
refine ⟨⟨λ n, (λ x, x * dense_seq G n) ⁻¹' L, _, _⟩⟩,
{ intro n, exact (homeomorph.mul_right _).compact_preimage.mpr hLc },
{ refine Union_eq_univ_iff.2 (λ x, _),
obtain ⟨_, ⟨n, rfl⟩, hn⟩ : (range (dense_seq G) ∩ (λ y, x * y) ⁻¹' L).nonempty,
{ rw [← (homeomorph.mul_left x).apply_symm_apply 1] at hL1,
exact (dense_range_dense_seq G).inter_nhds_nonempty
((homeomorph.mul_left x).continuous.continuous_at $ hL1) },
exact ⟨n, hn⟩ }
end
end
section
variables [topological_space G] [comm_group G] [topological_group G]
@[to_additive]
lemma nhds_mul (x y : G) : 𝓝 (x * y) = 𝓝 x * 𝓝 y :=
filter_eq $ set.ext $ assume s,
begin
rw [← nhds_translation_mul_inv x, ← nhds_translation_mul_inv y, ← nhds_translation_mul_inv (x*y)],
split,
{ rintros ⟨t, ht, ts⟩,
rcases exists_nhds_one_split ht with ⟨V, V1, h⟩,
refine ⟨(λa, a * x⁻¹) ⁻¹' V, (λa, a * y⁻¹) ⁻¹' V,
⟨V, V1, subset.refl _⟩, ⟨V, V1, subset.refl _⟩, _⟩,
rintros a ⟨v, w, v_mem, w_mem, rfl⟩,
apply ts,
simpa [mul_comm, mul_assoc, mul_left_comm] using h (v * x⁻¹) v_mem (w * y⁻¹) w_mem },
{ rintros ⟨a, c, ⟨b, hb, ba⟩, ⟨d, hd, dc⟩, ac⟩,
refine ⟨b ∩ d, inter_mem_sets hb hd, assume v, _⟩,
simp only [preimage_subset_iff, mul_inv_rev, mem_preimage] at *,
rintros ⟨vb, vd⟩,
refine ac ⟨v * y⁻¹, y, _, _, _⟩,
{ rw ← mul_assoc _ _ _ at vb, exact ba _ vb },
{ apply dc y, rw mul_right_inv, exact mem_of_nhds hd },
{ simp only [inv_mul_cancel_right] } }
end
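/- A small usage sketch: `nhds_mul` iterates to products of several factors. -/
example (x y z : G) : 𝓝 (x * y * z) = 𝓝 x * 𝓝 y * 𝓝 z :=
by rw [nhds_mul, nhds_mul]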
@[to_additive]
lemma nhds_is_mul_hom : is_mul_hom (λx:G, 𝓝 x) := ⟨λ_ _, nhds_mul _ _⟩
end
end filter_mul
instance additive.topological_add_group {G} [h : topological_space G]
[group G] [topological_group G] : @topological_add_group (additive G) h _ :=
{ continuous_neg := @continuous_inv G _ _ _ }
instance multiplicative.topological_group {G} [h : topological_space G]
[add_group G] [topological_add_group G] : @topological_group (multiplicative G) h _ :=
{ continuous_inv := @continuous_neg G _ _ _ }
|
1c6b7cc87ce0d043c4c4aa032f9d8b858f61ae8b
|
076f5040b63237c6dd928c6401329ed5adcb0e44
|
/instructor-notes/2019.10.01..poly/option.lean
|
b9a2230edcf9f4fce5ed67597e1c74c8e0d2a274
|
[] |
no_license
|
kevinsullivan/uva-cs-dm-f19
|
0f123689cf6cb078f263950b18382a7086bf30be
|
09a950752884bd7ade4be33e9e89a2c4b1927167
|
refs/heads/master
| 1,594,771,841,541
| 1,575,853,850,000
| 1,575,853,850,000
| 205,433,890
| 4
| 9
| null | 1,571,592,121,000
| 1,567,188,539,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 136
|
lean
|
namespace myoption
inductive moption (α : Type) : Type
| some (a : α) : moption
| none : moption
#check moption.some 4
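-- A possible usage sketch: extract the wrapped value, with a default result for `none`.
def mget {α : Type} (d : α) : moption α → α
| (moption.some a) := a
| moption.none := d
#eval mget 0 (moption.some 4) -- expected output: 4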
end myoption
|
4642e2fcaa3a44fe64593408a249438d94453210
|
9dc8cecdf3c4634764a18254e94d43da07142918
|
/src/geometry/manifold/instances/real.lean
|
4c835a09792d29e738eb8da782e5f1df9b97f121
|
[
"Apache-2.0"
] |
permissive
|
jcommelin/mathlib
|
d8456447c36c176e14d96d9e76f39841f69d2d9b
|
ee8279351a2e434c2852345c51b728d22af5a156
|
refs/heads/master
| 1,664,782,136,488
| 1,663,638,983,000
| 1,663,638,983,000
| 132,563,656
| 0
| 0
|
Apache-2.0
| 1,663,599,929,000
| 1,525,760,539,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 13,811
|
lean
|
/-
Copyright (c) 2019 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import linear_algebra.finite_dimensional
import geometry.manifold.smooth_manifold_with_corners
import analysis.inner_product_space.pi_L2
/-!
# Constructing examples of manifolds over ℝ
We introduce the necessary bits to be able to define manifolds modelled over `ℝ^n`, boundaryless
or with boundary or with corners. As a concrete example, we construct explicitly the manifold with
boundary structure on the real interval `[x, y]`.
More specifically, we introduce
* `model_with_corners ℝ (euclidean_space ℝ (fin n)) (euclidean_half_space n)` for the model space
used to define `n`-dimensional real manifolds with boundary
* `model_with_corners ℝ (euclidean_space ℝ (fin n)) (euclidean_quadrant n)` for the model space used
to define `n`-dimensional real manifolds with corners
## Notations
In the locale `manifold`, we introduce the notations
* `𝓡 n` for the identity model with corners on `euclidean_space ℝ (fin n)`
* `𝓡∂ n` for `model_with_corners ℝ (euclidean_space ℝ (fin n)) (euclidean_half_space n)`.
For instance, if a manifold `M` is boundaryless, smooth and modelled on `euclidean_space ℝ (fin m)`,
and `N` is smooth with boundary modelled on `euclidean_half_space n`, and `f : M → N` is a smooth
map, then the derivative of `f` can be written simply as `mfderiv (𝓡 m) (𝓡∂ n) f` (as to why the
model with corners can not be implicit, see the discussion in `smooth_manifold_with_corners.lean`).
## Implementation notes
The manifold structure on the interval `[x, y] = Icc x y` requires the assumption `x < y` as a
typeclass. We provide it as `[fact (x < y)]`.
-/
noncomputable theory
open set function
open_locale manifold
/--
The half-space in `ℝ^n`, used to model manifolds with boundary. We only define it when
`1 ≤ n`, as the definition only makes sense in this case.
-/
def euclidean_half_space (n : ℕ) [has_zero (fin n)] : Type :=
{x : euclidean_space ℝ (fin n) // 0 ≤ x 0}
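/- A small sanity check: a term of `euclidean_half_space n` is a point of `ℝ^n` bundled
   with a proof that its first coordinate is nonnegative. -/
example {n : ℕ} [has_zero (fin n)] (x : euclidean_half_space n) : 0 ≤ x.val 0 :=
x.property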
/--
The quadrant in `ℝ^n`, used to model manifolds with corners, made of all vectors with nonnegative
coordinates.
-/
def euclidean_quadrant (n : ℕ) : Type := {x : euclidean_space ℝ (fin n) // ∀i:fin n, 0 ≤ x i}
section
/- Register class instances for the euclidean half-space and quadrant; these cannot be found
by typeclass resolution without the following reducibility attribute (which is only set in
this section). -/
local attribute [reducible] euclidean_half_space euclidean_quadrant
variable {n : ℕ}
instance [has_zero (fin n)] : topological_space (euclidean_half_space n) := by apply_instance
instance : topological_space (euclidean_quadrant n) := by apply_instance
instance [has_zero (fin n)] : inhabited (euclidean_half_space n) := ⟨⟨0, le_rfl⟩⟩
instance : inhabited (euclidean_quadrant n) := ⟨⟨0, λ i, le_rfl⟩⟩
lemma range_half_space (n : ℕ) [has_zero (fin n)] :
range (λx : euclidean_half_space n, x.val) = {y | 0 ≤ y 0} :=
by simp
lemma range_quadrant (n : ℕ) :
range (λx : euclidean_quadrant n, x.val) = {y | ∀i:fin n, 0 ≤ y i} :=
by simp
end
/--
Definition of the model with corners `(euclidean_space ℝ (fin n), euclidean_half_space n)`, used as
a model for manifolds with boundary. In the locale `manifold`, use the shortcut `𝓡∂ n`.
-/
def model_with_corners_euclidean_half_space (n : ℕ) [has_zero (fin n)] :
model_with_corners ℝ (euclidean_space ℝ (fin n)) (euclidean_half_space n) :=
{ to_fun := subtype.val,
inv_fun := λx, ⟨update x 0 (max (x 0) 0), by simp [le_refl]⟩,
source := univ,
target := {x | 0 ≤ x 0},
map_source' := λx hx, x.property,
map_target' := λx hx, mem_univ _,
left_inv' := λ ⟨xval, xprop⟩ hx, begin
rw [subtype.mk_eq_mk, update_eq_iff],
exact ⟨max_eq_left xprop, λ i _, rfl⟩
end,
right_inv' := λx hx, update_eq_iff.2 ⟨max_eq_left hx, λ i _, rfl⟩,
source_eq := rfl,
unique_diff' :=
have this : unique_diff_on ℝ _ :=
unique_diff_on.pi (fin n) (λ _, ℝ) _ _ (λ i ∈ ({0} : set (fin n)), unique_diff_on_Ici 0),
by simpa only [singleton_pi] using this,
continuous_to_fun := continuous_subtype_val,
continuous_inv_fun := (continuous_id.update 0 $
(continuous_apply 0).max continuous_const).subtype_mk _ }
/--
Definition of the model with corners `(euclidean_space ℝ (fin n), euclidean_quadrant n)`, used as a
model for manifolds with corners -/
def model_with_corners_euclidean_quadrant (n : ℕ) :
model_with_corners ℝ (euclidean_space ℝ (fin n)) (euclidean_quadrant n) :=
{ to_fun := subtype.val,
inv_fun := λx, ⟨λi, max (x i) 0, λi, by simp only [le_refl, or_true, le_max_iff]⟩,
source := univ,
target := {x | ∀ i, 0 ≤ x i},
map_source' := λx hx, by simpa only [subtype.range_val] using x.property,
map_target' := λx hx, mem_univ _,
left_inv' := λ ⟨xval, xprop⟩ hx, by { ext i, simp only [subtype.coe_mk, xprop i, max_eq_left] },
right_inv' := λ x hx, by { ext1 i, simp only [hx i, max_eq_left] },
source_eq := rfl,
unique_diff' :=
have this : unique_diff_on ℝ _ :=
unique_diff_on.univ_pi (fin n) (λ _, ℝ) _ (λ i, unique_diff_on_Ici 0),
by simpa only [pi_univ_Ici] using this,
continuous_to_fun := continuous_subtype_val,
continuous_inv_fun := continuous.subtype_mk (continuous_pi $ λ i,
(continuous_id.max continuous_const).comp (continuous_apply i)) _ }
localized "notation (name := model_with_corners_self.euclidean) `𝓡 `n :=
(model_with_corners_self ℝ (euclidean_space ℝ (fin n)) :
model_with_corners ℝ (euclidean_space ℝ (fin n)) (euclidean_space ℝ (fin n)))" in manifold
localized "notation (name := model_with_corners_euclidean_half_space.euclidean) `𝓡∂ `n :=
(model_with_corners_euclidean_half_space n :
model_with_corners ℝ (euclidean_space ℝ (fin n)) (euclidean_half_space n))" in manifold
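/- Sanity check for the notations above: `𝓡∂ 1` is the half-space model used below to put a
   manifold-with-boundary structure on intervals. -/
example : model_with_corners ℝ (euclidean_space ℝ (fin 1)) (euclidean_half_space 1) := 𝓡∂ 1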
/--
The left chart for the topological space `[x, y]`, defined on `[x,y)` and sending `x` to `0` in
`euclidean_half_space 1`.
-/
def Icc_left_chart (x y : ℝ) [fact (x < y)] :
local_homeomorph (Icc x y) (euclidean_half_space 1) :=
{ source := {z : Icc x y | z.val < y},
target := {z : euclidean_half_space 1 | z.val 0 < y - x},
to_fun := λ(z : Icc x y), ⟨λi, z.val - x, sub_nonneg.mpr z.property.1⟩,
inv_fun := λz, ⟨min (z.val 0 + x) y, by simp [le_refl, z.prop, le_of_lt (fact.out (x < y))]⟩,
map_source' := by simp only [imp_self, sub_lt_sub_iff_right, mem_set_of_eq, forall_true_iff],
map_target' :=
by { simp only [min_lt_iff, mem_set_of_eq], assume z hz, left,
dsimp [-subtype.val_eq_coe] at hz, linarith },
left_inv' := begin
rintros ⟨z, hz⟩ h'z,
simp only [mem_set_of_eq, mem_Icc] at hz h'z,
simp only [hz, min_eq_left, sub_add_cancel]
end,
right_inv' := begin
rintros ⟨z, hz⟩ h'z,
rw subtype.mk_eq_mk,
funext,
dsimp at hz h'z,
have A : x + z 0 ≤ y, by linarith,
rw subsingleton.elim i 0,
simp only [A, add_comm, add_sub_cancel', min_eq_left],
end,
open_source := begin
have : is_open {z : ℝ | z < y} := is_open_Iio,
exact this.preimage continuous_subtype_val
end,
open_target := begin
have : is_open {z : ℝ | z < y - x} := is_open_Iio,
have : is_open {z : euclidean_space ℝ (fin 1) | z 0 < y - x} :=
this.preimage (@continuous_apply (fin 1) (λ _, ℝ) _ 0),
exact this.preimage continuous_subtype_val
end,
continuous_to_fun := begin
apply continuous.continuous_on,
apply continuous.subtype_mk,
have : continuous (λ (z : ℝ) (i : fin 1), z - x) :=
continuous.sub (continuous_pi $ λi, continuous_id) continuous_const,
exact this.comp continuous_subtype_val,
end,
continuous_inv_fun := begin
apply continuous.continuous_on,
apply continuous.subtype_mk,
have A : continuous (λ z : ℝ, min (z + x) y) :=
(continuous_id.add continuous_const).min continuous_const,
have B : continuous (λz : euclidean_space ℝ (fin 1), z 0) := continuous_apply 0,
exact (A.comp B).comp continuous_subtype_val
end }
/--
The right chart for the topological space `[x, y]`, defined on `(x,y]` and sending `y` to `0` in
`euclidean_half_space 1`.
-/
def Icc_right_chart (x y : ℝ) [fact (x < y)] :
local_homeomorph (Icc x y) (euclidean_half_space 1) :=
{ source := {z : Icc x y | x < z.val},
target := {z : euclidean_half_space 1 | z.val 0 < y - x},
to_fun := λ(z : Icc x y), ⟨λi, y - z.val, sub_nonneg.mpr z.property.2⟩,
inv_fun := λz,
⟨max (y - z.val 0) x, by simp [le_refl, z.prop, le_of_lt (fact.out (x < y)), sub_eq_add_neg]⟩,
map_source' := by simp only [imp_self, mem_set_of_eq, sub_lt_sub_iff_left, forall_true_iff],
map_target' :=
by { simp only [lt_max_iff, mem_set_of_eq], assume z hz, left,
dsimp [-subtype.val_eq_coe] at hz, linarith },
left_inv' := begin
rintros ⟨z, hz⟩ h'z,
simp only [mem_set_of_eq, mem_Icc] at hz h'z,
simp only [hz, sub_eq_add_neg, max_eq_left, add_add_neg_cancel'_right, neg_add_rev, neg_neg]
end,
right_inv' := begin
rintros ⟨z, hz⟩ h'z,
rw subtype.mk_eq_mk,
funext,
dsimp at hz h'z,
have A : x ≤ y - z 0, by linarith,
rw subsingleton.elim i 0,
simp only [A, sub_sub_cancel, max_eq_left],
end,
open_source := begin
have : is_open {z : ℝ | x < z} := is_open_Ioi,
exact this.preimage continuous_subtype_val
end,
open_target := begin
have : is_open {z : ℝ | z < y - x} := is_open_Iio,
have : is_open {z : euclidean_space ℝ (fin 1) | z 0 < y - x} :=
this.preimage (@continuous_apply (fin 1) (λ _, ℝ) _ 0),
exact this.preimage continuous_subtype_val
end,
continuous_to_fun := begin
apply continuous.continuous_on,
apply continuous.subtype_mk,
have : continuous (λ (z : ℝ) (i : fin 1), y - z) :=
continuous_const.sub (continuous_pi (λi, continuous_id)),
exact this.comp continuous_subtype_val,
end,
continuous_inv_fun := begin
apply continuous.continuous_on,
apply continuous.subtype_mk,
have A : continuous (λ z : ℝ, max (y - z) x) :=
(continuous_const.sub continuous_id).max continuous_const,
have B : continuous (λz : euclidean_space ℝ (fin 1), z 0) := continuous_apply 0,
exact (A.comp B).comp continuous_subtype_val
end }
/--
Charted space structure on `[x, y]`, using only two charts taking values in
`euclidean_half_space 1`.
-/
instance Icc_manifold (x y : ℝ) [fact (x < y)] : charted_space (euclidean_half_space 1) (Icc x y) :=
{ atlas := {Icc_left_chart x y, Icc_right_chart x y},
chart_at := λz, if z.val < y then Icc_left_chart x y else Icc_right_chart x y,
mem_chart_source := λz, begin
by_cases h' : z.val < y,
{ simp only [h', if_true],
exact h' },
{ simp only [h', if_false],
apply lt_of_lt_of_le (fact.out (x < y)),
simpa only [not_lt] using h'}
end,
chart_mem_atlas := λ z, by by_cases h' : (z : ℝ) < y; simp [h'] }
/--
The manifold structure on `[x, y]` is smooth.
-/
instance Icc_smooth_manifold (x y : ℝ) [fact (x < y)] :
smooth_manifold_with_corners (𝓡∂ 1) (Icc x y) :=
begin
have M : cont_diff_on ℝ ∞ (λz : euclidean_space ℝ (fin 1), - z + (λi, y - x)) univ,
{ rw cont_diff_on_univ,
exact cont_diff_id.neg.add cont_diff_const },
apply smooth_manifold_with_corners_of_cont_diff_on,
assume e e' he he',
simp only [atlas, mem_singleton_iff, mem_insert_iff] at he he',
/- We need to check that any composition of two charts gives a `C^∞` function. Each chart can be
either the left chart or the right chart, leaving 4 possibilities that we handle successively.
-/
rcases he with rfl | rfl; rcases he' with rfl | rfl,
{ -- `e = left chart`, `e' = left chart`
exact (mem_groupoid_of_pregroupoid.mpr (symm_trans_mem_cont_diff_groupoid _ _ _)).1 },
{ -- `e = left chart`, `e' = right chart`
apply M.congr_mono _ (subset_univ _),
rintro _ ⟨⟨hz₁, hz₂⟩, ⟨⟨z, hz₀⟩, rfl⟩⟩,
simp only [model_with_corners_euclidean_half_space, Icc_left_chart, Icc_right_chart,
update_same, max_eq_left, hz₀, lt_sub_iff_add_lt] with mfld_simps at hz₁ hz₂,
rw [min_eq_left hz₁.le, lt_add_iff_pos_left] at hz₂,
ext i,
rw subsingleton.elim i 0,
simp only [model_with_corners_euclidean_half_space, Icc_left_chart, Icc_right_chart, *,
pi_Lp.add_apply, pi_Lp.neg_apply, max_eq_left, min_eq_left hz₁.le, update_same]
with mfld_simps,
abel },
{ -- `e = right chart`, `e' = left chart`
apply M.congr_mono _ (subset_univ _),
rintro _ ⟨⟨hz₁, hz₂⟩, ⟨z, hz₀⟩, rfl⟩,
simp only [model_with_corners_euclidean_half_space, Icc_left_chart, Icc_right_chart, max_lt_iff,
update_same, max_eq_left hz₀] with mfld_simps at hz₁ hz₂,
rw lt_sub at hz₁,
ext i,
rw subsingleton.elim i 0,
simp only [model_with_corners_euclidean_half_space, Icc_left_chart, Icc_right_chart,
pi_Lp.add_apply, pi_Lp.neg_apply, update_same, max_eq_left, hz₀, hz₁.le] with mfld_simps,
abel },
{ -- `e = right chart`, `e' = right chart`
exact (mem_groupoid_of_pregroupoid.mpr (symm_trans_mem_cont_diff_groupoid _ _ _)).1 }
end
/-! Register the manifold structure on `Icc 0 1`, and also its zero and one. -/
section
lemma fact_zero_lt_one : fact ((0 : ℝ) < 1) := ⟨zero_lt_one⟩
local attribute [instance] fact_zero_lt_one
instance : charted_space (euclidean_half_space 1) (Icc (0 : ℝ) 1) := by apply_instance
instance : smooth_manifold_with_corners (𝓡∂ 1) (Icc (0 : ℝ) 1) := by apply_instance
end
|
b01351d52db0b6137e71265e86d7acf501851c7e
|
1abd1ed12aa68b375cdef28959f39531c6e95b84
|
/src/field_theory/separable.lean
|
cfafeec7669a6739d203fe09089a24bb1c920056
|
[
"Apache-2.0"
] |
permissive
|
jumpy4/mathlib
|
d3829e75173012833e9f15ac16e481e17596de0f
|
af36f1a35f279f0e5b3c2a77647c6bf2cfd51a13
|
refs/heads/master
| 1,693,508,842,818
| 1,636,203,271,000
| 1,636,203,271,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 28,676
|
lean
|
/-
Copyright (c) 2020 Kenny Lau. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kenny Lau
-/
import algebra.polynomial.big_operators
import field_theory.minpoly
import field_theory.splitting_field
import field_theory.tower
import algebra.squarefree
/-!
# Separable polynomials
We define a polynomial to be separable if it is coprime with its derivative. We prove basic
properties about separable polynomials here.
## Main definitions
* `polynomial.separable f`: a polynomial `f` is separable iff it is coprime with its derivative.
* `polynomial.expand R p f`: expand the polynomial `f` with coefficients in a
commutative semiring `R` by a factor of p, so `expand R p (∑ aₙ xⁿ)` is `∑ aₙ xⁿᵖ`.
* `polynomial.contract p f`: the opposite of `expand`, so it sends `∑ aₙ xⁿᵖ` to `∑ aₙ xⁿ`.
-/
universes u v w
open_locale classical big_operators
open finset
namespace polynomial
section comm_semiring
variables {R : Type u} [comm_semiring R] {S : Type v} [comm_semiring S]
/-- A polynomial is separable iff it is coprime with its derivative. -/
def separable (f : polynomial R) : Prop :=
is_coprime f f.derivative
lemma separable_def (f : polynomial R) :
f.separable ↔ is_coprime f f.derivative :=
iff.rfl
lemma separable_def' (f : polynomial R) :
f.separable ↔ ∃ a b : polynomial R, a * f + b * f.derivative = 1 :=
iff.rfl
lemma not_separable_zero [nontrivial R] : ¬ separable (0 : polynomial R) :=
begin
rintro ⟨x, y, h⟩,
simpa only [derivative_zero, mul_zero, add_zero, zero_ne_one] using h,
end
lemma separable_one : (1 : polynomial R).separable :=
is_coprime_one_left
lemma separable_X_add_C (a : R) : (X + C a).separable :=
by { rw [separable_def, derivative_add, derivative_X, derivative_C, add_zero],
exact is_coprime_one_right }
lemma separable_X : (X : polynomial R).separable :=
by { rw [separable_def, derivative_X], exact is_coprime_one_right }
lemma separable_C (r : R) : (C r).separable ↔ is_unit r :=
by rw [separable_def, derivative_C, is_coprime_zero_right, is_unit_C]
lemma separable.of_mul_left {f g : polynomial R} (h : (f * g).separable) : f.separable :=
begin
have := h.of_mul_left_left, rw derivative_mul at this,
exact is_coprime.of_mul_right_left (is_coprime.of_add_mul_left_right this)
end
lemma separable.of_mul_right {f g : polynomial R} (h : (f * g).separable) : g.separable :=
by { rw mul_comm at h, exact h.of_mul_left }
lemma separable.of_dvd {f g : polynomial R} (hf : f.separable) (hfg : g ∣ f) : g.separable :=
by { rcases hfg with ⟨f', rfl⟩, exact separable.of_mul_left hf }
lemma separable_gcd_left {F : Type*} [field F] {f : polynomial F}
(hf : f.separable) (g : polynomial F) : (euclidean_domain.gcd f g).separable :=
separable.of_dvd hf (euclidean_domain.gcd_dvd_left f g)
lemma separable_gcd_right {F : Type*} [field F] {g : polynomial F}
(f : polynomial F) (hg : g.separable) : (euclidean_domain.gcd f g).separable :=
separable.of_dvd hg (euclidean_domain.gcd_dvd_right f g)
lemma separable.is_coprime {f g : polynomial R} (h : (f * g).separable) : is_coprime f g :=
begin
have := h.of_mul_left_left, rw derivative_mul at this,
exact is_coprime.of_mul_right_right (is_coprime.of_add_mul_left_right this)
end
theorem separable.of_pow' {f : polynomial R} :
∀ {n : ℕ} (h : (f ^ n).separable), is_unit f ∨ (f.separable ∧ n = 1) ∨ n = 0
| 0 := λ h, or.inr $ or.inr rfl
| 1 := λ h, or.inr $ or.inl ⟨pow_one f ▸ h, rfl⟩
| (n+2) := λ h, by { rw [pow_succ, pow_succ] at h,
exact or.inl (is_coprime_self.1 h.is_coprime.of_mul_right_left) }
theorem separable.of_pow {f : polynomial R} (hf : ¬is_unit f) {n : ℕ} (hn : n ≠ 0)
(hfs : (f ^ n).separable) : f.separable ∧ n = 1 :=
(hfs.of_pow'.resolve_left hf).resolve_right hn
theorem separable.map {p : polynomial R} (h : p.separable) {f : R →+* S} : (p.map f).separable :=
let ⟨a, b, H⟩ := h in ⟨a.map f, b.map f,
by rw [derivative_map, ← map_mul, ← map_mul, ← map_add, H, map_one]⟩
variables (R) (p q : ℕ)
/-- Expand the polynomial by a factor of p, so `∑ aₙ xⁿ` becomes `∑ aₙ xⁿᵖ`. -/
noncomputable def expand : polynomial R →ₐ[R] polynomial R :=
{ commutes' := λ r, eval₂_C _ _,
.. (eval₂_ring_hom C (X ^ p) : polynomial R →+* polynomial R) }
lemma coe_expand : (expand R p : polynomial R → polynomial R) = eval₂ C (X ^ p) := rfl
variables {R}
lemma expand_eq_sum {f : polynomial R} :
expand R p f = f.sum (λ e a, C a * (X ^ p) ^ e) :=
by { dsimp [expand, eval₂], refl, }
@[simp] lemma expand_C (r : R) : expand R p (C r) = C r := eval₂_C _ _
@[simp] lemma expand_X : expand R p X = X ^ p := eval₂_X _ _
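-- Illustrative sketch (not part of the original file): using the two `simp` lemmas above,
-- expanding by `p = 2` sends `X + C r` to `X ^ 2 + C r`.
example (r : R) : expand R 2 (X + C r) = X ^ 2 + C r :=
by rw [alg_hom.map_add, expand_X, expand_C]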
@[simp] lemma expand_monomial (r : R) : expand R p (monomial q r) = monomial (q * p) r :=
by simp_rw [monomial_eq_smul_X, alg_hom.map_smul, alg_hom.map_pow, expand_X, mul_comm, pow_mul]
theorem expand_expand (f : polynomial R) : expand R p (expand R q f) = expand R (p * q) f :=
polynomial.induction_on f (λ r, by simp_rw expand_C)
(λ f g ihf ihg, by simp_rw [alg_hom.map_add, ihf, ihg])
(λ n r ih, by simp_rw [alg_hom.map_mul, expand_C, alg_hom.map_pow, expand_X,
alg_hom.map_pow, expand_X, pow_mul])
theorem expand_mul (f : polynomial R) : expand R (p * q) f = expand R p (expand R q f) :=
(expand_expand p q f).symm
@[simp] theorem expand_one (f : polynomial R) : expand R 1 f = f :=
polynomial.induction_on f
(λ r, by rw expand_C)
(λ f g ihf ihg, by rw [alg_hom.map_add, ihf, ihg])
(λ n r ih, by rw [alg_hom.map_mul, expand_C, alg_hom.map_pow, expand_X, pow_one])
theorem expand_pow (f : polynomial R) : expand R (p ^ q) f = (expand R p ^[q] f) :=
nat.rec_on q (by rw [pow_zero, expand_one, function.iterate_zero, id]) $ λ n ih,
by rw [function.iterate_succ_apply', pow_succ, expand_mul, ih]
theorem derivative_expand (f : polynomial R) :
(expand R p f).derivative = expand R p f.derivative * (p * X ^ (p - 1)) :=
by rw [coe_expand, derivative_eval₂_C, derivative_pow, derivative_X, mul_one]
theorem coeff_expand {p : ℕ} (hp : 0 < p) (f : polynomial R) (n : ℕ) :
(expand R p f).coeff n = if p ∣ n then f.coeff (n / p) else 0 :=
begin
simp only [expand_eq_sum],
simp_rw [coeff_sum, ← pow_mul, C_mul_X_pow_eq_monomial, coeff_monomial, sum],
split_ifs with h,
{ rw [finset.sum_eq_single (n/p), nat.mul_div_cancel' h, if_pos rfl],
{ intros b hb1 hb2, rw if_neg, intro hb3, apply hb2, rw [← hb3, nat.mul_div_cancel_left b hp] },
{ intro hn, rw not_mem_support_iff.1 hn, split_ifs; refl } },
{ rw finset.sum_eq_zero, intros k hk, rw if_neg, exact λ hkn, h ⟨k, hkn.symm⟩, },
end
@[simp] theorem coeff_expand_mul {p : ℕ} (hp : 0 < p) (f : polynomial R) (n : ℕ) :
(expand R p f).coeff (n * p) = f.coeff n :=
by rw [coeff_expand hp, if_pos (dvd_mul_left _ _), nat.mul_div_cancel _ hp]
@[simp] theorem coeff_expand_mul' {p : ℕ} (hp : 0 < p) (f : polynomial R) (n : ℕ) :
(expand R p f).coeff (p * n) = f.coeff n :=
by rw [mul_comm, coeff_expand_mul hp]
theorem expand_inj {p : ℕ} (hp : 0 < p) {f g : polynomial R} :
expand R p f = expand R p g ↔ f = g :=
⟨λ H, ext $ λ n, by rw [← coeff_expand_mul hp, H, coeff_expand_mul hp], congr_arg _⟩
theorem expand_eq_zero {p : ℕ} (hp : 0 < p) {f : polynomial R} : expand R p f = 0 ↔ f = 0 :=
by rw [← (expand R p).map_zero, expand_inj hp, alg_hom.map_zero]
theorem expand_eq_C {p : ℕ} (hp : 0 < p) {f : polynomial R} {r : R} :
expand R p f = C r ↔ f = C r :=
by rw [← expand_C, expand_inj hp, expand_C]
theorem nat_degree_expand (p : ℕ) (f : polynomial R) :
(expand R p f).nat_degree = f.nat_degree * p :=
begin
cases p.eq_zero_or_pos with hp hp,
{ rw [hp, coe_expand, pow_zero, mul_zero, ← C_1, eval₂_hom, nat_degree_C] },
by_cases hf : f = 0,
{ rw [hf, alg_hom.map_zero, nat_degree_zero, zero_mul] },
have hf1 : expand R p f ≠ 0 := mt (expand_eq_zero hp).1 hf,
rw [← with_bot.coe_eq_coe, ← degree_eq_nat_degree hf1],
refine le_antisymm ((degree_le_iff_coeff_zero _ _).2 $ λ n hn, _) _,
{ rw coeff_expand hp, split_ifs with hpn,
{ rw coeff_eq_zero_of_nat_degree_lt, contrapose! hn,
rw [with_bot.coe_le_coe, ← nat.div_mul_cancel hpn], exact nat.mul_le_mul_right p hn },
{ refl } },
{ refine le_degree_of_ne_zero _,
rw [coeff_expand_mul hp, ← leading_coeff], exact mt leading_coeff_eq_zero.1 hf }
end
theorem map_expand {p : ℕ} (hp : 0 < p) {f : R →+* S} {q : polynomial R} :
map f (expand R p q) = expand S p (map f q) :=
by { ext, rw [coeff_map, coeff_expand hp, coeff_expand hp], split_ifs; simp, }
/-- Expansion is injective. -/
lemma expand_injective {n : ℕ} (hn : 0 < n) :
function.injective (expand R n) :=
λ g g' h, begin
ext,
have h' : (expand R n g).coeff (n * n_1) = (expand R n g').coeff (n * n_1) :=
begin
apply polynomial.ext_iff.1,
exact h,
end,
rw [polynomial.coeff_expand hn g (n * n_1), polynomial.coeff_expand hn g' (n * n_1)] at h',
simp only [if_true, dvd_mul_right] at h',
rw (nat.mul_div_right n_1 hn) at h',
exact h',
end
end comm_semiring
section comm_ring
variables {R : Type u} [comm_ring R]
lemma separable_X_sub_C {x : R} : separable (X - C x) :=
by simpa only [sub_eq_add_neg, C_neg] using separable_X_add_C (-x)
lemma separable.mul {f g : polynomial R} (hf : f.separable) (hg : g.separable)
(h : is_coprime f g) : (f * g).separable :=
by { rw [separable_def, derivative_mul], exact ((hf.mul_right h).add_mul_left_right _).mul_left
((h.symm.mul_right hg).mul_add_right_right _) }
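-- Illustrative sketch (not part of the original file): combining `separable_X_sub_C` with
-- `separable.mul`, a product of two coprime linear factors is separable.
example {x y : R} (h : is_coprime (X - C x) (X - C y)) :
  ((X - C x) * (X - C y)).separable :=
separable_X_sub_C.mul separable_X_sub_C h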
lemma separable_prod' {ι : Sort*} {f : ι → polynomial R} {s : finset ι} :
(∀x∈s, ∀y∈s, x ≠ y → is_coprime (f x) (f y)) → (∀x∈s, (f x).separable) →
(∏ x in s, f x).separable :=
finset.induction_on s (λ _ _, separable_one) $ λ a s has ih h1 h2, begin
simp_rw [finset.forall_mem_insert, forall_and_distrib] at h1 h2, rw prod_insert has,
exact h2.1.mul (ih h1.2.2 h2.2) (is_coprime.prod_right $ λ i his, h1.1.2 i his $
ne.symm $ ne_of_mem_of_not_mem his has)
end
lemma separable_prod {ι : Sort*} [fintype ι] {f : ι → polynomial R}
(h1 : pairwise (is_coprime on f)) (h2 : ∀ x, (f x).separable) : (∏ x, f x).separable :=
separable_prod' (λ x hx y hy hxy, h1 x y hxy) (λ x hx, h2 x)
lemma separable.inj_of_prod_X_sub_C [nontrivial R] {ι : Sort*} {f : ι → R} {s : finset ι}
(hfs : (∏ i in s, (X - C (f i))).separable)
{x y : ι} (hx : x ∈ s) (hy : y ∈ s) (hfxy : f x = f y) : x = y :=
begin
by_contra hxy,
rw [← insert_erase hx, prod_insert (not_mem_erase _ _),
← insert_erase (mem_erase_of_ne_of_mem (ne.symm hxy) hy),
prod_insert (not_mem_erase _ _), ← mul_assoc, hfxy, ← sq] at hfs,
cases (hfs.of_mul_left.of_pow (by exact not_is_unit_X_sub_C) two_ne_zero).2
end
lemma separable.injective_of_prod_X_sub_C [nontrivial R] {ι : Sort*} [fintype ι] {f : ι → R}
(hfs : (∏ i, (X - C (f i))).separable) : function.injective f :=
λ x y hfxy, hfs.inj_of_prod_X_sub_C (mem_univ _) (mem_univ _) hfxy
lemma is_unit_of_self_mul_dvd_separable {p q : polynomial R}
(hp : p.separable) (hq : q * q ∣ p) : is_unit q :=
begin
obtain ⟨p, rfl⟩ := hq,
apply is_coprime_self.mp,
have : is_coprime (q * (q * p)) (q * (q.derivative * p + q.derivative * p + q * p.derivative)),
{ simp only [← mul_assoc, mul_add],
convert hp,
rw [derivative_mul, derivative_mul],
ring },
exact is_coprime.of_mul_right_left (is_coprime.of_mul_left_left this)
end
end comm_ring
section is_domain
variables (R : Type u) [comm_ring R] [is_domain R]
theorem is_local_ring_hom_expand {p : ℕ} (hp : 0 < p) :
is_local_ring_hom (↑(expand R p) : polynomial R →+* polynomial R) :=
begin
refine ⟨λ f hf1, _⟩, rw ← coe_fn_coe_base at hf1,
have hf2 := eq_C_of_degree_eq_zero (degree_eq_zero_of_is_unit hf1),
rw [coeff_expand hp, if_pos (dvd_zero _), p.zero_div] at hf2,
rw [hf2, is_unit_C] at hf1, rw expand_eq_C hp at hf2, rwa [hf2, is_unit_C]
end
end is_domain
section field
variables {F : Type u} [field F] {K : Type v} [field K]
theorem separable_iff_derivative_ne_zero {f : polynomial F} (hf : irreducible f) :
f.separable ↔ f.derivative ≠ 0 :=
⟨λ h1 h2, hf.not_unit $ is_coprime_zero_right.1 $ h2 ▸ h1,
λ h, is_coprime_of_dvd (mt and.right h) $ λ g hg1 hg2 ⟨p, hg3⟩ hg4,
let ⟨u, hu⟩ := (hf.is_unit_or_is_unit hg3).resolve_left hg1 in
have f ∣ f.derivative, by { conv_lhs { rw [hg3, ← hu] }, rwa units.mul_right_dvd },
not_lt_of_le (nat_degree_le_of_dvd this h) $ nat_degree_derivative_lt h⟩
theorem separable_map (f : F →+* K) {p : polynomial F} : (p.map f).separable ↔ p.separable :=
by simp_rw [separable_def, derivative_map, is_coprime_map]
section char_p
/-- The opposite of `expand`: sends `∑ aₙ xⁿᵖ` to `∑ aₙ xⁿ`. -/
noncomputable def contract (p : ℕ) (f : polynomial F) : polynomial F :=
∑ n in range (f.nat_degree + 1), monomial n (f.coeff (n * p))
variables (p : ℕ) [hp : fact p.prime]
include hp
theorem coeff_contract (f : polynomial F) (n : ℕ) : (contract p f).coeff n = f.coeff (n * p) :=
begin
simp only [contract, coeff_monomial, sum_ite_eq', finset_sum_coeff, mem_range, not_lt,
ite_eq_left_iff],
assume hn,
apply (coeff_eq_zero_of_nat_degree_lt _).symm,
calc f.nat_degree < f.nat_degree + 1 : nat.lt_succ_self _
... ≤ n * 1 : by simpa only [mul_one] using hn
... ≤ n * p : mul_le_mul_of_nonneg_left (@nat.prime.one_lt p (fact.out _)).le (zero_le n)
end
theorem of_irreducible_expand {f : polynomial F} (hf : irreducible (expand F p f)) :
irreducible f :=
@@of_irreducible_map _ _ _ (is_local_ring_hom_expand F hp.1.pos) hf
theorem of_irreducible_expand_pow {f : polynomial F} {n : ℕ} :
irreducible (expand F (p ^ n) f) → irreducible f :=
nat.rec_on n (λ hf, by rwa [pow_zero, expand_one] at hf) $ λ n ih hf,
ih $ of_irreducible_expand p $ by { rw pow_succ at hf, rwa [expand_expand] }
variables [HF : char_p F p]
include HF
theorem expand_char (f : polynomial F) :
map (frobenius F p) (expand F p f) = f ^ p :=
begin
refine f.induction_on' (λ a b ha hb, _) (λ n a, _),
{ rw [alg_hom.map_add, map_add, ha, hb, add_pow_char], },
{ rw [expand_monomial, map_monomial, monomial_eq_C_mul_X, monomial_eq_C_mul_X,
mul_pow, ← C.map_pow, frobenius_def],
ring_exp }
end
theorem map_expand_pow_char (f : polynomial F) (n : ℕ) :
map ((frobenius F p) ^ n) (expand F (p ^ n) f) = f ^ (p ^ n) :=
begin
induction n, { simp [ring_hom.one_def] },
symmetry,
rw [pow_succ', pow_mul, ← n_ih, ← expand_char, pow_succ, ring_hom.mul_def, ← map_map, mul_comm,
expand_mul, ← map_expand (nat.prime.pos hp.1)],
end
theorem expand_contract {f : polynomial F} (hf : f.derivative = 0) :
expand F p (contract p f) = f :=
begin
ext n, rw [coeff_expand hp.1.pos, coeff_contract], split_ifs with h,
{ rw nat.div_mul_cancel h },
{ cases n, { exact absurd (dvd_zero p) h },
have := coeff_derivative f n, rw [hf, coeff_zero, zero_eq_mul] at this, cases this, { rw this },
rw [← nat.cast_succ, char_p.cast_eq_zero_iff F p] at this,
exact absurd this h }
end
theorem separable_or {f : polynomial F} (hf : irreducible f) : f.separable ∨
¬f.separable ∧ ∃ g : polynomial F, irreducible g ∧ expand F p g = f :=
if H : f.derivative = 0 then or.inr
⟨by rw [separable_iff_derivative_ne_zero hf, not_not, H],
contract p f,
by haveI := is_local_ring_hom_expand F hp.1.pos; exact
of_irreducible_map ↑(expand F p) (by rwa ← expand_contract p H at hf),
expand_contract p H⟩
else or.inl $ (separable_iff_derivative_ne_zero hf).2 H
theorem exists_separable_of_irreducible {f : polynomial F} (hf : irreducible f) (hf0 : f ≠ 0) :
∃ (n : ℕ) (g : polynomial F), g.separable ∧ expand F (p ^ n) g = f :=
begin
unfreezingI {
induction hn : f.nat_degree using nat.strong_induction_on with N ih generalizing f },
rcases separable_or p hf with h | ⟨h1, g, hg, hgf⟩,
{ refine ⟨0, f, h, _⟩, rw [pow_zero, expand_one] },
{ cases N with N,
{ rw [nat_degree_eq_zero_iff_degree_le_zero, degree_le_zero_iff] at hn,
rw [hn, separable_C, is_unit_iff_ne_zero, not_not] at h1,
rw [h1, C_0] at hn, exact absurd hn hf0 },
have hg1 : g.nat_degree * p = N.succ,
{ rwa [← nat_degree_expand, hgf] },
have hg2 : g.nat_degree ≠ 0,
{ intro this, rw [this, zero_mul] at hg1, cases hg1 },
have hg3 : g.nat_degree < N.succ,
{ rw [← mul_one g.nat_degree, ← hg1],
exact nat.mul_lt_mul_of_pos_left hp.1.one_lt (nat.pos_of_ne_zero hg2) },
have hg4 : g ≠ 0,
{ rintro rfl, exact hg2 nat_degree_zero },
rcases ih _ hg3 hg hg4 rfl with ⟨n, g, hg5, rfl⟩, refine ⟨n+1, g, hg5, _⟩,
rw [← hgf, expand_expand, pow_succ] }
end
theorem is_unit_or_eq_zero_of_separable_expand {f : polynomial F} (n : ℕ)
(hf : (expand F (p ^ n) f).separable) : is_unit f ∨ n = 0 :=
begin
rw or_iff_not_imp_right, intro hn,
have hf2 : (expand F (p ^ n) f).derivative = 0,
{ by rw [derivative_expand, nat.cast_pow, char_p.cast_eq_zero,
zero_pow (nat.pos_of_ne_zero hn), zero_mul, mul_zero] },
rw [separable_def, hf2, is_coprime_zero_right, is_unit_iff] at hf, rcases hf with ⟨r, hr, hrf⟩,
rw [eq_comm, expand_eq_C (pow_pos hp.1.pos _)] at hrf,
rwa [hrf, is_unit_C]
end
theorem unique_separable_of_irreducible {f : polynomial F} (hf : irreducible f) (hf0 : f ≠ 0)
(n₁ : ℕ) (g₁ : polynomial F) (hg₁ : g₁.separable) (hgf₁ : expand F (p ^ n₁) g₁ = f)
(n₂ : ℕ) (g₂ : polynomial F) (hg₂ : g₂.separable) (hgf₂ : expand F (p ^ n₂) g₂ = f) :
n₁ = n₂ ∧ g₁ = g₂ :=
begin
revert g₁ g₂, wlog hn : n₁ ≤ n₂ := le_total n₁ n₂ using [n₁ n₂, n₂ n₁] tactic.skip,
unfreezingI { intros, rw le_iff_exists_add at hn, rcases hn with ⟨k, rfl⟩,
rw [← hgf₁, pow_add, expand_mul, expand_inj (pow_pos hp.1.pos n₁)] at hgf₂, subst hgf₂,
subst hgf₁,
rcases is_unit_or_eq_zero_of_separable_expand p k hg₁ with h | rfl,
{ rw is_unit_iff at h, rcases h with ⟨r, hr, rfl⟩,
simp_rw expand_C at hf, exact absurd (is_unit_C.2 hr) hf.1 },
{ rw [add_zero, pow_zero, expand_one], split; refl } },
exact λ g₁ g₂ hg₁ hgf₁ hg₂ hgf₂, let ⟨hn, hg⟩ :=
this g₂ g₁ hg₂ hgf₂ hg₁ hgf₁ in ⟨hn.symm, hg.symm⟩
end
end char_p
lemma separable_prod_X_sub_C_iff' {ι : Sort*} {f : ι → F} {s : finset ι} :
(∏ i in s, (X - C (f i))).separable ↔ (∀ (x ∈ s) (y ∈ s), f x = f y → x = y) :=
⟨λ hfs x hx y hy hfxy, hfs.inj_of_prod_X_sub_C hx hy hfxy,
λ H, by { rw ← prod_attach, exact separable_prod' (λ x hx y hy hxy,
@pairwise_coprime_X_sub _ _ { x // x ∈ s } (λ x, f x)
(λ x y hxy, subtype.eq $ H x.1 x.2 y.1 y.2 hxy) _ _ hxy)
(λ _ _, separable_X_sub_C) }⟩
lemma separable_prod_X_sub_C_iff {ι : Sort*} [fintype ι] {f : ι → F} :
(∏ i, (X - C (f i))).separable ↔ function.injective f :=
separable_prod_X_sub_C_iff'.trans $ by simp_rw [mem_univ, true_implies_iff, function.injective]
section splits
open_locale big_operators
variables {i : F →+* K}
lemma not_unit_X_sub_C (a : F) : ¬ is_unit (X - C a) :=
λ h, have one_eq_zero : (1 : with_bot ℕ) = 0, by simpa using degree_eq_zero_of_is_unit h,
one_ne_zero (option.some_injective _ one_eq_zero)
lemma nodup_of_separable_prod {s : multiset F}
(hs : separable (multiset.map (λ a, X - C a) s).prod) : s.nodup :=
begin
rw multiset.nodup_iff_ne_cons_cons,
rintros a t rfl,
refine not_unit_X_sub_C a (is_unit_of_self_mul_dvd_separable hs _),
simpa only [multiset.map_cons, multiset.prod_cons] using mul_dvd_mul_left _ (dvd_mul_right _ _)
end
lemma multiplicity_le_one_of_separable {p q : polynomial F} (hq : ¬ is_unit q)
(hsep : separable p) : multiplicity q p ≤ 1 :=
begin
contrapose! hq,
apply is_unit_of_self_mul_dvd_separable hsep,
rw ← sq,
apply multiplicity.pow_dvd_of_le_multiplicity,
simpa only [nat.cast_one, nat.cast_bit0] using enat.add_one_le_of_lt hq
end
lemma separable.squarefree {p : polynomial F} (hsep : separable p) : squarefree p :=
begin
rw multiplicity.squarefree_iff_multiplicity_le_one p,
intro f,
by_cases hunit : is_unit f,
{ exact or.inr hunit },
exact or.inl (multiplicity_le_one_of_separable hunit hsep)
end
/-- If `n ≠ 0` in `F`, then `X ^ n - a` is separable for any `a ≠ 0`. -/
lemma separable_X_pow_sub_C {n : ℕ} (a : F) (hn : (n : F) ≠ 0) (ha : a ≠ 0) :
separable (X ^ n - C a) :=
begin
cases nat.eq_zero_or_pos n with hzero hpos,
{ exfalso,
rw hzero at hn,
exact hn (refl 0) },
apply (separable_def' (X ^ n - C a)).2,
use [-C (a⁻¹), (C ((a⁻¹) * (↑n)⁻¹) * X)],
have mul_pow_sub : X * X ^ (n - 1) = X ^ n,
{ nth_rewrite 0 [←pow_one X],
rw pow_mul_pow_sub X (nat.succ_le_iff.mpr hpos) },
rw [derivative_sub, derivative_C, sub_zero, derivative_pow X n, derivative_X, mul_one],
have hcalc : C (a⁻¹ * (↑n)⁻¹) * (↑n * (X ^ n)) = C a⁻¹ * (X ^ n),
{ calc C (a⁻¹ * (↑n)⁻¹) * (↑n * (X ^ n))
= C a⁻¹ * C ((↑n)⁻¹) * (C ↑n * (X ^ n)) : by rw [C_mul, C_eq_nat_cast]
... = C a⁻¹ * (C ((↑n)⁻¹) * C ↑n) * (X ^ n) : by ring
... = C a⁻¹ * C ((↑n)⁻¹ * ↑n) * (X ^ n) : by rw [← C_mul]
... = C a⁻¹ * C 1 * (X ^ n) : by field_simp [hn]
... = C a⁻¹ * (X ^ n) : by rw [C_1, mul_one] },
calc -C a⁻¹ * (X ^ n - C a) + C (a⁻¹ * (↑n)⁻¹) * X * (↑n * X ^ (n - 1))
= -C a⁻¹ * (X ^ n - C a) + C (a⁻¹ * (↑n)⁻¹) * (↑n * (X * X ^ (n - 1))) : by ring
... = -C a⁻¹ * (X ^ n - C a) + C a⁻¹ * (X ^ n) : by rw [mul_pow_sub, hcalc]
... = C a⁻¹ * C a : by ring
... = (1 : polynomial F) : by rw [← C_mul, inv_mul_cancel ha, C_1]
end
/-- If `n ≠ 0` in `F`, then `X ^ n - a` is squarefree for any `a ≠ 0`. -/
lemma squarefree_X_pow_sub_C {n : ℕ} (a : F) (hn : (n : F) ≠ 0) (ha : a ≠ 0) :
squarefree (X ^ n - C a) :=
(separable_X_pow_sub_C a hn ha).squarefree
lemma root_multiplicity_le_one_of_separable {p : polynomial F} (hp : p ≠ 0)
(hsep : separable p) (x : F) : root_multiplicity x p ≤ 1 :=
begin
rw [root_multiplicity_eq_multiplicity, dif_neg hp, ← enat.coe_le_coe, enat.coe_get, nat.cast_one],
exact multiplicity_le_one_of_separable (not_unit_X_sub_C _) hsep
end
lemma count_roots_le_one {p : polynomial F} (hsep : separable p) (x : F) :
p.roots.count x ≤ 1 :=
begin
by_cases hp : p = 0,
{ simp [hp] },
rw count_roots hp,
exact root_multiplicity_le_one_of_separable hp hsep x
end
lemma nodup_roots {p : polynomial F} (hsep : separable p) :
p.roots.nodup :=
multiset.nodup_iff_count_le_one.mpr (count_roots_le_one hsep)
lemma card_root_set_eq_nat_degree [algebra F K] {p : polynomial F} (hsep : p.separable)
(hsplit : splits (algebra_map F K) p) : fintype.card (p.root_set K) = p.nat_degree :=
begin
simp_rw [root_set_def, finset.coe_sort_coe, fintype.card_coe],
rw [multiset.to_finset_card_of_nodup, ←nat_degree_eq_card_roots hsplit],
exact nodup_roots hsep.map,
end
lemma eq_X_sub_C_of_separable_of_root_eq {x : F} {h : polynomial F} (h_ne_zero : h ≠ 0)
(h_sep : h.separable) (h_root : h.eval x = 0) (h_splits : splits i h)
(h_roots : ∀ y ∈ (h.map i).roots, y = i x) : h = (C (leading_coeff h)) * (X - C x) :=
begin
apply polynomial.eq_X_sub_C_of_splits_of_single_root i h_splits,
apply finset.mk.inj,
{ change _ = {i x},
rw finset.eq_singleton_iff_unique_mem,
split,
{ apply finset.mem_mk.mpr,
rw mem_roots (show h.map i ≠ 0, by exact map_ne_zero h_ne_zero),
rw [is_root.def,←eval₂_eq_eval_map,eval₂_hom,h_root],
exact ring_hom.map_zero i },
{ exact h_roots } },
{ exact nodup_roots (separable.map h_sep) },
end
lemma exists_finset_of_splits
(i : F →+* K) {f : polynomial F} (sep : separable f) (sp : splits i f) :
∃ (s : finset K), f.map i =
C (i f.leading_coeff) * (s.prod (λ a : K, (X : polynomial K) - C a)) :=
begin
classical,
obtain ⟨s, h⟩ := exists_multiset_of_splits i sp,
use s.to_finset,
rw [h, finset.prod_eq_multiset_prod, ←multiset.to_finset_eq],
apply nodup_of_separable_prod,
apply separable.of_mul_right,
rw ←h,
exact sep.map,
end
end splits
end field
end polynomial
open polynomial
theorem irreducible.separable {F : Type u} [field F] [char_zero F] {f : polynomial F}
(hf : irreducible f) : f.separable :=
begin
rw [separable_iff_derivative_ne_zero hf, ne, ← degree_eq_bot, degree_derivative_eq], rintro ⟨⟩,
rw [pos_iff_ne_zero, ne, nat_degree_eq_zero_iff_degree_le_zero, degree_le_zero_iff],
refine λ hf1, hf.not_unit _, rw [hf1, is_unit_C, is_unit_iff_ne_zero],
intro hf2, rw [hf2, C_0] at hf1, exact absurd hf1 hf.ne_zero
end
section comm_ring
variables (F K : Type*) [comm_ring F] [ring K] [algebra F K]
-- TODO: refactor to allow transcendental extensions?
-- See: https://en.wikipedia.org/wiki/Separable_extension#Separability_of_transcendental_extensions
/-- Typeclass for separable field extensions: `K` is a separable field extension of `F` iff
the minimal polynomial of every `x : K` is separable.
We define this for general (commutative) rings and only assume `F` and `K` are fields if this
is needed for a proof.
-/
class is_separable : Prop :=
(is_integral' (x : K) : is_integral F x)
(separable' (x : K) : (minpoly F x).separable)
variables (F) {K}
theorem is_separable.is_integral [is_separable F K] :
∀ x : K, is_integral F x := is_separable.is_integral'
theorem is_separable.separable [is_separable F K] :
∀ x : K, (minpoly F x).separable := is_separable.separable'
variables {F K}
theorem is_separable_iff : is_separable F K ↔ ∀ x : K, is_integral F x ∧ (minpoly F x).separable :=
⟨λ h x, ⟨@@is_separable.is_integral F _ _ _ h x, @@is_separable.separable F _ _ _ h x⟩,
λ h, ⟨λ x, (h x).1, λ x, (h x).2⟩⟩
end comm_ring
instance is_separable_self (F : Type*) [field F] : is_separable F F :=
⟨λ x, is_integral_algebra_map, λ x, by { rw minpoly.eq_X_sub_C', exact separable_X_sub_C }⟩
/-- A finite field extension in characteristic 0 is separable. -/
@[priority 100] -- See note [lower instance priority]
instance is_separable.of_finite (F K : Type*) [field F] [field K] [algebra F K]
[finite_dimensional F K] [char_zero F] : is_separable F K :=
have ∀ (x : K), is_integral F x,
from λ x, (is_algebraic_iff_is_integral _).mp (algebra.is_algebraic_of_finite _),
⟨this, λ x, (minpoly.irreducible (this x)).separable⟩
section is_separable_tower
variables (F K E : Type*) [field F] [field K] [field E] [algebra F K] [algebra F E]
[algebra K E] [is_scalar_tower F K E]
lemma is_separable_tower_top_of_is_separable [is_separable F E] : is_separable K E :=
⟨λ x, is_integral_of_is_scalar_tower x (is_separable.is_integral F x),
λ x, (is_separable.separable F x).map.of_dvd (minpoly.dvd_map_of_is_scalar_tower _ _ _)⟩
lemma is_separable_tower_bot_of_is_separable [h : is_separable F E] : is_separable F K :=
is_separable_iff.2 $ λ x, begin
refine (is_separable_iff.1 h (algebra_map K E x)).imp
is_integral_tower_bot_of_is_integral_field (λ hs, _),
obtain ⟨q, hq⟩ := minpoly.dvd F x
(is_scalar_tower.aeval_eq_zero_of_aeval_algebra_map_eq_zero_field
(minpoly.aeval F ((algebra_map K E) x))),
rw hq at hs,
exact hs.of_mul_left
end
variables {E}
lemma is_separable.of_alg_hom (E' : Type*) [field E'] [algebra F E']
(f : E →ₐ[F] E') [is_separable F E'] : is_separable F E :=
begin
letI : algebra E E' := ring_hom.to_algebra f.to_ring_hom,
haveI : is_scalar_tower F E E' := is_scalar_tower.of_algebra_map_eq (λ x, (f.commutes x).symm),
exact is_separable_tower_bot_of_is_separable F E E',
end
end is_separable_tower
section card_alg_hom
variables {R S T : Type*} [comm_ring S]
variables {K L F : Type*} [field K] [field L] [field F]
variables [algebra K S] [algebra K L]
lemma alg_hom.card_of_power_basis (pb : power_basis K S) (h_sep : (minpoly K pb.gen).separable)
(h_splits : (minpoly K pb.gen).splits (algebra_map K L)) :
@fintype.card (S →ₐ[K] L) (power_basis.alg_hom.fintype pb) = pb.dim :=
begin
let s := ((minpoly K pb.gen).map (algebra_map K L)).roots.to_finset,
have H := λ x, multiset.mem_to_finset,
rw [fintype.card_congr pb.lift_equiv', fintype.card_of_subtype s H,
← pb.nat_degree_minpoly, nat_degree_eq_card_roots h_splits, multiset.to_finset_card_of_nodup],
exact nodup_roots ((separable_map (algebra_map K L)).mpr h_sep)
end
end card_alg_hom
|
751d0fb41304d57bac8e99ad62a22beab934472b
|
d406927ab5617694ec9ea7001f101b7c9e3d9702
|
/src/data/fintype/lattice.lean
|
0602a1bd7b736c9876d8c029d60b2ebda5dad017
|
[
"Apache-2.0"
] |
permissive
|
alreadydone/mathlib
|
dc0be621c6c8208c581f5170a8216c5ba6721927
|
c982179ec21091d3e102d8a5d9f5fe06c8fafb73
|
refs/heads/master
| 1,685,523,275,196
| 1,670,184,141,000
| 1,670,184,141,000
| 287,574,545
| 0
| 0
|
Apache-2.0
| 1,670,290,714,000
| 1,597,421,623,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 2,426
|
lean
|
/-
Copyright (c) 2017 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
import data.fintype.card
import data.finset.lattice
/-!
# Lemmas relating fintypes and order/lattice structure.
## Instances
We provide `infinite` instances for
* specific types: `ℕ`, `ℤ`
* type constructors: `multiset α`, `list α`
-/
open function
open_locale nat
universes u v
variables {α β γ : Type*}
namespace finset
variables [fintype α] {s : finset α}
/-- A special case of `finset.sup_eq_supr` that omits the useless `x ∈ univ` binder. -/
lemma sup_univ_eq_supr [complete_lattice β] (f : α → β) : finset.univ.sup f = supr f :=
(sup_eq_supr _ f).trans $ congr_arg _ $ funext $ λ a, supr_pos (mem_univ _)
/-- A special case of `finset.inf_eq_infi` that omits the useless `x ∈ univ` binder. -/
lemma inf_univ_eq_infi [complete_lattice β] (f : α → β) : finset.univ.inf f = infi f :=
sup_univ_eq_supr (by exact f : α → βᵒᵈ)
@[simp] lemma fold_inf_univ [semilattice_inf α] [order_bot α] (a : α) :
finset.univ.fold (⊓) a (λ x, x) = ⊥ :=
eq_bot_iff.2 $ ((finset.fold_op_rel_iff_and $ @_root_.le_inf_iff α _).1 le_rfl).2 ⊥ $
finset.mem_univ _
@[simp] lemma fold_sup_univ [semilattice_sup α] [order_top α] (a : α) :
finset.univ.fold (⊔) a (λ x, x) = ⊤ :=
@fold_inf_univ αᵒᵈ ‹fintype α› _ _ _
end finset
open finset function
lemma finite.exists_max [finite α] [nonempty α] [linear_order β] (f : α → β) :
∃ x₀ : α, ∀ x, f x ≤ f x₀ :=
by { casesI nonempty_fintype α, simpa using exists_max_image univ f univ_nonempty }
lemma finite.exists_min [finite α] [nonempty α] [linear_order β] (f : α → β) :
∃ x₀ : α, ∀ x, f x₀ ≤ f x :=
by { casesI nonempty_fintype α, simpa using exists_min_image univ f univ_nonempty }
section
open_locale classical
instance : infinite ℕ :=
infinite.of_not_fintype $ λ ⟨s, hs⟩, finset.not_mem_range_self $ s.subset_range_sup_succ (hs _)
instance : infinite ℤ :=
infinite.of_injective int.of_nat (λ _ _, int.of_nat.inj)
instance [nonempty α] : infinite (multiset α) :=
begin
inhabit α,
exact infinite.of_injective (multiset.repeat default) (multiset.repeat_injective _),
end
instance [nonempty α] : infinite (list α) :=
infinite.of_surjective (coe : list α → multiset α) (surjective_quot_mk _)
end
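-- Illustrative usage sketches (not part of the original file): instance search finds the
-- `infinite` instances above, and `finite.exists_max` specialises to any nonempty finite type.
example : infinite (list ℕ) := by apply_instance
example : infinite (multiset ℤ) := by apply_instance
example (f : bool → ℕ) : ∃ x₀, ∀ x, f x ≤ f x₀ := finite.exists_max f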
|
670322c37bad9774ceb411a3c4a43ebd597fc2cf
|
a4673261e60b025e2c8c825dfa4ab9108246c32e
|
/src/Lean/Meta/Tactic/ElimInfo.lean
|
0f13e422ab98f71f4b0e4e4915a4217c0fcea7d2
|
[
"Apache-2.0"
] |
permissive
|
jcommelin/lean4
|
c02dec0cc32c4bccab009285475f265f17d73228
|
2909313475588cc20ac0436e55548a4502050d0a
|
refs/heads/master
| 1,674,129,550,893
| 1,606,415,348,000
| 1,606,415,348,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 1,998
|
lean
|
/-
Copyright (c) 2020 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
import Lean.Meta.Basic
namespace Lean.Meta
structure ElimAltInfo :=
(name : Name)
(numFields : Nat)
structure ElimInfo :=
(motivePos : Nat)
(targetsPos : Array Nat := #[])
(altsInfo : Array ElimAltInfo := #[])
def getElimInfo (declName : Name) : MetaM ElimInfo := do
let declInfo ← getConstInfo declName
forallTelescopeReducing declInfo.type fun xs type => do
let motive := type.getAppFn
let targets := type.getAppArgs
unless motive.isFVar && targets.all (·.isFVar) && targets.size > 0 do
throwError! "unexpected eliminator resulting type{indentExpr type}"
let motiveType ← inferType motive
forallTelescopeReducing motiveType fun motiveArgs motiveResultType => do
unless motiveArgs.size == targets.size do
throwError! "unexpected number of arguments at motive type{indentExpr motiveType}"
unless motiveResultType.isSort do
throwError! "motive result type must be a sort{indentExpr motiveType}"
let some motivePos ← pure (xs.indexOf? motive) |
throwError! "unexpected eliminator type{indentExpr declInfo.type}"
let targetsPos ← targets.mapM fun target => do
match xs.indexOf? target with
| none => throwError! "unexpected eliminator type{indentExpr declInfo.type}"
| some targetPos => pure targetPos.val
let mut altsInfo := #[]
for i in [:xs.size] do
let x := xs[i]
if x != motive && !targets.contains x then
let xDecl ← getLocalDecl x.fvarId!
if xDecl.binderInfo.isExplicit then
let numFields ← forallTelescopeReducing xDecl.type fun args _ => pure args.size
altsInfo := altsInfo.push { name := xDecl.userName, numFields := numFields : ElimAltInfo }
pure { motivePos := motivePos, targetsPos := targetsPos, altsInfo := altsInfo }
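/-
Illustrative sketch (not part of the original file): for `Nat.rec`, whose type is
`{motive : Nat → Sort u} → motive Nat.zero → ((n : Nat) → motive n → motive n.succ) → (t : Nat) → motive t`,
one would expect `getElimInfo` to return `motivePos := 0`, `targetsPos := #[3]`, and two
alternatives, `zero` with 0 fields and `succ` with 2 fields.
-/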
end Lean.Meta
|
5fb709bf6370dd82e0dd21016aa62c073251ec9d
|
4727251e0cd73359b15b664c3170e5d754078599
|
/src/model_theory/ultraproducts.lean
|
4c9363e172200cbf958b6b1cd54d21c07ca7a2eb
|
[
"Apache-2.0"
] |
permissive
|
Vierkantor/mathlib
|
0ea59ac32a3a43c93c44d70f441c4ee810ccceca
|
83bc3b9ce9b13910b57bda6b56222495ebd31c2f
|
refs/heads/master
| 1,658,323,012,449
| 1,652,256,003,000
| 1,652,256,003,000
| 209,296,341
| 0
| 1
|
Apache-2.0
| 1,568,807,655,000
| 1,568,807,655,000
| null |
UTF-8
|
Lean
| false
| false
| 5,787
|
lean
|
/-
Copyright (c) 2022 Aaron Anderson. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Aaron Anderson
-/
import model_theory.quotients
import order.filter.germ
import order.filter.ultrafilter
/-! # Ultraproducts and Łoś's Theorem
## Main Definitions
* `first_order.language.ultraproduct.Structure` is the ultraproduct structure on `filter.product`.
## Main Results
* Łoś's Theorem: `first_order.language.ultraproduct.sentence_realize`. An ultraproduct models a
sentence `φ` if and only if the set of structures in the product that model `φ` is in the
ultrafilter.
## Tags
ultraproduct, Los's theorem
-/
universes u v
variables {α : Type*} (M : α → Type*) (u : ultrafilter α)
open_locale first_order filter
open filter
namespace first_order
namespace language
open Structure
variables {L : language.{u v}} [Π a, L.Structure (M a)]
namespace ultraproduct
instance setoid_prestructure : L.prestructure ((u : filter α).product_setoid M) :=
{ to_structure := { fun_map := λ n f x a, fun_map f (λ i, x i a),
rel_map := λ n r x, ∀ᶠ (a : α) in u, rel_map r (λ i, x i a) },
fun_equiv := λ n f x y xy, begin
refine mem_of_superset (Inter_mem.2 xy) (λ a ha, _),
simp only [set.mem_Inter, set.mem_set_of_eq] at ha,
simp only [set.mem_set_of_eq, ha],
end,
rel_equiv := λ n r x y xy, begin
rw ← iff_eq_eq,
refine ⟨λ hx, _, λ hy, _⟩,
{ refine mem_of_superset (inter_mem hx (Inter_mem.2 xy)) _,
rintro a ⟨ha1, ha2⟩,
simp only [set.mem_Inter, set.mem_set_of_eq] at *,
rw ← funext ha2,
exact ha1 },
{ refine mem_of_superset (inter_mem hy (Inter_mem.2 xy)) _,
rintro a ⟨ha1, ha2⟩,
simp only [set.mem_Inter, set.mem_set_of_eq] at *,
rw funext ha2,
exact ha1 },
end,
.. (u : filter α).product_setoid M }
variables {M} {u}
instance Structure : L.Structure ((u : filter α).product M) := language.quotient_structure
lemma fun_map_cast {n : ℕ} (f : L.functions n) (x : fin n → (Π a, M a)) :
fun_map f (λ i, ((x i) : (u : filter α).product M)) = λ a, fun_map f (λ i, x i a) :=
by apply fun_map_quotient_mk
lemma term_realize_cast {β : Type*} (x : β → (Π a, M a)) (t : L.term β) :
t.realize (λ i, ((x i) : (u : filter α).product M)) = λ a, t.realize (λ i, x i a) :=
begin
convert @term.realize_quotient_mk L _ ((u : filter α).product_setoid M)
(ultraproduct.setoid_prestructure M u) _ t x,
ext a,
induction t,
{ refl },
{ simp only [term.realize, t_ih],
refl }
end
variables [Π a : α, nonempty (M a)]
theorem bounded_formula_realize_cast {β : Type*} {n : ℕ} (φ : L.bounded_formula β n)
(x : β → (Π a, M a)) (v : fin n → (Π a, M a)) :
φ.realize (λ (i : β), ((x i) : (u : filter α).product M)) (λ i, (v i)) ↔
∀ᶠ (a : α) in u, φ.realize (λ (i : β), x i a) (λ i, v i a) :=
begin
letI := ((u : filter α).product_setoid M),
induction φ with _ _ _ _ _ _ _ _ m _ _ ih ih' k φ ih,
{ simp only [bounded_formula.realize, eventually_const], },
{ have h2 : ∀ a : α, sum.elim (λ (i : β), x i a) (λ i, v i a) = λ i, sum.elim x v i a :=
λ a, funext (λ i, sum.cases_on i (λ i, rfl) (λ i, rfl)),
simp only [bounded_formula.realize, (sum.comp_elim coe x v).symm, h2, term_realize_cast],
exact quotient.eq' },
{ have h2 : ∀ a : α, sum.elim (λ (i : β), x i a) (λ i, v i a) = λ i, sum.elim x v i a :=
λ a, funext (λ i, sum.cases_on i (λ i, rfl) (λ i, rfl)),
simp only [bounded_formula.realize, (sum.comp_elim coe x v).symm, term_realize_cast, h2],
exact rel_map_quotient_mk _ _ },
{ simp only [bounded_formula.realize, ih v, ih' v],
rw ultrafilter.eventually_imp },
{ simp only [bounded_formula.realize],
transitivity (∀ (m : Π (a : α), M a), φ.realize (λ (i : β), (x i : (u : filter α).product M))
(fin.snoc (coe ∘ v) (↑m : (u : filter α).product M))),
{ exact forall_quotient_iff },
have h' : ∀ (m : Π a, M a) (a : α), (λ (i : fin (k + 1)), (fin.snoc v m : _ → Π a, M a) i a) =
fin.snoc (λ (i : fin k), v i a) (m a),
{ refine λ m a, funext (fin.reverse_induction _ (λ i hi, _)),
{ simp only [fin.snoc_last] },
{ simp only [fin.snoc_cast_succ] } },
simp only [← fin.comp_snoc, ih, h'],
refine ⟨λ h, _, λ h m, _⟩,
{ contrapose! h,
simp_rw [← ultrafilter.eventually_not, not_forall] at h,
refine ⟨λ a : α, classical.epsilon (λ m : M a, ¬ φ.realize (λ i, x i a)
(fin.snoc (λ i, v i a) m)), _⟩,
rw [← ultrafilter.eventually_not],
exact filter.mem_of_superset h (λ a ha, classical.epsilon_spec ha) },
{ rw filter.eventually_iff at *,
exact filter.mem_of_superset h (λ a ha, ha (m a)) } }
end
theorem realize_formula_cast {β : Type*} (φ : L.formula β) (x : β → (Π a, M a)) :
φ.realize (λ i, ((x i) : (u : filter α).product M)) ↔
∀ᶠ (a : α) in u, φ.realize (λ i, (x i a)) :=
begin
simp_rw [formula.realize, ← bounded_formula_realize_cast φ x, iff_eq_eq],
exact congr rfl (subsingleton.elim _ _),
end
/-- Łoś's Theorem: a sentence is true in an ultraproduct if and only if the set of structures in
which it is true belongs to the ultrafilter. -/
theorem sentence_realize (φ : L.sentence) :
((u : filter α).product M) ⊨ φ ↔ ∀ᶠ (a : α) in u, (M a) ⊨ φ :=
begin
simp_rw [sentence.realize, ← realize_formula_cast φ, iff_eq_eq],
exact congr rfl (subsingleton.elim _ _),
end
instance : nonempty ((u : filter α).product M) :=
begin
letI : Π a, inhabited (M a) := λ _, classical.inhabited_of_nonempty',
exact nonempty_of_inhabited,
end
end ultraproduct
end language
end first_order
|
0c103ece6067127799dc5426d8b6d61452aac2e8
|
6432ea7a083ff6ba21ea17af9ee47b9c371760f7
|
/src/lake/Lake/CLI/Serve.lean
|
12fb2210344794e6fc91921f414c4c74afdb378e
|
[
"Apache-2.0",
"LLVM-exception",
"NCSA",
"LGPL-3.0-only",
"LicenseRef-scancode-inner-net-2.0",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"Spencer-94",
"LGPL-2.1-or-later",
"HPND",
"LicenseRef-scancode-pcre",
"ISC",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"SunPro",
"CMU-Mach"
] |
permissive
|
leanprover/lean4
|
4bdf9790294964627eb9be79f5e8f6157780b4cc
|
f1f9dc0f2f531af3312398999d8b8303fa5f096b
|
refs/heads/master
| 1,693,360,665,786
| 1,693,350,868,000
| 1,693,350,868,000
| 129,571,436
| 2,827
| 311
|
Apache-2.0
| 1,694,716,156,000
| 1,523,760,560,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 2,367
|
lean
|
/-
Copyright (c) 2022 Mac Malone. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mac Malone
-/
import Lake.Load
import Lake.Build
import Lake.Util.MainM
namespace Lake
open Lean (Json toJson fromJson? LeanPaths)
/-- Exit code to return if `print-paths` cannot find the config file. -/
def noConfigFileCode : ExitCode := 2
/--
Environment variable that is set when `lake serve` cannot parse the Lake configuration file
and falls back to plain `lean --server`.
-/
def invalidConfigEnvVar := "LAKE_INVALID_CONFIG"
/--
Build the given list of imports of the package
and print the `.olean` and source directories of every used package.
If no configuration file exists, exit silently with `noConfigFileCode` (i.e., 2).
The `print-paths` command is used internally by the Lean 4 server.
-/
def printPaths (config : LoadConfig) (imports : List String := [])
(oldMode : Bool := false) (verbosity : Verbosity := .normal) : MainM PUnit := do
if (← config.configFile.pathExists) then
if let some errLog := (← IO.getEnv invalidConfigEnvVar) then
IO.eprint errLog
IO.eprintln s!"Invalid Lake configuration. Please restart the server after fixing the Lake configuration file."
exit 1
let ws ← MainM.runLogIO (loadWorkspace config) verbosity
let dynlibs ← ws.runBuild (buildImportsAndDeps imports) oldMode
|>.run (MonadLog.eio verbosity)
IO.println <| Json.compress <| toJson {
oleanPath := ws.leanPath
srcPath := ws.leanSrcPath
loadDynlibPaths := dynlibs
: LeanPaths
}
else
exit noConfigFileCode
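/-
Illustrative sketch (not part of the original file): on success, the line printed above is the
compressed JSON encoding of a `LeanPaths` value, i.e. an object with the three fields set above,
roughly `{"oleanPath":[...],"srcPath":[...],"loadDynlibPaths":[...]}` (paths elided).
-/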
/--
Start the Lean LSP server for the `Workspace` loaded from `config`
with the given additional `args`.
-/
def serve (config : LoadConfig) (args : Array String) : IO UInt32 := do
let (extraEnv, moreServerArgs) ← do
let (log, ws?) ← loadWorkspace config |>.captureLog
IO.eprint log
if let some ws := ws? then
let ctx := mkLakeContext ws
pure (← LakeT.run ctx getAugmentedEnv, ws.root.moreServerArgs)
else
IO.eprintln "warning: package configuration has errors, falling back to plain `lean --server`"
pure (config.env.installVars.push (invalidConfigEnvVar, log), #[])
(← IO.Process.spawn {
cmd := config.env.lean.lean.toString
args := #["--server"] ++ moreServerArgs ++ args
env := extraEnv
}).wait
|
73e0e58de91995476aea08df18d57544871edaee
|
ac2987d8c7832fb4a87edb6bee26141facbb6fa0
|
/Mathlib/Tactic/NoMatch.lean
|
882d2bc12edb30eb3a9d2450298eede0bc0b3aaf
|
[
"Apache-2.0"
] |
permissive
|
AurelienSaue/mathlib4
|
52204b9bd9d207c922fe0cf3397166728bb6c2e2
|
84271fe0875bafdaa88ac41f1b5a7c18151bd0d5
|
refs/heads/master
| 1,689,156,096,545
| 1,629,378,840,000
| 1,629,378,840,000
| 389,648,603
| 0
| 0
|
Apache-2.0
| 1,627,307,284,000
| 1,627,307,284,000
| null |
UTF-8
|
Lean
| false
| false
| 1,947
|
lean
|
/-
Copyright (c) 2021 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Mario Carneiro
-/
import Mathlib.Tactic.OpenPrivate
import Lean
/-
This adds support for the alternative syntax `match x with.` instead of `nomatch x`. It is more
powerful because it supports pattern matching on multiple discriminants, like regular `match`, and
simply has no alternatives in the match.
Along the same lines, `fun.` is a nullary pattern matching function; it is equivalent to
`fun x y z => match x, y, z with.` where all variables are introduced in order to find an
impossible pattern. The `match x with.` and `intro.` tactics do the same thing but in tactic mode.
-/
namespace Lean
syntax:lead (name := Parser.Term.noMatch) "match " matchDiscr,* " with" "." : term
syntax:lead (name := Parser.Tactic.introNoMatch) "intro" "." : tactic
namespace Elab.Term
open private elabMatchCore in elabMatch.elabMatchDefault in
open private elabMatchAux waitExpectedTypeAndDiscrs in elabMatchCore in
@[termElab Parser.Term.noMatch] def elabNoMatch' : TermElab
| stx@`(match $discrs,* with.), expectedType? => do
let expectedType ← waitExpectedTypeAndDiscrs stx expectedType?
elabMatchAux none discrs #[] mkNullNode expectedType
| _, _ => throwUnsupportedSyntax
elab tk:"fun" "." : term <= expectedType =>
Meta.forallTelescopeReducing expectedType fun args _ => do
let mut discrs := #[]
let mut binders := #[]
for _ in args do
let n ← mkFreshIdent tk
binders := binders.push n
discrs := discrs.push $ Syntax.node ``Lean.Parser.Term.matchDiscr #[mkNullNode, n]
elabTerm (← `(@fun $binders* => match $discrs,* with.)) (some expectedType)
macro tk:"λ" "." : term => `(fun%$tk .)
end Elab.Term
macro "match " discrs:matchDiscr,* " with" "." : tactic =>
`(tactic| exact match $discrs,* with.)
macro "intro" "." : tactic => `(tactic| exact fun.)
end Lean
|
630793d298fd2a0638e7a1a6664f702a5b7a72f0
|
3dc4623269159d02a444fe898d33e8c7e7e9461b
|
/.github/workflows/project_1_a_decrire/lean-scheme-submission/src/spectrum_of_a_ring/zariski_topology.lean
|
bdaba6cdd7f40358e706bc9db6036484ebd5aa31
|
[] |
no_license
|
Or7ando/lean
|
cc003e6c41048eae7c34aa6bada51c9e9add9e66
|
d41169cf4e416a0d42092fb6bdc14131cee9dd15
|
refs/heads/master
| 1,650,600,589,722
| 1,587,262,906,000
| 1,587,262,906,000
| 255,387,160
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 4,426
|
lean
|
/-
Kenny's proof that Spec(A) is a topological space.
https://stacks.math.columbia.edu/tag/00E1
-/
import topology.basic
import topology.opens
import ring_theory.ideals
import group_theory.submonoid
import to_mathlib.topologcal_space
import spectrum_of_a_ring.spec
import spectrum_of_a_ring.properties
local attribute [instance] classical.prop_decidable
universe u
open topological_space
section spec_topological_space
parameters (α : Type u) [comm_ring α]
-- Proof: the closed sets satisfy the three axioms required by `topological_space.of_closed`
-- below (the empty set is closed, arbitrary intersections of closed sets are closed, and
-- binary unions of closed sets are closed).
lemma Spec.H1 : ∅ ∈ Spec.closeds α :=
⟨{(1:α)}, set.ext $ λ ⟨I, PI⟩, ⟨
λ HI, false.elim $ ((ideal.ne_top_iff_one I).1 PI.1) $ by simpa [Spec.V] using HI,
λ HI, by cases HI⟩⟩
lemma Spec.H2 : ∀ A ⊆ Spec.closeds α, ⋂₀ A ∈ Spec.closeds α :=
λ A HA, ⟨(⋃₀ {E | ∃ S ∈ A, Spec.V E = S}), set.ext $ λ ⟨I, PI⟩, ⟨
λ HI T HT,
begin
rcases (HA HT) with ⟨S, HS⟩,
rw ←HS,
intros x Hx,
apply HI,
use [S, ⟨T, HT, HS⟩],
exact Hx,
end,
λ HI x ⟨E, ⟨⟨S, HSA, HVES⟩, HxE⟩⟩,
begin
have HIS := HI S HSA,
rw ←HVES at HIS,
exact (HIS HxE)
end⟩⟩
lemma Spec.H3 : ∀ A B ∈ Spec.closeds α, A ∪ B ∈ Spec.closeds α :=
λ A B ⟨EA, HEA⟩ ⟨EB, HEB⟩,
⟨(ideal.span EA ∩ ideal.span EB), set.ext $ λ ⟨I, PI⟩, ⟨
begin
intros HI,
eapply or_iff_not_and_not.2; try { apply_instance },
rintros ⟨HnIA, HnIB⟩,
rw ←HEA at HnIA,
rw ←HEB at HnIB,
rcases not_forall.1 HnIA with ⟨x, Hx⟩,
rcases not_forall.1 HnIB with ⟨y, Hy⟩,
rcases not_imp.1 Hx with ⟨HxEA, HnxI⟩,
rcases not_imp.1 Hy with ⟨HyEB, HnyI⟩,
have HxyEA : x * y ∈ ideal.span EA,
apply ideal.mul_mem_right _ _,
exact ((@ideal.subset_span _ _ EA) _ HxEA),
have HxyEB : x * y ∈ ideal.span EB,
apply ideal.mul_mem_left _ _,
exact ((@ideal.subset_span _ _ EB) _ HyEB),
have HxyEAEB : x * y ∈ ideal.span EA ⊓ ideal.span EB := ⟨HxyEA, HxyEB⟩,
cases (ideal.is_prime.mem_or_mem PI (HI HxyEAEB)) with HCx HCy,
{ apply HnxI,
exact HCx, },
{ apply HnyI,
exact HCy, }
end,
begin
rintros HI x ⟨HxEA, HxEB⟩,
cases HI,
{ rw ←HEA at HI,
rw Spec.V.set_eq_span at HI,
exact HI HxEA, },
{ rw ←HEB at HI,
rw Spec.V.set_eq_span at HI,
exact HI HxEB, }
end⟩⟩
parameter (α)
instance zariski_topology : topological_space (Spec α) :=
topological_space.of_closed (Spec.closeds α) Spec.H1 Spec.H2 Spec.H3
-- Useful auxiliary definitions: the basic closed sets `V' f` and basic open sets `D' f`, plus
-- the whole space and the empty set packaged as `closeds`/`opens`.
lemma V_fs_closed : ∀ (f : α), is_closed (Spec.V' f) := λ f,
⟨{f}, by simp [Spec.D', Spec.V', Spec.V]⟩
def Spec.VC : α → closeds (Spec α) := λ x, ⟨Spec.V' x, V_fs_closed x⟩
lemma D_fs_open : ∀ (f : α), is_open (Spec.D' f) := λ f,
⟨{f}, by simp [Spec.D', Spec.V', Spec.V]⟩
def Spec.DO : α → opens (Spec α) := λ x, ⟨Spec.D' x, D_fs_open x⟩
def Spec.closed.univ : closeds (Spec α) := ⟨Spec.univ α, is_closed_univ⟩
def Spec.closed.empty : closeds (Spec α) := ⟨Spec.empty α, is_closed_empty⟩
def Spec.open.univ : topological_space.opens (Spec α) := ⟨Spec.univ α, is_open_univ⟩
def Spec.open.empty : opens (Spec α) := ⟨Spec.empty α, is_open_empty⟩
-- T0 space.
section spec_t0_space
instance spec_t0 : t0_space (Spec α) :=
begin
constructor,
rintros ⟨I, PI⟩ ⟨J, PJ⟩ Hneq,
simp at Hneq,
have HIJneq : ¬ (I.carrier = J.carrier),
intros H,
have Hlift : ↑I = ↑J := H,
apply Hneq,
apply ideal.ext,
unfold has_mem.mem,
rw Hlift,
simp,
rw set.ext_iff at HIJneq,
rw not_forall at HIJneq,
rcases HIJneq with ⟨x, Hx⟩,
use [Spec.D {x}],
split,
{ use {x},
simp [Spec.D], },
{ rw iff_def at Hx,
rw not_and_distrib at Hx,
repeat { rw not_imp at Hx, },
cases Hx,
{ cases Hx with HxI HnxJ,
right,
split,
{ intros H,
apply HnxJ,
apply H,
exact set.mem_singleton x, },
{ intros H,
apply H,
intros z Hz,
rw set.mem_singleton_iff at Hz,
rw Hz,
exact HxI, } },
{ cases Hx with HxJ HnxI,
left,
split,
{ intros H,
apply HnxI,
apply H,
exact set.mem_singleton x, },
{ intros H,
apply H,
intros z Hz,
rw set.mem_singleton_iff at Hz,
rw Hz,
exact HxJ, } } }
end
end spec_t0_space
end spec_topological_space
|
e3c1b0aed926d39d3de474049004bd6b91b70554
|
31e8ce8c3f972f4102a087008fd33dee6f2d7dfe
|
/tauto.lean
|
179b8cdcffa6a5eead7306feb2427d8b4595c71e
|
[
"MIT"
] |
permissive
|
sakas--/lean-tauto
|
6ca908abde99b252c9a3fdce5c0e98871ebbcc26
|
3cc5c1349f9204f3b798ea8cdb6e4f36ed2d1b25
|
refs/heads/master
| 1,610,549,743,092
| 1,488,258,185,000
| 1,488,258,185,000
| 76,671,935
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 9,495
|
lean
|
/- TODO: Add comments and clean up the code.
-/
open expr tactic list nat
namespace tactic
/- Given an expression `e` denoting a proposition `B`, transforms the current goal `Γ ⊢ A` into
two subgoals `Γ ⊢ B → A` and `Γ ⊢ B`, by applying the term `λ (f : B → A) (x : B), f x`. -/
-- I'm not sure if this is the right way to implement it
meta def cut (e: expr) : tactic unit :=
do {goal ← target,
f ← mk_fresh_name,
x ← mk_fresh_name,
h ← mk_fresh_name,
let fty := (pi h binder_info.default e goal) in
apply (lam f binder_info.default fty (lam x binder_info.default e (app (mk_var 1) (mk_var 0))))}
end tactic
open tactic
namespace tauto
/- utils -/
private meta def is_true (e : expr) : bool :=
is_constant_of e ``true
private meta def is_not' : expr → bool
| (app f a) := if is_constant_of f ``not then tt else ff
| (pi n bi a b) := if is_false b then tt else ff
| e := ff
private meta def is_genuine_not : expr → bool
| (app f a) := if is_constant_of f ``not then tt else ff
| e := ff
private meta def is_and (e : expr) : bool :=
is_app_of e ``and
private meta def is_or (e : expr) : bool :=
is_app_of e ``or
private meta def is_iff (e : expr) : bool :=
is_app_of e ``iff
private meta def option_arrow (e : expr) : option (expr × expr × expr) :=
match e with
| (pi n bi a b) := if bnot (has_var b) then some (e, a, b) else none
| e := none
end
private meta def option_arrow_arrow (e : expr) : option (expr × expr × expr × expr × expr) :=
do
(_, ab, c) ← option_arrow e,
(_, a, b) ← option_arrow ab,
return (e, ab, a, b, c)
private meta def option_app : expr → option (expr × expr)
| (app a b) := some (a, b)
| _ := none
private meta def option_binary_connective_of (n : name) (e : expr) : option (expr × expr) :=
do
(e₁, b) ← option_app e,
(e₂, a) ← option_app e₁,
match e₂ with
| (const n₁ _) := if n = n₁ then some (a, b) else none
| _ := none
end
private meta def option_and : expr → option (expr × expr) :=
option_binary_connective_of `and
private meta def option_or : expr → option (expr × expr) :=
option_binary_connective_of `or
private meta def option_iff : expr → option (expr × expr) :=
option_binary_connective_of `iff
/- utility tactics -/
/- uses LCF-style AND_THEN tactic during repeat -/
private meta def repeat_seq_at_most : nat → tactic unit → tactic unit
| 0 t := skip
| (succ n) t := (seq t (repeat_seq_at_most n t)) <|> skip
private meta def repeat_seq : tactic unit → tactic unit :=
repeat_seq_at_most 100000
/- converts f : A → B, e : A, Γ ⊢ P to f : A → B, f e : B, Γ ⊢ P -/
private meta def apply_to (f : expr) (e : expr) : tactic unit :=
do
fty ← infer_type f,
ety ← infer_type e,
match option_arrow fty with
| none := failed
| (some (_, a , b)) :=
do
unify a ety,
fe ← mk_fresh_name,
assert fe b,
exact (app f e)
end
private meta def not_dep_intros_core : unit → tactic (list expr)
| u :=
do goal ← target,
if is_arrow goal
then do H ← intro1, Hs ← not_dep_intros_core u, return (H :: Hs)
else match is_not goal with
| none := return []
| some a := do H ← intro1, Hs ← not_dep_intros_core u, return (H :: Hs)
end
private meta def not_dep_intros : tactic (list expr) :=
not_dep_intros_core ()
private meta def not_dep_intros' : tactic unit :=
do
es ← not_dep_intros,
match es with
| [] := failed
| _ := skip
end
private meta def find_imp_imp : list expr → tactic (expr × expr × expr × expr × expr)
| [] := failed
| (e :: es) :=
do
ty ← infer_type e,
match option_arrow_arrow ty with
| none := find_imp_imp es
| (some (_, ab, a, b, c)) := return (e, ab, a, b, c)
end
namespace tauto
namespace tauto_rules
/- some logically equivalent rules that are used by the simplifier -/
variables {A B C : Prop}
protected lemma true_imp_iff : (true → A) ↔ A :=
begin
apply iff.intro,
{intro h, apply h, trivial},
{intros h₁ h₂, apply h₁}
end
protected lemma and_imp_iff : A ∧ B → C ↔ A → B → C :=
begin
apply iff.intro,
{intros h ha hb, apply h, split, exact ha, exact hb},
{intros h₁ h₂, cases h₂ with ha hb, exact (h₁ ha hb)}
end
protected lemma or_imp_iff : A ∨ B → C ↔ (A → C) ∧ (B → C) :=
begin
apply iff.intro,
{intro h, split,
{intro ha, apply h, left, exact ha},
{intro hb, apply h, right, exact hb}},
{intro h, cases h with hac hbc,
intro h₁, cases h₁ with ha hb,
{apply hac, exact ha},
{apply hbc, exact hb}}
end
protected lemma iff_imp_iff : (A ↔ B) → C ↔ (A → B) ∧ (B → A) → C :=
begin
apply iff.intro,
{intros h h₁, apply h, apply iff.intro, exact (and.left h₁), exact (and.right h₁)},
{intros h h₁, apply h, cases h₁ with h₂ h₃, exact (and.intro h₂ h₃)}
end
end tauto_rules
end tauto
/- simplifier -/
/- TODO: Currently type parameters are explicitly passed when to_expr is used.
This is to ensure that no metavariables are introduced.
I think there is a better workaround (e.g. using the elaborator).
-/
/- converts A ∧ B → C, Γ ⊢ P to A → B → C, Γ ⊢ P -/
private meta def flatten_and_imp (e : expr) : tactic unit :=
do
ty ← infer_type e,
match option_arrow ty with
| none := failed
| (some (_, ab, c)) :=
match option_and ab with
| none := failed
| some (a, b) :=
do
f ← to_expr `(iff.mp (@tauto.tauto_rules.and_imp_iff %%a %%b %%c)),
apply_to f e,
clear e
end
end
/- converts A ∨ B → C, Γ ⊢ P to (A → C) ∧ (B → C), Γ ⊢ P -/
private meta def flatten_or_imp (e : expr) : tactic unit :=
do
ty ← infer_type e,
match option_arrow ty with
| none := failed
| (some (_, ab, c)) :=
match option_or ab with
| none := failed
| (some (a, b)) :=
do
f ← to_expr `(iff.mp (@tauto.tauto_rules.or_imp_iff %%a %%b %%c)),
apply_to f e,
clear e
end
end
private meta def flatten_not_imp (e : expr) : tactic unit :=
do
ty ← infer_type e,
match option_arrow ty with
| none := failed
| (some (_, a, _)) :=
if is_genuine_not a
then
dunfold_at [``not] e
else
failed
end
private meta def flatten_iff_imp (e : expr) : tactic unit :=
do
ty ← infer_type e,
match option_arrow ty with
| none := failed
| (some (_, ab, c)) :=
match option_iff ab with
| none := failed
| (some (a, b)) :=
do
f ← to_expr `(iff.mp (@tauto.tauto_rules.iff_imp_iff %%a %%b %%c)),
apply_to f e,
clear e
end
end
/- converts ¬ A to A → false -/
private meta def reduce_not (e : expr) : tactic unit :=
do
ty ← infer_type e,
if is_genuine_not ty
then
dunfold_at [``not] e
else
failed
/- converts A → B, A, Γ ⊢ P to B, Γ ⊢ P -/
private meta def apply_at_hyp (e : expr) (l : list expr) : tactic unit :=
do ty ← infer_type e,
match option_arrow ty with
| none := failed
| (some (_, a, _)) :=
do
ea ← find_same_type a l,
apply_to e ea,
clear e
end
/- converts true → A, Γ ⊢ P to A, Γ ⊢ P -/
private meta def flatten_true_imp (e : expr) : tactic unit :=
do ty ← infer_type e,
match option_arrow ty with
| none := failed
| (some (_, a, b)) :=
if is_true a
then
do
f ← to_expr `(iff.mp (@tauto.tauto_rules.true_imp_iff %%b)),
apply_to f e,
clear e
else failed
end
private meta def simplify_hyp : list expr → tactic unit
| [] := failed
| (e::es) :=
do
cases e
<|> reduce_not e
<|> local_context >>= apply_at_hyp e
<|> flatten_true_imp e
<|> flatten_not_imp e
<|> flatten_and_imp e
<|> flatten_or_imp e
<|> flatten_iff_imp e
<|> simplify_hyp es
private meta def simplify_goal : tactic unit :=
split <|> interactive.apply `(iff.intro) <|> failed
meta def simplify : tactic unit :=
do
not_dep_intros >> skip,
repeat_seq(
(do
ctx ← local_context,
simplify_hyp ctx
<|> simplify_goal
<|> not_dep_intros'))
/- main procedures -/
private meta def apply_axiom : tactic unit :=
triv <|> contradiction <|> assumption
private meta def tauto_core : unit → tactic unit
| u :=
do
seq simplify
(apply_axiom
<|>
do
{ctx ← local_context,
(e, ab, a, b, c) ← find_imp_imp ctx,
cut c,
focus [
do {h ← intro1, clear e, tauto_core ()},
do {cut ab, exact e,
x ← mk_fresh_name, y ← mk_fresh_name, z ← mk_fresh_name,
h ← mk_fresh_name,
-- assert : b -> c
assert h (pi z binder_info.default b c),
-- exact (λ y : b, e (λ x : a, y)) : b -> c
exact (lam y binder_info.default b (app e (lam x binder_info.default a (mk_var 1)))),
h1 ← intro1, clear e, tauto_core ()}]}
<|>
do
{goal ← target,
if is_arrow goal
then
do h ← intro1, tauto_core ()
else if is_or goal then
(left >> tauto_core ()) <|> (right >> tauto_core ())
else fail "tauto failed"})
meta def tauto : tactic unit :=
tauto_core ()
end tauto
|
560913124c9b007e99a6853c66de1ad24d30a342
|
c8af905dcd8475f414868d303b2eb0e9d3eb32f9
|
/src/data/cpi/transition/enumerate.lean
|
5eadbf61f4096c66c721fe281a481e0957e24715
|
[
"BSD-3-Clause"
] |
permissive
|
continuouspi/lean-cpi
|
81480a13842d67ff5f3698643210d8ed5dd08de4
|
443bf2cb236feadc45a01387099c236ab2b78237
|
refs/heads/master
| 1,650,307,316,582
| 1,587,033,364,000
| 1,587,033,364,000
| 207,499,661
| 1
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 40,670
|
lean
|
import data.cpi.transition.basic data.multiset2 data.option2
-- TODO: Move this somewhere sensible
instance quot.lift.decidable_pred {α : Type*} [setoid α] (p : α → Prop) [dec : decidable_pred p] (h : ∀ a b, a ≈ b → p a = p b)
: decidable_pred (quot.lift p h)
| x := quot.hrec_on x dec (λ x y r, subsingleton.helim (congr_arg decidable (h x y r)) (dec x) (dec y))
namespace cpi
namespace transition
variables {ℍ : Type} {ω : context}
/-- The set of all transitions from a nil species. -/
def enumerate_nil {Γ} {ℓ : lookup ℍ ω Γ}
: fintype (transition.transition_from ℓ species.nil)
:= ⟨ finset.empty, (λ ⟨ k, α, E, t ⟩, by cases t) ⟩
/-- ξ_choice acts as an embedding. This is effectively ξ_choice.inj but lifted
to transitions. -/
def ξ_choice.embed
{Γ f} (ℓ : lookup ℍ ω Γ)
(π : prefix_expr ℍ Γ f) (A : species ℍ ω (f.apply Γ)) (As : species.choices ℍ ω Γ)
: transition_from ℓ (Σ# As) ↪ transition_from ℓ (Σ# (whole.cons π A As))
:= { to_fun := λ t, transition_from.mk (ξ_choice t.2.2.2),
inj := λ ⟨ k, α, E, t ⟩ ⟨ k', α', E', t' ⟩ eq, by { cases eq, from rfl} }
private def enumerate_choice_communicate_ts {Γ} (ℓ : lookup ℍ ω Γ) :
∀ (a : name Γ) (b : list (name Γ)) (y : ℕ)
(A : species ℍ ω (context.extend y Γ))
{As : species.choices ℍ ω Γ}
, fintype (transition_from ℓ (Σ# As))
→ finset (transition_from ℓ (Σ# (whole.cons (a#(b;y)) A As)))
| a b y A As child :=
finset.insert_nmem'
(transition_from.mk (choice₁ a b rfl y A As))
(finset.map (ξ_choice.embed ℓ _ A As) child.elems)
(λ mem, begin
rcases finset.mem_map.mp mem with ⟨ ⟨ k, α, E, t ⟩, mem, eql ⟩,
unfold_coes at eql,
simp only [ξ_choice.embed, transition_from.mk] at eql,
unfold_projs at eql,
cases eql,
end)
private def enumerate_choice_communicate {Γ} (ℓ : lookup ℍ ω Γ) :
∀ (a : name Γ) (b : list (name Γ)) (y : ℕ)
(A : species ℍ ω (context.extend y Γ))
{As : species.choices ℍ ω Γ}
, fintype (transition_from ℓ (Σ# As))
→ fintype (transition_from ℓ (Σ# (whole.cons (a#(b;y)) A As)))
| a b y A As child :=
{ elems := enumerate_choice_communicate_ts ℓ a b y A child,
complete := λ x, begin
rcases x with ⟨ k, α, E, t ⟩,
cases t,
case ξ_choice {
have : transition_from.mk t_a ∈ child.elems := @fintype.complete _ child _,
have this := finset.mem_map_of_mem (ξ_choice.embed ℓ (a#(b;y)) A As) this,
from finset.mem_insert_nmem_of_mem this _,
},
case choice₁ {
subst t_b_len,
from finset.mem_insert_nmem_self _
},
end }
private def enumerate_choice_spontaneous_ts {Γ} (ℓ : lookup ℍ ω Γ) :
∀ (k : ℍ) (A : species ℍ ω Γ) {As : species.choices ℍ ω Γ}
, fintype (transition_from ℓ (Σ# As))
→ finset (transition_from ℓ (Σ# (whole.cons (τ@k) A As)))
| k A As child :=
finset.insert_nmem'
(transition_from.mk (choice₂ k A As))
(finset.map (ξ_choice.embed ℓ _ A As) child.elems)
(λ mem, begin
rcases finset.mem_map.mp mem with ⟨ ⟨ k, α, E, t ⟩, mem, eql ⟩,
unfold_coes at eql,
simp only [ξ_choice.embed, transition_from.mk] at eql,
unfold_projs at eql,
cases eql,
end)
private def enumerate_choice_spontaneous {Γ} (ℓ : lookup ℍ ω Γ) :
∀ (k : ℍ) (A : species ℍ ω Γ) {As : species.choices ℍ ω Γ}
, fintype (transition_from ℓ (Σ# As))
→ fintype (transition_from ℓ (Σ# (whole.cons (τ@k) A As)))
| k A As child :=
{ elems := enumerate_choice_spontaneous_ts ℓ k A child,
complete := λ x, begin
rcases x with ⟨ k', α, E, t ⟩,
cases t,
case ξ_choice {
have : transition_from.mk t_a ∈ child.elems := @fintype.complete _ child _,
have this := finset.mem_map_of_mem (ξ_choice.embed ℓ (τ@k) A As) this,
from finset.mem_insert_nmem_of_mem this _,
},
case choice₂ { from finset.mem_insert_nmem_self _ },
end }
/-- The set of all transitions from a guarded choice species. -/
def enumerate_choices {Γ} (ℓ : lookup ℍ ω Γ) :
∀ (As : species.choices ℍ ω Γ), fintype (transition_from ℓ (Σ# As))
| species.whole.empty :=
{ elems := finset.empty,
complete := λ ⟨ k, α, E, t ⟩, by cases t }
| (species.whole.cons (a#(b; y)) A As) := enumerate_choice_communicate ℓ a b y A (enumerate_choices As)
| (species.whole.cons (τ@k) A As) := enumerate_choice_spontaneous ℓ k A (enumerate_choices As)
private def defn.from {Γ n} (ℓ : lookup ℍ ω Γ) (D : reference n ω) (as : vector (name Γ) n)
: transition_from ℓ (Σ# (species.rename (name.mk_apply as) (ℓ _ D)))
→ transition_from ℓ (apply D as)
| ⟨ k, α, E, t ⟩ := ⟨ k, α, E, defn _ _ _ _ rfl t ⟩
private def defn.embed {Γ n} (ℓ : lookup ℍ ω Γ) (D : reference n ω) (as : vector (name Γ) n)
: transition_from ℓ (Σ# (species.rename (name.mk_apply as) (ℓ _ D)))
↪ transition_from ℓ (apply D as)
:= ⟨ defn.from ℓ D as, λ t t' eql, begin
rcases t with ⟨ k, α, E, t ⟩, rcases t' with ⟨ k', α', E', t' ⟩,
simp only [defn.from] at eql,
rcases sigma.mk.inj eql with ⟨ ⟨ _ ⟩, eql₁ ⟩, clear eql,
rcases sigma.mk.inj (eq_of_heq eql₁) with ⟨ ⟨ _ ⟩, eql₂ ⟩, clear eql₁,
rcases sigma.mk.inj (eq_of_heq eql₂) with ⟨ ⟨ _ ⟩, eql₃ ⟩, clear eql₂,
have eql := eq_of_heq (defn.inj (eq_of_heq eql₃)).2, clear eql₃, subst eql,
end ⟩
private def enumerate_apply_ts {Γ n} (ℓ : lookup ℍ ω Γ) (D : reference n ω) (as : vector (name Γ) n)
: finset (transition_from ℓ (apply D as))
:= finset.map (defn.embed ℓ D as)
(enumerate_choices ℓ (species.rename (name.mk_apply as) (ℓ _ D))).elems
private lemma enumerate_apply_complete {Γ n} (ℓ : lookup ℍ ω Γ) (D : reference n ω) (as : vector (name Γ) n)
: ∀ x, x ∈ enumerate_apply_ts ℓ D as
| ⟨ k, α, E, defn ℓ' D as B eql t ⟩ := begin
subst eql,
have h :=
@fintype.complete _
(enumerate_choices ℓ (species.rename (name.mk_apply as) (ℓ _ D)))
(transition_from.mk t),
from finset.mem_map_of_mem (defn.embed ℓ D as) h,
end
/-- The set of all transitions from a species invocation. -/
def enumerate_apply {Γ n} (ℓ : lookup ℍ ω Γ) (D : reference n ω) (as : vector (name Γ) n)
: fintype (transition_from ℓ (apply D as)) :=
⟨ enumerate_apply_ts ℓ D as, enumerate_apply_complete ℓ D as ⟩
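/- Editorial note (not part of the original source): for an invocation `apply D as`
   the only applicable rule is `defn`, so its transitions are exactly the transitions
   of the unfolded body `Σ# (species.rename (name.mk_apply as) (ℓ _ D))`, transported
   along the embedding `defn.embed`. -/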
/-- Wrap a transition in parL. -/
def parL {Γ : context} {ℓ : lookup ℍ ω Γ} (A B : species ℍ ω Γ)
: transition.transition_from ℓ A → transition.transition_from ℓ (A |ₛ B)
| ⟨ _, α, production.species E, t ⟩ := ⟨ _, α, _, transition.parL_species B t ⟩
| ⟨ _, α, production.concretion E, t ⟩ := ⟨ _, α, _, transition.parL_concretion B t ⟩
/-- Wrap a transition in parL, as a function embedding. -/
def parL.embed {Γ} {ℓ : lookup ℍ ω Γ} (A B : species ℍ ω Γ)
: transition.transition_from ℓ A ↪ transition.transition_from ℓ (A |ₛ B)
:= ⟨ parL A B, λ t t' eq, begin
rcases t with ⟨ k, α, E, t ⟩, rcases t' with ⟨ k', α', E', t' ⟩,
cases E; cases E'; cases eq; from rfl,
end ⟩
/-- Wrap a transition in parR. -/
def parR {Γ} {ℓ : lookup ℍ ω Γ} (A B : species ℍ ω Γ)
: transition.transition_from ℓ B → transition.transition_from ℓ (A |ₛ B)
| ⟨ _, α, production.species E, t ⟩ := ⟨ _, α, _, transition.parR_species A t ⟩
| ⟨ _, α, production.concretion E, t ⟩ := ⟨ _, α, _, transition.parR_concretion A t ⟩
/-- Wrap a transition in parR, as a function embedding. -/
def parR.embed {Γ} {ℓ : lookup ℍ ω Γ} (A B : species ℍ ω Γ)
: transition.transition_from ℓ B ↪ transition.transition_from ℓ (A |ₛ B)
:= ⟨ parR A B, λ t t' eq, begin
rcases t with ⟨ k, α, E, t ⟩, rcases t' with ⟨ k', α', E', t' ⟩,
cases E; cases E'; cases eq; from rfl,
end ⟩
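/- Editorial note (not part of the original source): `parL`/`parR` lift a transition
   of one component of `A |ₛ B` to a transition of the whole composition, leaving the
   other component untouched. Packaging them as embeddings (`parL.embed`, `parR.embed`)
   lets the enumeration below reuse `finset.map` without collapsing distinct
   transitions. -/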
/-- Determine if two transitions result in products with compatible
    concretions. -/
private def com₁.is_compatible {Γ} (ℓ : lookup ℍ ω Γ) (A B : species ℍ ω Γ)
: transition.transition_from ℓ A × transition.transition_from ℓ B
→ Prop
| ⟨ ⟨ _, l, @production.concretion _ _ _ a x F, t ⟩,
⟨ _, l', @production.concretion _ _ _ b y G, t' ⟩ ⟩ := a = y ∧ b = x
| ⟨ ⟨ _, l, production.concretion F, t ⟩, ⟨ _, l', production.species G, t' ⟩ ⟩ := false
| ⟨ ⟨ _, l, production.species F, t ⟩, ⟨ _, l', production.concretion G, t' ⟩ ⟩ := false
| ⟨ ⟨ _, l, production.species F, t ⟩, ⟨ _, l', production.species G, t' ⟩ ⟩ := false
instance com₁.is_compatible.decide {Γ} (ℓ : lookup ℍ ω Γ) (A B : species ℍ ω Γ)
: decidable_pred (com₁.is_compatible ℓ A B)
| ⟨ ⟨ _, l, @production.concretion _ _ _ a x F, t ⟩,
⟨ _, l', @production.concretion _ _ _ b y G, t' ⟩ ⟩
:= by { unfold com₁.is_compatible, by apply_instance }
| ⟨ ⟨ _, l, production.concretion F, t ⟩, ⟨ _, l', production.species G, t' ⟩ ⟩ := decidable.false
| ⟨ ⟨ _, l, production.species F, t ⟩, ⟨ _, l', production.concretion G, t' ⟩ ⟩ := decidable.false
| ⟨ ⟨ _, l, production.species F, t ⟩, ⟨ _, l', production.species G, t' ⟩ ⟩ := decidable.false
/-- The subtype of all transition pairs which are compatible. -/
@[nolint has_inhabited_instance]
def com₁.compatible {Γ} (ℓ : lookup ℍ ω Γ) (A B : species ℍ ω Γ) : Type
:= { p : transition.transition_from ℓ A × transition.transition_from ℓ B
// com₁.is_compatible ℓ A B p }
/-- Convert a compatible pair of transitions to a com₁ transition. -/
def com₁.of_compatible {Γ} (ℓ : lookup ℍ ω Γ) (A B : species ℍ ω Γ)
: com₁.compatible ℓ A B → transition.transition_from ℓ (A |ₛ B)
| ⟨ ⟨ ⟨ _, l, @production.concretion _ _ _ a x F, t ⟩,
⟨ _, l', @production.concretion _ _ _ b y G, t' ⟩ ⟩, p ⟩ := begin
cases l with _ a, cases l' with _ b, rcases p with ⟨ ⟨ _ ⟩, ⟨ _ ⟩ ⟩,
refine ⟨ _, _, _, com₁ rfl rfl t t' ⟩,
end
| ⟨ ⟨ ⟨ _, l, production.concretion F, t ⟩, ⟨ _, l', production.species G, t' ⟩ ⟩, p ⟩ := false.elim p
| ⟨ ⟨ ⟨ _, l, production.species F, t ⟩, ⟨ _, l', production.concretion G, t' ⟩ ⟩, p ⟩ := false.elim p
| ⟨ ⟨ ⟨ _, l, production.species F, t ⟩, ⟨ _, l', production.species G, t' ⟩ ⟩, p ⟩ := false.elim p
/-- We need a separate lemma to show injectivity of com₁.of_compatible - we need to
  generalise several variables, otherwise the case splits do not go through.
-/
private lemma com₁.of_compatible.inj_help {Γ} {ℓ : lookup ℍ ω Γ} (A B : species ℍ ω Γ) :
∀ {a x} {a₁ b₁} {F₁ : concretion ℍ ω Γ a x} {G₁ : concretion ℍ ω Γ x a}
{b y} {a₂ b₂} {F₂ : concretion ℍ ω Γ b y} {G₂ : concretion ℍ ω Γ y b}
{FG₁ FG₂ : species ℍ ω Γ} {α₁ α₂ : label ℍ Γ kind.species}
(eFG₁ : FG₁ = concretion.pseudo_apply F₁ G₁)
(eFG₂ : FG₂ = concretion.pseudo_apply F₂ G₂)
(eα₁ : α₁ = τ⟨ a₁, b₁ ⟩) (eα₂ : α₂ = τ⟨ a₂, b₂ ⟩)
(tf₁ : A [ℓ, #a₁]⟶ (production.concretion F₁)) (tg₁ : B [ℓ, #b₁]⟶ (production.concretion G₁))
(tf₂ : A [ℓ, #a₂]⟶ (production.concretion F₂)) (tg₂ : B [ℓ, #b₂]⟶ (production.concretion G₂))
, (τ⟨ a₁, b₁ ⟩ = (τ⟨ a₂, b₂ ⟩ : label ℍ Γ _))
→ sigma.mk (production.species FG₁) (com₁ eFG₁ eα₁ tf₁ tg₁)
== sigma.mk (production.species FG₂) (com₁ eFG₂ eα₂ tf₂ tg₂)
→ a = b ∧ x = y ∧ a₁ = a₂ ∧ b₁ = b₂ ∧ F₁ == F₂ ∧ G₁ == G₂ ∧ tf₁ == tf₂ ∧ tg₁ == tg₂
| a x a₁ b₁ F₁ G₁ b y a₂ b₂ F₂ G₂ FG₁ FG₂ α₁ α₂ eFG₁ eFG₂ eα₁ eα₂
tf₁ tg₁ tf₂ tg₂ eqα eqT := begin
have : α₁ = α₂ := trans eα₁ (trans eqα eα₂.symm), subst this,
rcases sigma.mk.inj (eq_of_heq eqT) with ⟨ eqFG, eqT ⟩,
have : FG₁ = FG₂ := production.species.inj eqFG, subst this,
rcases com₁.inj (eq_of_heq eqT) with ⟨ ⟨ _ ⟩, ⟨ _ ⟩, ⟨ _ ⟩, ⟨ _ ⟩, F, G, tf, tg ⟩,
from ⟨ rfl, rfl, rfl, rfl, F, G, tf, tg ⟩,
end
private lemma com₁.of_compatible.inj {Γ} {ℓ : lookup ℍ ω Γ} (A B : species ℍ ω Γ)
: function.injective (com₁.of_compatible ℓ A B)
| ⟨ ⟨ ⟨ k₁, α₁, E₁, t₁ ⟩, ⟨ k₁', α₁', E₁', t₁' ⟩ ⟩, is₁ ⟩
⟨ ⟨ ⟨ k₂, α₂, E₂, t₂ ⟩, ⟨ k₂', α₂', E₂', t₂' ⟩ ⟩, is₂ ⟩ eql := begin
cases E₁; cases E₁'; try { unfold com₁.is_compatible at is₁, contradiction },
rcases is₁ with ⟨ l, r ⟩, subst l, subst r, cases α₁, cases α₁',
cases E₂; cases E₂'; try { unfold com₁.is_compatible at is₂, contradiction },
rcases is₂ with ⟨ l, r ⟩, subst l, subst r, cases α₂, cases α₂',
simp only [com₁.of_compatible] at eql,
rcases sigma.mk.inj (eq_of_heq (sigma.mk.inj eql).2) with ⟨ eqα, eqT ⟩,
rcases com₁.of_compatible.inj_help A B rfl rfl rfl rfl t₁ t₁' t₂ t₂' eqα eqT
with ⟨ ⟨ _ ⟩, ⟨ _ ⟩, ⟨ _ ⟩, ⟨ _ ⟩, ⟨ _ ⟩, ⟨ _ ⟩, ⟨ _ ⟩, ⟨ _ ⟩ ⟩,
from rfl,
end
/-- Convert a compatible pair of transitions to a com₁ transition, as a function
    embedding. -/
def com₁.embed {Γ} (ℓ : lookup ℍ ω Γ) (A B : species ℍ ω Γ)
: com₁.compatible ℓ A B ↪ transition.transition_from ℓ (A |ₛ B)
:= ⟨ com₁.of_compatible ℓ A B, com₁.of_compatible.inj A B ⟩
private lemma com₁.impossible_l {Γ}
(ℓ : lookup ℍ ω Γ) (A B : species ℍ ω Γ) {b y}
{F : concretion ℍ ω Γ b y} {G : concretion ℍ ω Γ y b}
: ∀ {a₁ a₂} {C FG : species ℍ ω Γ}
(t : A [ℓ, τ⟨ a₁, a₂ ⟩]⟶ (production.species C))
(t₁ : A [ℓ, #a₁]⟶ (production.concretion F))
(t₂ : B [ℓ, #a₂]⟶ (production.concretion G))
(h : FG = concretion.pseudo_apply F G)
, (C |ₛ B) = FG
→ ¬ (parL_species B t == com₁ h rfl t₁ t₂)
| a₁ a₂ C FG t t₁ t₂ h cfg eql := by { subst cfg, cases (eq_of_heq eql) }
private lemma com₁.impossible_r {Γ}
(ℓ : lookup ℍ ω Γ) (A B : species ℍ ω Γ) {b y}
{F : concretion ℍ ω Γ b y} {G : concretion ℍ ω Γ y b}
: ∀ {a₁ a₂} {C FG : species ℍ ω Γ}
(t : B [ℓ, τ⟨ a₁, a₂ ⟩]⟶ (production.species C))
(t₁ : A [ℓ, #a₁]⟶ (production.concretion F))
(t₂ : B [ℓ, #a₂]⟶ (production.concretion G))
(h : FG = concretion.pseudo_apply F G)
, (A |ₛ C) = FG
→ ¬ (parR_species A t == com₁ h rfl t₁ t₂)
| a₁ a₂ C FG t t₁ t₂ h cfg eql := by { subst cfg, cases (eq_of_heq eql) }
private def enumerate_parallel_ts {Γ} {ℓ : lookup ℍ ω Γ} (A B : species ℍ ω Γ)
: fintype (transition.transition_from ℓ A)
→ fintype (transition.transition_from ℓ B)
→ finset (transition.transition_from ℓ (A |ₛ B))
| As Bs :=
finset.union_disjoint
(finset.map
(com₁.embed ℓ A B)
((finset.product As.elems Bs.elems).subtype (com₁.is_compatible ℓ A B)))
(finset.union_disjoint
(As.elems.map (parL.embed A B))
(Bs.elems.map (parR.embed A B))
(λ x memL memR, begin
rcases finset.mem_map.mp memL with ⟨ ⟨ k, α, E, t ⟩, mem, eql ⟩, clear mem,
unfold_coes at eql, simp only [parL.embed] at eql, subst eql,
rcases finset.mem_map.mp memR with ⟨ ⟨ k', α', E', t' ⟩, mem, eql ⟩, clear mem,
unfold_coes at eql, simp only [parR.embed] at eql,
cases E; cases E'; simp only [parL, parR] at eql; cases eql,
end))
(λ x memL memR, begin
rcases finset.mem_map.mp memL with ⟨ ⟨ ⟨ ⟨ k₁, α₁, E₁, t₁ ⟩, ⟨ k₂, α₂, E₂, t₂ ⟩ ⟩, d ⟩, mem, eql ⟩, clear memL mem,
unfold_coes at eql, simp only [com₁.embed] at eql,
cases E₁; cases E₂; try { simpa only [com₁.is_compatible] using d },
rcases d with ⟨ ⟨ _ ⟩, ⟨ _ ⟩ ⟩, cases α₁, cases α₂,
simp only [com₁.of_compatible] at eql, subst eql,
cases finset.mem_union_disjoint.mp memR,
case or.inl {
rcases finset.mem_map.mp h with ⟨ ⟨ k, α, E, t ⟩, mem, eql ⟩, clear mem memR,
unfold_coes at eql, simp only [parL.embed] at eql,
cases E,
case production.species {
rcases sigma.mk.inj eql with ⟨ ⟨ _ ⟩, eql₁ ⟩, clear eql,
rcases sigma.mk.inj (eq_of_heq eql₁) with ⟨ ⟨ _ ⟩, eql₂ ⟩, clear eql₁,
rcases sigma.mk.inj (eq_of_heq eql₂) with ⟨ eqlE, eqlT ⟩,
from com₁.impossible_l ℓ A B t t₁ t₂ rfl (production.species.inj eqlE) eqlT,
},
case production.concretion { cases (sigma.mk.inj eql).1 },
},
case or.inr {
rcases finset.mem_map.mp h with ⟨ ⟨ k, α, E, t ⟩, mem, eql ⟩, clear mem memR,
unfold_coes at eql, simp only [parR.embed] at eql,
cases E,
case production.species {
rcases sigma.mk.inj eql with ⟨ ⟨ _ ⟩, eql₁ ⟩, clear eql,
rcases sigma.mk.inj (eq_of_heq eql₁) with ⟨ ⟨ _ ⟩, eql₂ ⟩, clear eql₁,
rcases sigma.mk.inj (eq_of_heq eql₂) with ⟨ eqlE, eqlT ⟩,
from com₁.impossible_r ℓ A B t t₁ t₂ rfl (production.species.inj eqlE) eqlT,
},
case production.concretion { cases (sigma.mk.inj eql).1 },
},
end)
private lemma enumerate_parallel_compute_l_species
{Γ ℓ} {A B : species ℍ ω Γ} {l : label ℍ Γ kind.species} {E}
(As : fintype (transition_from ℓ A)) (Bs : fintype (transition_from ℓ B))
(t : transition A ℓ l (production.species E))
: transition_from.mk (parL_species B t) ∈ enumerate_parallel_ts A B As Bs :=
let h := @fintype.complete _ As (transition_from.mk t) in
let g := finset.mem_map_of_mem (parL.embed A B) h in
finset.mem_union_disjoint.mpr (or.inr (finset.mem_union_disjoint.mpr (or.inl g)))
private lemma enumerate_parallel_compute_l_concretion
{Γ ℓ} {A B} {l : label ℍ Γ kind.concretion} {b y} {E : concretion ℍ ω Γ b y}
(As : fintype (transition_from ℓ A)) (Bs : fintype (transition_from ℓ B))
(t : transition A ℓ l (production.concretion E))
: transition_from.mk (parL_concretion B t) ∈ enumerate_parallel_ts A B As Bs :=
let h := @fintype.complete _ As (transition_from.mk t) in
let g := finset.mem_map_of_mem (parL.embed A B) h in
finset.mem_union_disjoint.mpr (or.inr (finset.mem_union_disjoint.mpr (or.inl g)))
private lemma enumerate_parallel_compute_r_species
{Γ ℓ} {A B : species ℍ ω Γ} {l : label ℍ Γ kind.species} {E}
(As : fintype (transition_from ℓ A)) (Bs : fintype (transition_from ℓ B))
(t : transition B ℓ l (production.species E))
: transition_from.mk (parR_species A t) ∈ enumerate_parallel_ts A B As Bs :=
let h := @fintype.complete _ Bs (transition_from.mk t) in
let g := finset.mem_map_of_mem (parR.embed A B) h in
finset.mem_union_disjoint.mpr (or.inr (finset.mem_union_disjoint.mpr (or.inr g)))
private lemma enumerate_parallel_complete {Γ} {ℓ : lookup ℍ ω Γ} (A B : species ℍ ω Γ)
(As : fintype (transition.transition_from ℓ A)) (Bs : fintype (transition.transition_from ℓ B))
: ∀ x, x ∈ enumerate_parallel_ts A B As Bs
| ⟨ k, α, E, parL_species _ t ⟩ := enumerate_parallel_compute_l_species As Bs t
| ⟨ k, α, E, parL_concretion _ t ⟩ := enumerate_parallel_compute_l_concretion As Bs t
| ⟨ k, α, E, parR_species _ t ⟩ := enumerate_parallel_compute_r_species As Bs t
| ⟨ k, α, E, parR_concretion _ t ⟩ :=
let h := @fintype.complete _ Bs (transition_from.mk t) in
let g := finset.mem_map_of_mem (parR.embed A B) h in
finset.mem_union_disjoint.mpr (or.inr (finset.mem_union_disjoint.mpr (or.inr g)))
| ⟨ k, α, E, com₁ eqFG eqα tf tg ⟩ := begin
subst eqFG, subst eqα,
let t : com₁.compatible ℓ A B
:= ⟨ ( transition_from.mk tf, transition_from.mk tg ), ⟨ rfl, rfl ⟩ ⟩,
have h
:= finset.mem_subtype.mpr
(finset.mem_product.mpr ⟨ @fintype.complete _ As t.val.1, @fintype.complete _ Bs t.val.2 ⟩),
from finset.mem_union_disjoint.mpr (or.inl (finset.mem_map_of_mem (com₁.embed ℓ A B) h)),
end
/-- The set of all transitions from a parallel composition of species. -/
def enumerate_parallel {Γ} {ℓ : lookup ℍ ω Γ} {A B : species ℍ ω Γ}
: fintype (transition.transition_from ℓ A)
→ fintype (transition.transition_from ℓ B)
→ fintype (transition.transition_from ℓ (A |ₛ B))
| As Bs := ⟨ enumerate_parallel_ts A B As Bs, enumerate_parallel_complete A B As Bs ⟩
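/- Editorial note (not part of the original source): the transitions of `A |ₛ B` are
   enumerated as a disjoint union of three pieces: communications `com₁` built from
   compatible pairs of concretion-producing transitions of `A` and `B`, the
   `parL`-lifted transitions of `A`, and the `parR`-lifted transitions of `B`. The
   disjointness obligations of `finset.union_disjoint` rule out any overlap between
   these pieces. -/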
private def is_restriction_name {Γ} (M : affinity ℍ)
: name (context.extend M.arity Γ) → name (context.extend M.arity Γ)
→ Prop
| (name.zero a) (name.zero b) := option.is_some' (M.f a b)
| (name.extend _) (name.extend _) := true
| (name.extend _) (name.zero _) := false
| (name.zero _) (name.extend _) := false
private def is_restriction_name.comm {Γ} (M : affinity ℍ)
: ∀ (a b : name (context.extend M.arity Γ))
, is_restriction_name M a b = is_restriction_name M b a
| a b := by { cases a; cases b; from rfl <|> simp only [is_restriction_name, M.symm] }
private def is_restriction_like {Γ} (ℓ : lookup ℍ ω Γ) (M : affinity ℍ) (A : species ℍ ω (context.extend M.arity Γ))
: transition.transition_from (lookup.rename name.extend ℓ) A
→ Prop
| ⟨ _, τ⟨ p ⟩, E, t ⟩ := upair.lift_on p (is_restriction_name M) (is_restriction_name.comm M)
| ⟨ _, τ@' k, E, t ⟩ := true
| ⟨ _, # (name.zero n), E, t ⟩ := false
| ⟨ _, # (name.extend n), E, t ⟩ := true
private def is_restriction {Γ} (ℓ : lookup ℍ ω Γ) (M : affinity ℍ) (A : species ℍ ω (context.extend M.arity Γ))
:= { t : transition_from (lookup.rename name.extend ℓ) A // is_restriction_like ℓ M A t }
private def is_restriction.name_lift {Γ} (ℓ : lookup ℍ ω Γ) (M : affinity ℍ)
(A : species ℍ ω (context.extend M.arity Γ))
(B : species ℍ ω (context.extend M.arity Γ)) :
∀ (a b : name (context.extend M.arity Γ))
, is_restriction_name M a b
→ A [lookup.rename name.extend ℓ, τ⟨ a, b ⟩]⟶ (production.species B)
→ transition.transition_from ℓ (ν(M) A)
| (name.zero a) (name.zero b) is_some t :=
let this : M.get (upair.mk a b) = some (option.get' is_some) := option.eq_some_of_is_some' is_some in
⟨ _, _, _, com₂ M (option.get' is_some) this rfl t ⟩
| (name.extend a) (name.extend b) fls t := begin
have : τ⟨ name.extend a, name.extend b ⟩ = label.rename (@name.extend Γ M.arity) (τ⟨ a, b ⟩),
{ simpa only [label.rename] },
from ⟨ _, _, _, ν₁_species M this t ⟩,
end
| (name.extend _) (name.zero _) fls t := false.elim fls
| (name.zero a) (name.extend b) fls t := false.elim fls
private lemma is_restriction_name_lift.comm_zero {Γ} {ℓ : lookup ℍ ω Γ} (M : affinity ℍ) (A B : species ℍ ω (context.extend M.arity Γ))
{x y : fin M.arity}
{tL : A [lookup.rename name.extend ℓ, τ⟨ (upair.mk (name.zero x) (name.zero y)) ⟩]⟶ (production.species B)}
{tR : A [lookup.rename name.extend ℓ, τ⟨ (upair.mk (name.zero y) (name.zero x)) ⟩]⟶ (production.species B)}
(eT : tL == tR)
(k₁ k₂ : ℍ)
(e₁ : M.get (upair.mk x y) = some k₁)
(e₂ : M.get (upair.mk y x) = some k₂)
: transition.transition_from.mk (com₂ M k₁ e₁ rfl tL) = transition.transition_from.mk (com₂ M k₂ e₂ rfl tR) := begin
have : k₁ = k₂,
{
rw upair.mk.comm at e₁,
from option.some.inj (trans e₁.symm e₂),
}, subst this,
refine sigma.mk.inj_iff.mpr ⟨ rfl, heq_of_eq _ ⟩, simp only [heq_iff_eq],
from ⟨ rfl, ⟨ rfl, rfl ⟩, quot.sound (or.inr ⟨ rfl, rfl ⟩), quot.sound (or.inr ⟨ rfl, rfl ⟩), eT ⟩,
end
private lemma is_restriction_name_lift.comm_extend {Γ} {ℓ : lookup ℍ ω Γ} (M : affinity ℍ) (A B : species ℍ ω (context.extend M.arity Γ))
{x y : name Γ}
{tL : A [lookup.rename name.extend ℓ, τ⟨ (upair.mk (name.extend x) (name.extend y)) ⟩]⟶ (production.species B)}
{tR : A [lookup.rename name.extend ℓ, τ⟨ (upair.mk (name.extend y) (name.extend x)) ⟩]⟶ (production.species B)}
(eT : tL == tR)
(l₁ l₂ : label ℍ Γ kind.species)
(e₁ : τ⟨ (upair.mk (name.extend x) (name.extend y)) ⟩ = label.rename name.extend l₁)
(e₂ : τ⟨ (upair.mk (name.extend y) (name.extend x)) ⟩ = label.rename name.extend l₂)
: transition.transition_from.mk (ν₁_species M e₁ tL) = transition.transition_from.mk (ν₁_species M e₂ tR) := begin
have : l₁ = l₂,
{
rw [upair.mk.comm] at e₁,
from label.rename.inj (@name.extend.inj _ _) (trans e₁.symm e₂),
}, subst this,
refine sigma.mk.inj_iff.mpr ⟨ rfl, heq_of_eq _ ⟩, simp only [heq_iff_eq],
from ⟨ rfl, ⟨ rfl, rfl ⟩, quot.sound (or.inr ⟨ rfl, rfl ⟩), eT ⟩,
end
private lemma is_restriction.name_lift.comm {Γ} (ℓ : lookup ℍ ω Γ) (M : affinity ℍ) (A B : species ℍ ω (context.extend M.arity Γ)) :
∀ (a b : name (context.extend M.arity Γ))
, is_restriction.name_lift ℓ M A B a b
== is_restriction.name_lift ℓ M A B b a
| (name.zero x) (name.zero y) := function.hfunext
(by { unfold is_restriction_name, rw M.symm })
(λ irlL irlR eIRL, function.hfunext
(by rw upair.mk.comm)
(λ tL tR eT, heq_of_eq (is_restriction_name_lift.comm_zero M A B eT _ _ _ _)))
| (name.extend x) (name.extend y) := function.hfunext rfl
(λ irlL irlR eIRL, function.hfunext
(by rw upair.mk.comm)
(λ tL tR eT, heq_of_eq (is_restriction_name_lift.comm_extend M A B eT _ _ _ _)))
| (name.zero x) (name.extend y) := function.hfunext rfl
(λ irlL irlR eIRL, function.hfunext (by rw upair.mk.comm) (λ x b z, heq.rfl))
| (name.extend x) (name.zero y) := function.hfunext rfl
(λ irlL irlR eIRL, function.hfunext (by rw upair.mk.comm) (λ x b z, heq.rfl))
lemma ν₁_species.inj' {Γ} {ℓ : lookup ℍ ω Γ} (M : affinity ℍ) {A} :
∀ {E₁ E₂}
{l₁ : label ℍ Γ kind.species} {l'₁ : label ℍ (context.extend M.arity Γ) kind.species}
{l₂ : label ℍ Γ kind.species} {l'₂ : label ℍ (context.extend M.arity Γ) kind.species}
{e₁ : l'₁ = label.rename name.extend l₁}
{e₂ : l'₂ = label.rename name.extend l₂}
{t₁ : A [lookup.rename name.extend ℓ, l'₁]⟶ (production.species E₁)}
{t₂ : A [lookup.rename name.extend ℓ, l'₂]⟶ (production.species E₂)}
, transition_from.mk (ν₁_species M e₁ t₁) = transition_from.mk (ν₁_species M e₂ t₂)
→ transition_from.mk t₁ = transition_from.mk t₂
| E₁ E₂ l₁ l'₁ l₂ l'₂ e₁ e₂ t₁ t₂ eql := begin
rcases sigma.mk.inj (eq_of_heq (sigma.mk.inj eql).2) with ⟨ this, eqR ⟩, subst ‹l₁ = l₂›,
have : l'₁ = l'₂ := trans e₁ e₂.symm, subst ‹l'₁ = l'₂›,
rcases sigma.mk.inj (eq_of_heq eqR) with ⟨ ⟨ _ ⟩, eqT ⟩,
rcases ν₁_species.inj (eq_of_heq eqT) with ⟨ _, ⟨ _ ⟩ ⟩,
from rfl,
end
lemma ν₁_concretion.inj' {Γ} {ℓ : lookup ℍ ω Γ} (M : affinity ℍ) {A} :
∀ {a x b y} {E₁ : concretion ℍ ω _ a x} {E₂ : concretion ℍ ω _ b y}
{l₁ : label ℍ Γ kind.concretion} {l'₁ : label ℍ (context.extend M.arity Γ) kind.concretion}
{l₂ : label ℍ Γ kind.concretion} {l'₂ : label ℍ (context.extend M.arity Γ) kind.concretion}
{e₁ : l'₁ = label.rename name.extend l₁}
{e₂ : l'₂ = label.rename name.extend l₂}
{t₁ : A [lookup.rename name.extend ℓ, l'₁]⟶ (production.concretion E₁)}
{t₂ : A [lookup.rename name.extend ℓ, l'₂]⟶ (production.concretion E₂)}
, transition_from.mk (ν₁_concretion M e₁ t₁) = transition_from.mk (ν₁_concretion M e₂ t₂)
→ transition_from.mk t₁ = transition_from.mk t₂
| a x b y E₁ E₂ l₁ l'₁ l₂ l'₂ e₁ e₂ t₁ t₂ eql := begin
rcases sigma.mk.inj (eq_of_heq (sigma.mk.inj eql).2) with ⟨ this, eqR ⟩, subst ‹l₁ = l₂›,
have : l'₁ = l'₂ := trans e₁ e₂.symm, subst ‹l'₁ = l'₂›,
rcases sigma.mk.inj (eq_of_heq eqR) with ⟨ ⟨ _ ⟩, eqT ⟩,
rcases ν₁_concretion.inj (eq_of_heq eqT) with ⟨ _, ⟨ _ ⟩ ⟩,
from rfl,
end
private lemma is_restriction.name_lift.inj_zero {Γ} (ℓ : lookup ℍ ω Γ) (M : affinity ℍ)
(A : species ℍ ω (context.extend M.arity Γ)) :
∀ {B₁ B₂ : species ℍ ω (context.extend M.arity Γ)}
{p q : upair (fin M.arity)}
{t₁ : A [lookup.rename name.extend ℓ, τ⟨ (upair.map name.zero p) ⟩]⟶ (production.species B₁)}
{t₂ : A [lookup.rename name.extend ℓ, τ⟨ (upair.map name.zero q) ⟩]⟶ (production.species B₂)}
{k₁ k₂ : ℍ} {e₁ : M.get p = some k₁} {e₂ : M.get q = some k₂}
, transition_from.mk (com₂ M k₁ e₁ rfl t₁) = transition_from.mk (com₂ M k₂ e₂ rfl t₂)
→ transition_from.mk t₁ = transition_from.mk t₂
| B₁ B₂ p q t₁ t₂ k₁ k₂ e₁ e₂ eql := begin
rcases sigma.mk.inj (eq_of_heq (sigma.mk.inj eql).2) with ⟨ ⟨ _ ⟩, eqR ⟩,
rcases sigma.mk.inj (eq_of_heq eqR) with ⟨ ⟨ _ ⟩, eqT ⟩,
rcases com₂.inj (eq_of_heq eqT) with ⟨ ⟨ _ ⟩, ⟨ _ ⟩, ⟨ _ ⟩ ⟩,
from rfl,
end
private lemma is_restriction.name_lift.inj {Γ} (ℓ : lookup ℍ ω Γ) (M : affinity ℍ)
(A : species ℍ ω (context.extend M.arity Γ)) {B₁ B₂ : species ℍ ω (context.extend M.arity Γ)} :
∀ {a₁ b₁ a₂ b₂ : name (context.extend M.arity Γ)}
(t₁ : A [lookup.rename name.extend ℓ, τ⟨ a₁, b₁ ⟩]⟶ (production.species B₁))
(t₂ : A [lookup.rename name.extend ℓ, τ⟨ a₂, b₂ ⟩]⟶ (production.species B₂))
(irl₁ : is_restriction_name M a₁ b₁)
(irl₂ : is_restriction_name M a₂ b₂)
, is_restriction.name_lift ℓ M A B₁ a₁ b₁ irl₁ t₁ = is_restriction.name_lift ℓ M A B₂ a₂ b₂ irl₂ t₂
→ transition_from.mk t₁ = transition_from.mk t₂
| a₁ b₁ a₂ b₂ t₁ t₂ irl₁ irl₂ eql := begin
cases a₁; cases b₁; try { from false.elim irl₁ };
cases a₂; cases b₂; try { from false.elim irl₂ <|> cases eql };
simp only [is_restriction.name_lift] at eql,
case name.extend { from ν₁_species.inj' M eql },
case name.zero { from is_restriction.name_lift.inj_zero ℓ M A eql },
end
private def is_restriction.lift {Γ} (ℓ : lookup ℍ ω Γ) (M : affinity ℍ) (A : species ℍ ω (context.extend M.arity Γ))
: is_restriction ℓ M A
→ transition.transition_from ℓ (ν(M) A)
| ⟨ ⟨ _, τ@' k, production.species B, t ⟩, _ ⟩ := begin
have : τ@' k = label.rename (@name.extend Γ M.arity) (τ@' k), { simp only [label.rename] },
from ⟨ _, _, _, ν₁_species M this t ⟩,
end
| ⟨ ⟨ _, τ⟨ p ⟩, production.species B, t ⟩, irl ⟩ := begin
unfold is_restriction_like at irl,
from upair.rec_on p
(λ a b irl t, is_restriction.name_lift ℓ M A B a b irl t)
(is_restriction.name_lift.comm ℓ M A B)
irl t,
end
| ⟨ ⟨ _, # (name.zero n), E, t ⟩, irl ⟩ := false.elim irl
| ⟨ ⟨ _, # (name.extend n), production.concretion F, t ⟩, _ ⟩ := begin
have : # (name.extend n) = label.rename (@name.extend Γ M.arity) (# n), { simp only [label.rename] },
from ⟨ _, # n, production.concretion (ν'(M) F), ν₁_concretion M this t ⟩,
end
private def is_restriction.lift.inj_both {Γ : context} {ℓ : lookup ℍ ω Γ}
(M : affinity ℍ) (A B : species ℍ ω (context.extend M.arity Γ)) :
∀ {B B' : species ℍ ω (context.extend M.arity Γ)}
{p q : upair (name (context.extend M.arity Γ))}
(t : A [lookup.rename name.extend ℓ, τ⟨ p ⟩]⟶ (production.species B))
(irl : is_restriction_like ℓ M A ⟨kind.species, ⟨τ⟨ p ⟩, ⟨production.species B, t⟩⟩⟩)
(t' : A [lookup.rename name.extend ℓ, τ⟨ q ⟩]⟶ (production.species B'))
(irl' : is_restriction_like ℓ M A ⟨kind.species, ⟨τ⟨ q ⟩, ⟨production.species B', t'⟩⟩⟩)
, is_restriction.lift ℓ M A ⟨⟨kind.species, ⟨τ⟨ p ⟩, ⟨production.species B, t⟩⟩⟩, irl⟩
= is_restriction.lift ℓ M A ⟨⟨kind.species, ⟨τ⟨ q ⟩, ⟨production.species B', t'⟩⟩⟩, irl'⟩
→ (⟨⟨kind.species, ⟨τ⟨ p ⟩, ⟨production.species B, t⟩⟩⟩, irl⟩ : is_restriction ℓ M A)
= ⟨⟨kind.species, ⟨τ⟨ q ⟩, ⟨production.species B', t'⟩⟩⟩, irl'⟩
| B B' p q t irl t' irl' eql := begin
unfold is_restriction.lift at eql,
rcases upair.exists_rep p with ⟨ a, b, ⟨ _ ⟩ ⟩,
rcases upair.exists_rep q with ⟨ a', b', ⟨ _ ⟩ ⟩,
simp only [],
from is_restriction.name_lift.inj ℓ M A t t' irl irl' eql,
end
private def is_restriction.lift.inj {Γ} (ℓ : lookup ℍ ω Γ) (M : affinity ℍ) (A : species ℍ ω (context.extend M.arity Γ))
: function.injective (is_restriction.lift ℓ M A)
| ⟨ ⟨ _, # (name.zero n), E, t ⟩, irl ⟩ _ eql := false.elim irl
| ⟨ ⟨ _, τ@' k, production.species B, t ⟩, _ ⟩ ⟨ ⟨ _, # (name.zero n), E, t' ⟩, irl ⟩ eql := false.elim irl
| ⟨ ⟨ _, τ@' k, production.species B, t ⟩, _ ⟩ ⟨ ⟨ _, τ@' k', production.species B', t' ⟩, _ ⟩ eql
:= by { cases ν₁_species.inj' M eql, from rfl }
| ⟨ ⟨ _, τ@' k, production.species B, t ⟩, _ ⟩ ⟨ ⟨ _, τ⟨ p ⟩, production.species B', t' ⟩, irl ⟩ eql := begin
rcases upair.exists_rep p with ⟨ a, b, ⟨ _ ⟩ ⟩,
simp only [is_restriction.lift, upair.rec_on_beta] at eql,
cases a; cases b; cases eql <|> from false.elim irl,
end
| ⟨ ⟨ _, τ@' k, production.species B, t ⟩, _ ⟩ ⟨ ⟨ _, # (name.extend n), production.concretion F, t' ⟩, _ ⟩ eql := by cases eql
| ⟨ ⟨ _, τ⟨ p ⟩, production.species B, t ⟩, _ ⟩ ⟨ ⟨ _, # (name.zero n'), E, t' ⟩, irl ⟩ eql := false.elim irl
| ⟨ ⟨ _, τ⟨ p ⟩, production.species B, t ⟩, irl ⟩ ⟨ ⟨ _, # (name.extend n'), production.concretion F', t' ⟩, _ ⟩ eql := begin
rcases upair.exists_rep p with ⟨ a, b, ⟨ _ ⟩ ⟩,
simp only [is_restriction.lift, upair.rec_on_beta] at eql,
cases a; cases b; cases eql <|> from false.elim irl,
end
| ⟨ ⟨ _, τ⟨ p ⟩, production.species B, t ⟩, irl ⟩ ⟨ ⟨ _, τ@' k', production.species B', t' ⟩, _ ⟩ eql := begin
rcases upair.exists_rep p with ⟨ a, b, ⟨ _ ⟩ ⟩,
simp only [is_restriction.lift, upair.rec_on_beta] at eql,
cases a; cases b; cases eql <|> from false.elim irl,
end
| ⟨ ⟨ _, τ⟨ p ⟩, production.species B, t ⟩, irl ⟩ ⟨ ⟨ _, τ⟨ q ⟩, production.species B', t' ⟩, irl' ⟩ eql
:= is_restriction.lift.inj_both M A B t irl t' irl' eql
| ⟨ ⟨ _, # (name.extend n), production.concretion F, t ⟩, _ ⟩ ⟨ ⟨ _, # (name.extend n'), production.concretion F', t' ⟩, _ ⟩ eql
:= by { cases ν₁_concretion.inj' M eql, from rfl }
| ⟨ ⟨ _, # (name.extend n), production.concretion F, t ⟩, _ ⟩ ⟨ ⟨ _, # (name.zero n'), E, t' ⟩, irl ⟩ eql := false.elim irl
| ⟨ ⟨ _, # (name.extend n), production.concretion F, t ⟩, _ ⟩ ⟨ ⟨ _, τ@' k', production.species B', t' ⟩, _ ⟩ eql := by cases eql
| ⟨ ⟨ _, # (name.extend n), production.concretion F, t ⟩, _ ⟩ ⟨ ⟨ _, τ⟨ p ⟩, production.species B', t' ⟩, irl ⟩ eql := begin
rcases upair.exists_rep p with ⟨ a, b, ⟨ _ ⟩ ⟩,
simp only [is_restriction.lift, upair.rec_on_beta] at eql,
cases a; cases b; cases eql <|> from false.elim irl,
end
private def is_restriction.embed {Γ} (ℓ : lookup ℍ ω Γ) (M : affinity ℍ) (A : species ℍ ω (context.extend M.arity Γ))
: is_restriction ℓ M A
↪ transition.transition_from ℓ (ν(M) A)
:= ⟨ is_restriction.lift ℓ M A, is_restriction.lift.inj ℓ M A ⟩
instance is_restriction_name.decide {Γ} (M : affinity ℍ)
: decidable_pred (λ (p : upair.pair (name (context.extend M.arity Γ))), is_restriction_name M p.fst p.snd)
| ⟨ name.zero a, name.zero b ⟩ := by { unfold is_restriction_name, apply_instance }
| ⟨ name.extend _, name.extend _ ⟩ := decidable.true
| ⟨ name.extend _, name.zero _ ⟩ := decidable.false
| ⟨ name.zero _, name.extend _ ⟩ := decidable.false
instance is_restriction_like.decide {Γ} (ℓ : lookup ℍ ω Γ) (M : affinity ℍ) (A : species ℍ ω (context.extend M.arity Γ))
: decidable_pred (is_restriction_like ℓ M A)
| ⟨ _, τ⟨ p ⟩, E, t ⟩ := quot.lift.decidable_pred _ _ _
| ⟨ _, τ@' k, E, t ⟩ := decidable.true
| ⟨ _, # (name.zero n), E, t ⟩ := decidable.false
| ⟨ _, # (name.extend n), E, t ⟩ := decidable.true
private def enumerate_restriction {Γ} (ℓ : lookup ℍ ω Γ) (M : affinity ℍ) (A : species ℍ ω (context.extend M.arity Γ))
: fintype (transition.transition_from (lookup.rename name.extend ℓ) A)
→ fintype (transition.transition_from ℓ (ν(M) A))
| As :=
⟨ finset.map (is_restriction.embed ℓ M A)
(finset.subtype (is_restriction_like ℓ M A) As.elems)
, λ t, begin
rcases t with ⟨ k, α, E, t ⟩,
cases t,
case com₂ : p p' B k eql eqp t {
rcases upair.exists_rep p with ⟨ a, b, ep' ⟩, subst ep',
simp only [upair.map_beta] at eqp, subst eqp, cases eqp,
have : is_restriction_like ℓ M A (transition_from.mk t),
{
show option.is_some' (M.f a b),
have : M.f a b = some k := eql,
rw this, from true.intro,
},
have kdef : k = option.get' this := option.some.inj (trans eql.symm (option.eq_some_of_is_some' this)),
subst kdef,
let t' : is_restriction ℓ M A := ⟨ ⟨ _, _, _, t ⟩, this ⟩,
have this := finset.mem_map_of_mem (is_restriction.embed ℓ M A)
(finset.mem_subtype.mpr (@fintype.complete _ As t'.val)),
unfold_coes at this,
simp only [is_restriction.embed, is_restriction.lift, upair.rec_on_beta, is_restriction.name_lift] at this,
from this,
},
case ν₁_species : l l' B eql t {
have : is_restriction_like ℓ M A (transition_from.mk t),
{
cases l; simp only [label.rename] at eql; subst eql;
simp only [transition_from.mk, is_restriction_like],
rcases upair.exists_rep l_k with ⟨ a, b, ⟨ _ ⟩ ⟩,
from true.intro,
},
let t' : is_restriction ℓ M A := ⟨ ⟨ _, _, _, t ⟩, this ⟩,
have this := finset.mem_map_of_mem (is_restriction.embed ℓ M A)
(finset.mem_subtype.mpr (@fintype.complete _ As t'.val)),
unfold_coes at this,
cases l,
case label.spontaneous { simp only [label.rename] at eql, subst eql, from this },
have up : ∀ (a b : name Γ), (quot.mk setoid.r (upair.pair.mk a b)) = upair.mk a b := λ a b, rfl,
rcases quot.exists_rep l_k with ⟨ ⟨ a, b ⟩, h ⟩, rw up at h, subst h,
simp only [label.rename, upair.map_beta] at eql, subst eql,
simp only [is_restriction.embed, is_restriction.lift, upair.rec_on_beta, is_restriction.name_lift] at this,
from this,
},
case ν₁_concretion : l l' b y B eql t {
cases l, simp only [label.rename] at eql, subst eql,
let t' : is_restriction ℓ M A := ⟨ ⟨ _, _, _, t ⟩, true.intro ⟩,
from finset.mem_map_of_mem (is_restriction.embed ℓ M A)
(finset.mem_subtype.mpr (@fintype.complete _ As t'.val)),
}
end ⟩
/-- Show that the set of available transitions from a species is finite and thus
enumerable. -/
def enumerate :
∀ {Γ} (ℓ : lookup ℍ ω Γ) (A : species ℍ ω Γ)
, fintype (transition_from ℓ A)
| Γ ℓ nil := enumerate_nil
| Γ ℓ (apply D as) := enumerate_apply ℓ D as
| Γ ℓ (A |ₛ B) := enumerate_parallel (enumerate ℓ A) (enumerate ℓ B)
| Γ ℓ (Σ# As) := enumerate_choices ℓ As
| Γ ℓ (ν(M) A) := enumerate_restriction ℓ M A (enumerate (lookup.rename name.extend ℓ) A)
using_well_founded {
rel_tac := λ _ _,
`[exact ⟨_, measure_wf (λ x, whole.sizeof _ _ _ _ x.2.2 ) ⟩ ],
dec_tac := tactic.fst_dec_tac,
}
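/- Editorial note (not part of the original source): `enumerate` recurses on the
   syntactic size of the species (`whole.sizeof`, via the `using_well_founded`
   clause). The restriction case is the only one that changes the context: it
   recurses on the body under the extended context, with the lookup function
   renamed along `name.extend`. -/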
instance {Γ} (ℓ : lookup ℍ ω Γ) (A : species ℍ ω Γ)
: fintype (transition_from ℓ A) := enumerate ℓ A
end transition
end cpi
#lint-
| 7ba43540a3102e6af0060999ab65275f684b3286 | d406927ab5617694ec9ea7001f101b7c9e3d9702 | /src/data/qpf/univariate/basic.lean | 94c474a2a20557892e429e4fe25fe31402119fba | ["Apache-2.0"] | permissive | alreadydone/mathlib | dc0be621c6c8208c581f5170a8216c5ba6721927 | c982179ec21091d3e102d8a5d9f5fe06c8fafb73 | refs/heads/master | 1,685,523,275,196 | 1,670,184,141,000 | 1,670,184,141,000 | 287,574,545 | 0 | 0 | Apache-2.0 | 1,670,290,714,000 | 1,597,421,623,000 | Lean | UTF-8 | Lean | false | false | 21,828 | lean |
/-
Copyright (c) 2018 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad
-/
import data.pfunctor.univariate.M
/-!
# Quotients of Polynomial Functors
We assume the following:
`P` : a polynomial functor
`W` : its W-type
`M` : its M-type
`F` : a functor
We define:
`q` : `qpf` data, representing `F` as a quotient of `P`
The main goal is to construct:
`fix` : the initial algebra with structure map `F fix → fix`.
`cofix` : the final coalgebra with structure map `cofix → F cofix`
We also show that the composition of qpfs is a qpf, and that the quotient of a qpf
is a qpf.
The present theory focuses on the univariate case for qpfs.
## References
* [Jeremy Avigad, Mario M. Carneiro and Simon Hudon, *Data Types as Quotients of Polynomial Functors*][avigad-carneiro-hudon2019]
-/
universe u
/--
Quotients of polynomial functors.
Roughly speaking, saying that `F` is a quotient of a polynomial functor means that for each `α`,
elements of `F α` are represented by pairs `⟨a, f⟩`, where `a` is the shape of the object and
`f` indexes the relevant elements of `α`, in a suitably natural manner.
-/
class qpf (F : Type u → Type u) [functor F] :=
(P : pfunctor.{u})
(abs : Π {α}, P.obj α → F α)
(repr : Π {α}, F α → P.obj α)
(abs_repr : ∀ {α} (x : F α), abs (repr x) = x)
(abs_map : ∀ {α β} (f : α → β) (p : P.obj α), abs (f <$> p) = f <$> abs p)
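/- Editorial sketch (not part of the original file, and only indicative): a simple
   functor such as `option` fits this interface by taking `P.A := bool` with
   `P.B tt := punit` and `P.B ff := pempty`; then `abs ⟨tt, f⟩ := some (f punit.star)`,
   `abs ⟨ff, _⟩ := none`, `repr none := ⟨ff, pempty.elim⟩` and
   `repr (some a) := ⟨tt, λ _, a⟩`, with `abs_repr` and `abs_map` holding by case
   analysis on the shape. -/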
namespace qpf
variables {F : Type u → Type u} [functor F] [q : qpf F]
include q
open functor (liftp liftr)
/-
Show that every qpf is a lawful functor.
Note: every functor has a field `map_const`, and `is_lawful_functor` has the defining
characterization `map_const_eq`. The qpf structure does not constrain `map_const`, so we
can only propagate that assumption.
-/
theorem id_map {α : Type*} (x : F α) : id <$> x = x :=
by { rw ←abs_repr x, cases repr x with a f, rw [←abs_map], reflexivity }
theorem comp_map {α β γ : Type*} (f : α → β) (g : β → γ) (x : F α) :
(g ∘ f) <$> x = g <$> f <$> x :=
by { rw ←abs_repr x, cases repr x with a f, rw [←abs_map, ←abs_map, ←abs_map], reflexivity }
theorem is_lawful_functor
(h : ∀ α β : Type u, @functor.map_const F _ α _ = functor.map ∘ function.const β) :
is_lawful_functor F :=
{ map_const_eq := h,
id_map := @id_map F _ _,
comp_map := @comp_map F _ _ }
/-
Lifting predicates and relations
-/
section
open functor
theorem liftp_iff {α : Type u} (p : α → Prop) (x : F α) :
liftp p x ↔ ∃ a f, x = abs ⟨a, f⟩ ∧ ∀ i, p (f i) :=
begin
split,
{ rintros ⟨y, hy⟩, cases h : repr y with a f,
use [a, λ i, (f i).val], split,
{ rw [←hy, ←abs_repr y, h, ←abs_map], reflexivity },
intro i, apply (f i).property },
rintros ⟨a, f, h₀, h₁⟩, dsimp at *,
use abs (⟨a, λ i, ⟨f i, h₁ i⟩⟩),
rw [←abs_map, h₀], reflexivity
end
theorem liftp_iff' {α : Type u} (p : α → Prop) (x : F α) :
liftp p x ↔ ∃ u : q.P.obj α, abs u = x ∧ ∀ i, p (u.snd i) :=
begin
split,
{ rintros ⟨y, hy⟩, cases h : repr y with a f,
use ⟨a, λ i, (f i).val⟩, dsimp, split,
{ rw [←hy, ←abs_repr y, h, ←abs_map], reflexivity },
intro i, apply (f i).property },
rintros ⟨⟨a, f⟩, h₀, h₁⟩, dsimp at *,
use abs (⟨a, λ i, ⟨f i, h₁ i⟩⟩),
rw [←abs_map, ←h₀], reflexivity
end
theorem liftr_iff {α : Type u} (r : α → α → Prop) (x y : F α) :
liftr r x y ↔ ∃ a f₀ f₁, x = abs ⟨a, f₀⟩ ∧ y = abs ⟨a, f₁⟩ ∧ ∀ i, r (f₀ i) (f₁ i) :=
begin
split,
{ rintros ⟨u, xeq, yeq⟩, cases h : repr u with a f,
use [a, λ i, (f i).val.fst, λ i, (f i).val.snd],
split, { rw [←xeq, ←abs_repr u, h, ←abs_map], refl },
split, { rw [←yeq, ←abs_repr u, h, ←abs_map], refl },
intro i, exact (f i).property },
rintros ⟨a, f₀, f₁, xeq, yeq, h⟩,
use abs ⟨a, λ i, ⟨(f₀ i, f₁ i), h i⟩⟩,
dsimp, split,
{ rw [xeq, ←abs_map], refl },
rw [yeq, ←abs_map], refl
end
end
/-
Think of trees in the `W` type corresponding to `P` as representatives of elements of the
least fixed point of `F`, and assign a canonical representative to each equivalence class
of trees.
-/
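/- Editorial roadmap (not part of the original source): the equivalence in question is
   `Wequiv` below, `Wrepr` computes a canonical representative, and `fix F` is then
   defined as the quotient of `q.P.W` by the setoid `W_setoid` built from `Wequiv`. -/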
/-- does recursion on `q.P.W` using `g : F α → α` rather than `g : P α → α` -/
def recF {α : Type*} (g : F α → α) : q.P.W → α
| ⟨a, f⟩ := g (abs ⟨a, λ x, recF (f x)⟩)
theorem recF_eq {α : Type*} (g : F α → α) (x : q.P.W) :
recF g x = g (abs (recF g <$> x.dest)) :=
by cases x; reflexivity
theorem recF_eq' {α : Type*} (g : F α → α) (a : q.P.A) (f : q.P.B a → q.P.W) :
recF g ⟨a, f⟩ = g (abs (recF g <$> ⟨a, f⟩)) :=
rfl
/-- two trees are equivalent if their F-abstractions are -/
inductive Wequiv : q.P.W → q.P.W → Prop
| ind (a : q.P.A) (f f' : q.P.B a → q.P.W) :
(∀ x, Wequiv (f x) (f' x)) → Wequiv ⟨a, f⟩ ⟨a, f'⟩
| abs (a : q.P.A) (f : q.P.B a → q.P.W) (a' : q.P.A) (f' : q.P.B a' → q.P.W) :
abs ⟨a, f⟩ = abs ⟨a', f'⟩ → Wequiv ⟨a, f⟩ ⟨a', f'⟩
| trans (u v w : q.P.W) : Wequiv u v → Wequiv v w → Wequiv u w
/-- recF is insensitive to the representation -/
theorem recF_eq_of_Wequiv {α : Type u} (u : F α → α) (x y : q.P.W) :
Wequiv x y → recF u x = recF u y :=
begin
cases x with a f, cases y with b g,
intro h, induction h,
case qpf.Wequiv.ind : a f f' h ih
{ simp only [recF_eq', pfunctor.map_eq, function.comp, ih] },
case qpf.Wequiv.abs : a f a' f' h
{ simp only [recF_eq', abs_map, h] },
case qpf.Wequiv.trans : x y z e₁ e₂ ih₁ ih₂
{ exact eq.trans ih₁ ih₂ }
end
theorem Wequiv.abs' (x y : q.P.W) (h : abs x.dest = abs y.dest) :
Wequiv x y :=
by { cases x, cases y, apply Wequiv.abs, apply h }
theorem Wequiv.refl (x : q.P.W) : Wequiv x x :=
by cases x with a f; exact Wequiv.abs a f a f rfl
theorem Wequiv.symm (x y : q.P.W) : Wequiv x y → Wequiv y x :=
begin
cases x with a f, cases y with b g,
intro h, induction h,
case qpf.Wequiv.ind : a f f' h ih
{ exact Wequiv.ind _ _ _ ih },
case qpf.Wequiv.abs : a f a' f' h
{ exact Wequiv.abs _ _ _ _ h.symm },
case qpf.Wequiv.trans : x y z e₁ e₂ ih₁ ih₂
{ exact qpf.Wequiv.trans _ _ _ ih₂ ih₁}
end
/-- maps every element of the W type to a canonical representative -/
def Wrepr : q.P.W → q.P.W := recF (pfunctor.W.mk ∘ repr)
theorem Wrepr_equiv (x : q.P.W) : Wequiv (Wrepr x) x :=
begin
induction x with a f ih,
apply Wequiv.trans,
{ change Wequiv (Wrepr ⟨a, f⟩) (pfunctor.W.mk (Wrepr <$> ⟨a, f⟩)),
apply Wequiv.abs',
have : Wrepr ⟨a, f⟩ = pfunctor.W.mk (repr (abs (Wrepr <$> ⟨a, f⟩))) := rfl,
rw [this, pfunctor.W.dest_mk, abs_repr],
reflexivity },
apply Wequiv.ind, exact ih
end
/--
Define the fixed point as the quotient of trees under the equivalence relation `Wequiv`.
-/
def W_setoid : setoid q.P.W :=
⟨Wequiv, @Wequiv.refl _ _ _, @Wequiv.symm _ _ _, @Wequiv.trans _ _ _⟩
local attribute [instance] W_setoid
/-- inductive type defined as initial algebra of a Quotient of Polynomial Functor -/
@[nolint has_nonempty_instance]
def fix (F : Type u → Type u) [functor F] [q : qpf F] := quotient (W_setoid : setoid q.P.W)
/-- recursor of a type defined by a qpf -/
def fix.rec {α : Type*} (g : F α → α) : fix F → α :=
quot.lift (recF g) (recF_eq_of_Wequiv g)
/-- access the underlying W-type of a fixpoint data type -/
def fix_to_W : fix F → q.P.W :=
quotient.lift Wrepr (recF_eq_of_Wequiv (λ x, @pfunctor.W.mk q.P (repr x)))
/-- constructor of a type defined by a qpf -/
def fix.mk (x : F (fix F)) : fix F := quot.mk _ (pfunctor.W.mk (fix_to_W <$> repr x))
/-- destructor of a type defined by a qpf -/
def fix.dest : fix F → F (fix F) := fix.rec (functor.map fix.mk)
theorem fix.rec_eq {α : Type*} (g : F α → α) (x : F (fix F)) :
fix.rec g (fix.mk x) = g (fix.rec g <$> x) :=
have recF g ∘ fix_to_W = fix.rec g,
by { apply funext, apply quotient.ind, intro x, apply recF_eq_of_Wequiv,
rw fix_to_W, apply Wrepr_equiv },
begin
conv { to_lhs, rw [fix.rec, fix.mk], dsimp },
cases h : repr x with a f,
rw [pfunctor.map_eq, recF_eq, ←pfunctor.map_eq, pfunctor.W.dest_mk, ←pfunctor.comp_map,
abs_map, ←h, abs_repr, this]
end
theorem fix.ind_aux (a : q.P.A) (f : q.P.B a → q.P.W) :
fix.mk (abs ⟨a, λ x, ⟦f x⟧⟩) = ⟦⟨a, f⟩⟧ :=
have fix.mk (abs ⟨a, λ x, ⟦f x⟧⟩) = ⟦Wrepr ⟨a, f⟩⟧,
begin
apply quot.sound, apply Wequiv.abs',
rw [pfunctor.W.dest_mk, abs_map, abs_repr, ←abs_map, pfunctor.map_eq],
conv { to_rhs, simp only [Wrepr, recF_eq, pfunctor.W.dest_mk, abs_repr] },
reflexivity
end,
by { rw this, apply quot.sound, apply Wrepr_equiv }
theorem fix.ind_rec {α : Type u} (g₁ g₂ : fix F → α)
(h : ∀ x : F (fix F), g₁ <$> x = g₂ <$> x → g₁ (fix.mk x) = g₂ (fix.mk x)) :
∀ x, g₁ x = g₂ x :=
begin
apply quot.ind,
intro x,
induction x with a f ih,
change g₁ ⟦⟨a, f⟩⟧ = g₂ ⟦⟨a, f⟩⟧,
rw [←fix.ind_aux a f], apply h,
rw [←abs_map, ←abs_map, pfunctor.map_eq, pfunctor.map_eq],
dsimp [function.comp],
congr' with x, apply ih
end
theorem fix.rec_unique {α : Type u} (g : F α → α) (h : fix F → α)
(hyp : ∀ x, h (fix.mk x) = g (h <$> x)) :
fix.rec g = h :=
begin
ext x,
apply fix.ind_rec,
intros x hyp',
rw [hyp, ←hyp', fix.rec_eq]
end
theorem fix.mk_dest (x : fix F) : fix.mk (fix.dest x) = x :=
begin
change (fix.mk ∘ fix.dest) x = id x,
apply fix.ind_rec,
intro x, dsimp,
rw [fix.dest, fix.rec_eq, id_map, comp_map],
intro h, rw h
end
theorem fix.dest_mk (x : F (fix F)) : fix.dest (fix.mk x) = x :=
begin
unfold fix.dest, rw [fix.rec_eq, ←fix.dest, ←comp_map],
conv { to_rhs, rw ←(id_map x) },
congr' with x, apply fix.mk_dest
end
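/- Illustrative usage (editorial addition, not part of the original file): together,
   `fix.mk_dest` and `fix.dest_mk` exhibit `fix F` as a fixed point of `F`, with
   `fix.mk` and `fix.dest` mutually inverse. -/
example (x : fix F) : fix.mk (fix.dest x) = x := fix.mk_dest x
example (x : F (fix F)) : fix.dest (fix.mk x) = x := fix.dest_mk x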
theorem fix.ind (p : fix F → Prop)
(h : ∀ x : F (fix F), liftp p x → p (fix.mk x)) :
∀ x, p x :=
begin
apply quot.ind,
intro x,
induction x with a f ih,
change p ⟦⟨a, f⟩⟧,
rw [←fix.ind_aux a f],
apply h,
rw liftp_iff,
refine ⟨_, _, rfl, _⟩,
apply ih
end
end qpf
/-
Construct the final coalgebra to a qpf.
-/
namespace qpf
variables {F : Type u → Type u} [functor F] [q : qpf F]
include q
open functor (liftp liftr)
/-- does recursion on `q.P.M` using `g : α → F α` rather than `g : α → P α` -/
def corecF {α : Type*} (g : α → F α) : α → q.P.M :=
pfunctor.M.corec (λ x, repr (g x))
theorem corecF_eq {α : Type*} (g : α → F α) (x : α) :
pfunctor.M.dest (corecF g x) = corecF g <$> repr (g x) :=
by rw [corecF, pfunctor.M.dest_corec]
/- Equivalence -/
/-- A pre-congruence on q.P.M *viewed as an F-coalgebra*. Not necessarily symmetric. -/
def is_precongr (r : q.P.M → q.P.M → Prop) : Prop :=
∀ ⦃x y⦄, r x y →
abs (quot.mk r <$> pfunctor.M.dest x) = abs (quot.mk r <$> pfunctor.M.dest y)
/-- The maximal congruence on q.P.M -/
def Mcongr : q.P.M → q.P.M → Prop :=
λ x y, ∃ r, is_precongr r ∧ r x y
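/- Editorial note (not part of the original source): `Mcongr` relates two `q.P.M` trees
   whenever some precongruence does, i.e. it is the union of all precongruences;
   `cofix F` below is the quotient of `q.P.M` by this relation. -/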
/-- coinductive type defined as the final coalgebra of a qpf -/
def cofix (F : Type u → Type u) [functor F] [q : qpf F]:= quot (@Mcongr F _ q)
instance [inhabited q.P.A] : inhabited (cofix F) := ⟨ quot.mk _ default ⟩
/-- corecursor for type defined by `cofix` -/
def cofix.corec {α : Type*} (g : α → F α) (x : α) : cofix F :=
quot.mk _ (corecF g x)
/-- destructor for type defined by `cofix` -/
def cofix.dest : cofix F → F (cofix F) :=
quot.lift
(λ x, quot.mk Mcongr <$> (abs (pfunctor.M.dest x)))
begin
rintros x y ⟨r, pr, rxy⟩, dsimp,
have : ∀ x y, r x y → Mcongr x y,
{ intros x y h, exact ⟨r, pr, h⟩ },
rw [←quot.factor_mk_eq _ _ this], dsimp,
conv { to_lhs, rw [comp_map, ←abs_map, pr rxy, abs_map, ←comp_map] }
end
theorem cofix.dest_corec {α : Type u} (g : α → F α) (x : α) :
cofix.dest (cofix.corec g x) = cofix.corec g <$> g x :=
begin
conv { to_lhs, rw [cofix.dest, cofix.corec] }, dsimp,
rw [corecF_eq, abs_map, abs_repr, ←comp_map], reflexivity
end
private theorem cofix.bisim_aux
(r : cofix F → cofix F → Prop)
(h' : ∀ x, r x x)
(h : ∀ x y, r x y → quot.mk r <$> cofix.dest x = quot.mk r <$> cofix.dest y) :
∀ x y, r x y → x = y :=
begin
intro x, apply quot.induction_on x, clear x,
intros x y, apply quot.induction_on y, clear y,
intros y rxy,
apply quot.sound,
let r' := λ x y, r (quot.mk _ x) (quot.mk _ y),
have : is_precongr r',
{ intros a b r'ab,
have h₀: quot.mk r <$> quot.mk Mcongr <$> abs (pfunctor.M.dest a) =
quot.mk r <$> quot.mk Mcongr <$> abs (pfunctor.M.dest b) := h _ _ r'ab,
have h₁ : ∀ u v : q.P.M, Mcongr u v → quot.mk r' u = quot.mk r' v,
{ intros u v cuv, apply quot.sound, dsimp [r'], rw quot.sound cuv, apply h' },
let f : quot r → quot r' := quot.lift (quot.lift (quot.mk r') h₁)
begin
intro c, apply quot.induction_on c, clear c,
intros c d, apply quot.induction_on d, clear d,
intros d rcd, apply quot.sound, apply rcd
end,
have : f ∘ quot.mk r ∘ quot.mk Mcongr = quot.mk r' := rfl,
rw [←this, pfunctor.comp_map _ _ f, pfunctor.comp_map _ _ (quot.mk r),
abs_map, abs_map, abs_map, h₀],
rw [pfunctor.comp_map _ _ f, pfunctor.comp_map _ _ (quot.mk r),
abs_map, abs_map, abs_map] },
refine ⟨r', this, rxy⟩
end
theorem cofix.bisim_rel
(r : cofix F → cofix F → Prop)
(h : ∀ x y, r x y → quot.mk r <$> cofix.dest x = quot.mk r <$> cofix.dest y) :
∀ x y, r x y → x = y :=
let r' x y := x = y ∨ r x y in
begin
intros x y rxy,
apply cofix.bisim_aux r',
{ intro x, left, reflexivity },
{ intros x y r'xy,
cases r'xy, { rw r'xy },
have : ∀ x y, r x y → r' x y := λ x y h, or.inr h,
rw ←quot.factor_mk_eq _ _ this, dsimp,
rw [@comp_map _ _ q _ _ _ (quot.mk r), @comp_map _ _ q _ _ _ (quot.mk r)],
rw h _ _ r'xy },
right, exact rxy
end
theorem cofix.bisim
(r : cofix F → cofix F → Prop)
(h : ∀ x y, r x y → liftr r (cofix.dest x) (cofix.dest y)) :
∀ x y, r x y → x = y :=
begin
apply cofix.bisim_rel,
intros x y rxy,
rcases (liftr_iff r _ _).mp (h x y rxy) with ⟨a, f₀, f₁, dxeq, dyeq, h'⟩,
rw [dxeq, dyeq, ←abs_map, ←abs_map, pfunctor.map_eq, pfunctor.map_eq],
congr' 2 with i,
apply quot.sound,
apply h'
end
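/- Editorial note (not part of the original source): `cofix.bisim` is the coinduction
   principle for `cofix F`. To prove `x = y` it suffices to exhibit a bisimulation,
   i.e. a relation `r` containing the pair `(x, y)` such that related elements have
   `dest`-images related by `liftr r`; `cofix.bisim'` below is a convenient
   reformulation in terms of an invariant `Q` on a seed type. -/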
theorem cofix.bisim' {α : Type*} (Q : α → Prop) (u v : α → cofix F)
(h : ∀ x, Q x → ∃ a f f',
cofix.dest (u x) = abs ⟨a, f⟩ ∧
cofix.dest (v x) = abs ⟨a, f'⟩ ∧
∀ i, ∃ x', Q x' ∧ f i = u x' ∧ f' i = v x') :
∀ x, Q x → u x = v x :=
λ x Qx,
let R := λ w z : cofix F, ∃ x', Q x' ∧ w = u x' ∧ z = v x' in
cofix.bisim R
(λ x y ⟨x', Qx', xeq, yeq⟩,
begin
rcases h x' Qx' with ⟨a, f, f', ux'eq, vx'eq, h'⟩,
rw liftr_iff,
refine ⟨a, f, f', xeq.symm ▸ ux'eq, yeq.symm ▸ vx'eq, h'⟩,
end)
_ _ ⟨x, Qx, rfl, rfl⟩
end qpf
/-
Composition of qpfs.
-/
namespace qpf
variables {F₂ : Type u → Type u} [functor F₂] [q₂ : qpf F₂]
variables {F₁ : Type u → Type u} [functor F₁] [q₁ : qpf F₁]
include q₂ q₁
/-- composition of qpfs gives another qpf -/
def comp : qpf (functor.comp F₂ F₁) :=
{ P := pfunctor.comp (q₂.P) (q₁.P),
abs := λ α,
begin
dsimp [functor.comp],
intro p,
exact abs ⟨p.1.1, λ x, abs ⟨p.1.2 x, λ y, p.2 ⟨x, y⟩⟩⟩
end,
repr := λ α,
begin
dsimp [functor.comp],
intro y,
refine ⟨⟨(repr y).1, λ u, (repr ((repr y).2 u)).1⟩, _⟩,
dsimp [pfunctor.comp],
intro x,
exact (repr ((repr y).2 x.1)).snd x.2
end,
abs_repr := λ α,
begin
abstract
{ dsimp [functor.comp],
intro x,
conv { to_rhs, rw ←abs_repr x},
cases h : repr x with a f,
dsimp,
congr' with x,
cases h' : repr (f x) with b g,
dsimp, rw [←h', abs_repr] }
end,
abs_map := λ α β f,
begin
abstract
{ dsimp [functor.comp, pfunctor.comp],
intro p,
cases p with a g, dsimp,
cases a with b h, dsimp,
symmetry,
transitivity,
symmetry,
apply abs_map,
congr,
rw pfunctor.map_eq,
dsimp [function.comp],
simp [abs_map],
split,
reflexivity,
ext x,
rw ←abs_map,
reflexivity }
end }
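/- Editorial note (not part of the original source): in the composite, a shape of
   `pfunctor.comp q₂.P q₁.P` consists of an outer `q₂.P` shape together with an inner
   `q₁.P` shape for each of its positions, and contents are indexed by pairs of
   positions; `abs` first abstracts each inner shape with `q₁`'s `abs` and then the
   outer one with `q₂`'s, and `repr` goes the other way. -/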
end qpf
/-
Quotients.
We show that if `F` is a qpf and `G` is a suitable quotient of `F`, then `G` is a qpf.
-/
namespace qpf
variables {F : Type u → Type u} [functor F] [q : qpf F]
variables {G : Type u → Type u} [functor G]
variable {FG_abs : Π {α}, F α → G α}
variable {FG_repr : Π {α}, G α → F α}
/-- Given a qpf `F` and a well-behaved surjection `FG_abs` from `F α` to
`G α`, `G` is a qpf. We can consider `G` a quotient on `F` where
elements `x y : F α` are in the same equivalence class if
`FG_abs x = FG_abs y`. -/
def quotient_qpf
(FG_abs_repr : Π {α} (x : G α), FG_abs (FG_repr x) = x)
(FG_abs_map : ∀ {α β} (f : α → β) (x : F α), FG_abs (f <$> x) = f <$> FG_abs x) :
qpf G :=
{ P := q.P,
abs := λ {α} p, FG_abs (abs p),
repr := λ {α} x, repr (FG_repr x),
abs_repr := λ {α} x, by rw [abs_repr, FG_abs_repr],
abs_map := λ {α β} f x, by { rw [abs_map, FG_abs_map] } }
end qpf
/-
Support.
-/
namespace qpf
variables {F : Type u → Type u} [functor F] [q : qpf F]
include q
open functor (liftp liftr supp)
open set
theorem mem_supp {α : Type u} (x : F α) (u : α) :
u ∈ supp x ↔ ∀ a f, abs ⟨a, f⟩ = x → u ∈ f '' univ :=
begin
rw [supp], dsimp, split,
{ intros h a f haf,
have : liftp (λ u, u ∈ f '' univ) x,
{ rw liftp_iff, refine ⟨a, f, haf.symm, λ i, mem_image_of_mem _ (mem_univ _)⟩ },
exact h this },
intros h p, rw liftp_iff,
rintros ⟨a, f, xeq, h'⟩,
rcases h a f xeq.symm with ⟨i, _, hi⟩,
rw ←hi, apply h'
end
theorem supp_eq {α : Type u} (x : F α) : supp x = { u | ∀ a f, abs ⟨a, f⟩ = x → u ∈ f '' univ } :=
by ext; apply mem_supp
theorem has_good_supp_iff {α : Type u} (x : F α) :
(∀ p, liftp p x ↔ ∀ u ∈ supp x, p u) ↔
∃ a f, abs ⟨a, f⟩ = x ∧ ∀ a' f', abs ⟨a', f'⟩ = x → f '' univ ⊆ f' '' univ :=
begin
split,
{ intro h,
have : liftp (supp x) x, by rw h; intro u; exact id,
rw liftp_iff at this, rcases this with ⟨a, f, xeq, h'⟩,
refine ⟨a, f, xeq.symm, _⟩,
intros a' f' h'',
rintros u ⟨i, _, hfi⟩,
have : u ∈ supp x, by rw ←hfi; apply h',
exact (mem_supp x u).mp this _ _ h'' },
rintros ⟨a, f, xeq, h⟩ p, rw liftp_iff, split,
{ rintros ⟨a', f', xeq', h'⟩ u usuppx,
rcases (mem_supp x u).mp usuppx a' f' xeq'.symm with ⟨i, _, f'ieq⟩,
rw ←f'ieq, apply h' },
intro h',
refine ⟨a, f, xeq.symm, _⟩, intro i,
apply h', rw mem_supp,
intros a' f' xeq',
apply h a' f' xeq',
apply mem_image_of_mem _ (mem_univ _)
end
variable (q)
/-- A qpf is said to be uniform if all polynomial-functor representations `⟨a, f⟩`
of a single value have the same range. -/
def is_uniform : Prop := ∀ ⦃α : Type u⦄ (a a' : q.P.A)
(f : q.P.B a → α) (f' : q.P.B a' → α),
abs ⟨a, f⟩ = abs ⟨a', f'⟩ → f '' univ = f' '' univ
/-- does `abs` preserve `liftp`? -/
def liftp_preservation : Prop :=
∀ ⦃α⦄ (p : α → Prop) (x : q.P.obj α), liftp p (abs x) ↔ liftp p x
/-- does `abs` preserve `supp`? -/
def supp_preservation : Prop :=
∀ ⦃α⦄ (x : q.P.obj α), supp (abs x) = supp x
variable [q]
theorem supp_eq_of_is_uniform (h : q.is_uniform) {α : Type u} (a : q.P.A) (f : q.P.B a → α) :
supp (abs ⟨a, f⟩) = f '' univ :=
begin
ext u, rw [mem_supp], split,
{ intro h', apply h' _ _ rfl },
intros h' a' f' e,
rw [←h _ _ _ _ e.symm], apply h'
end
theorem liftp_iff_of_is_uniform (h : q.is_uniform) {α : Type u} (x : F α) (p : α → Prop) :
liftp p x ↔ ∀ u ∈ supp x, p u :=
begin
rw [liftp_iff, ←abs_repr x],
cases repr x with a f, split,
{ rintros ⟨a', f', abseq, hf⟩ u,
rw [supp_eq_of_is_uniform h, h _ _ _ _ abseq],
rintros ⟨i, _, hi⟩, rw ←hi, apply hf },
intro h',
refine ⟨a, f, rfl, λ i, h' _ _⟩,
rw supp_eq_of_is_uniform h,
exact ⟨i, mem_univ i, rfl⟩
end
theorem supp_map (h : q.is_uniform) {α β : Type u} (g : α → β) (x : F α) :
supp (g <$> x) = g '' supp x :=
begin
rw ←abs_repr x, cases repr x with a f, rw [←abs_map, pfunctor.map_eq],
rw [supp_eq_of_is_uniform h, supp_eq_of_is_uniform h, image_comp]
end
theorem supp_preservation_iff_uniform :
q.supp_preservation ↔ q.is_uniform :=
begin
split,
{ intros h α a a' f f' h',
rw [← pfunctor.supp_eq,← pfunctor.supp_eq,← h,h',h] },
{ rintros h α ⟨a,f⟩, rwa [supp_eq_of_is_uniform,pfunctor.supp_eq], }
end
theorem supp_preservation_iff_liftp_preservation :
q.supp_preservation ↔ q.liftp_preservation :=
begin
split; intro h,
{ rintros α p ⟨a,f⟩,
have h' := h, rw supp_preservation_iff_uniform at h',
dsimp only [supp_preservation,supp] at h,
rwa [liftp_iff_of_is_uniform,supp_eq_of_is_uniform,pfunctor.liftp_iff'];
try { assumption },
{ simp only [image_univ, mem_range, exists_imp_distrib],
split; intros; subst_vars; solve_by_elim } },
{ rintros α ⟨a,f⟩,
simp only [liftp_preservation] at h,
simp only [supp,h] }
end
theorem liftp_preservation_iff_uniform :
q.liftp_preservation ↔ q.is_uniform :=
by rw [← supp_preservation_iff_liftp_preservation, supp_preservation_iff_uniform]
end qpf
| 9e49de6b14f6f42ac739aa30894206b2f342c4d1 | fa02ed5a3c9c0adee3c26887a16855e7841c668b | /src/group_theory/archimedean.lean | 25b11da804a7c76bdf6ba8a7f82ecfc5c2926ba6 | ["Apache-2.0"] | permissive | jjgarzella/mathlib | 96a345378c4e0bf26cf604aed84f90329e4896a2 | 395d8716c3ad03747059d482090e2bb97db612c8 | refs/heads/master | 1,686,480,124,379 | 1,625,163,323,000 | 1,625,163,323,000 | 281,190,421 | 2 | 0 | Apache-2.0 | 1,595,268,170,000 | 1,595,268,169,000 | null | UTF-8 | Lean | false | false | 3,266 | lean |
/-
Copyright (c) 2020 Heather Macbeth, Patrick Massot. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Heather Macbeth, Patrick Massot
-/
import group_theory.subgroup
import algebra.archimedean
/-!
# Archimedean groups
This file proves a few facts about ordered groups which satisfy the `archimedean` property, that is:
`class archimedean (α) [ordered_add_comm_monoid α] : Prop :=`
`(arch : ∀ (x : α) {y}, 0 < y → ∃ n : ℕ, x ≤ n • y)`
They are placed here in a separate file (rather than incorporated as a continuation of
`algebra.archimedean`) because they rely on some imports from `group_theory` -- bundled subgroups
in particular.
The main result is `add_subgroup.cyclic_of_min`: a subgroup of a decidable archimedean abelian
group is cyclic, if its set of positive elements has a minimal element.
This result is used in this file to deduce `int.subgroup_cyclic`, proving that every subgroup of `ℤ`
is cyclic. (There are several other methods one could use to prove this fact, including more purely
algebraic methods, but none seem to exist in mathlib as of writing. The closest is
`subgroup.is_cyclic`, but that has not been transferred to `add_subgroup`.)
The result is also used in `topology.instances.real` as an ingredient in the classification of
subgroups of `ℝ`.
-/
variables {G : Type*} [linear_ordered_add_comm_group G] [archimedean G]
open linear_ordered_add_comm_group
/-- Given a subgroup `H` of a decidable linearly ordered archimedean abelian group `G`, if there
exists a minimal element `a` of `H ∩ G_{>0}` then `H` is generated by `a`. -/
lemma add_subgroup.cyclic_of_min {H : add_subgroup G} {a : G}
(ha : is_least {g : G | g ∈ H ∧ 0 < g} a) : H = add_subgroup.closure {a} :=
begin
obtain ⟨⟨a_in, a_pos⟩, a_min⟩ := ha,
refine le_antisymm _ (H.closure_le.mpr $ by simp [a_in]),
intros g g_in,
obtain ⟨k, nonneg, lt⟩ : ∃ k, 0 ≤ g - k • a ∧ g - k • a < a :=
exists_int_smul_near_of_pos' a_pos g,
have h_zero : g - k • a = 0,
{ by_contra h,
have h : a ≤ g - k • a,
{ refine a_min ⟨_, _⟩,
{ exact add_subgroup.sub_mem H g_in (add_subgroup.gsmul_mem H a_in k) },
{ exact lt_of_le_of_ne nonneg (ne.symm h) } },
have h' : ¬ (a ≤ g - k • a) := not_le.mpr lt,
contradiction },
simp [sub_eq_zero.mp h_zero, add_subgroup.mem_closure_singleton],
end
/-- Every subgroup of `ℤ` is cyclic. -/
lemma int.subgroup_cyclic (H : add_subgroup ℤ) : ∃ a, H = add_subgroup.closure {a} :=
begin
cases add_subgroup.bot_or_exists_ne_zero H with h h,
{ use 0,
rw h,
exact add_subgroup.closure_singleton_zero.symm },
let s := {g : ℤ | g ∈ H ∧ 0 < g},
have h_bdd : ∀ g ∈ s, (0 : ℤ) ≤ g := λ _ h, le_of_lt h.2,
obtain ⟨g₀, g₀_in, g₀_ne⟩ := h,
obtain ⟨g₁, g₁_in, g₁_pos⟩ : ∃ g₁ : ℤ, g₁ ∈ H ∧ 0 < g₁,
{ cases lt_or_gt_of_ne g₀_ne with Hg₀ Hg₀,
{ exact ⟨-g₀, H.neg_mem g₀_in, neg_pos.mpr Hg₀⟩ },
{ exact ⟨g₀, g₀_in, Hg₀⟩ } },
obtain ⟨a, ha, ha'⟩ := int.exists_least_of_bdd ⟨(0 : ℤ), h_bdd⟩ ⟨g₁, g₁_in, g₁_pos⟩,
exact ⟨a, add_subgroup.cyclic_of_min ⟨ha, ha'⟩⟩,
end
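/- Illustrative usage (editorial addition, not part of the original file): any concrete
   subgroup of `ℤ`, for instance the trivial subgroup, is the closure of some singleton. -/
example : ∃ a : ℤ, (⊥ : add_subgroup ℤ) = add_subgroup.closure {a} :=
int.subgroup_cyclic ⊥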
| d7befb942287d4e2088ff53906c1a86361621222 | 3c9dc4ea6cc92e02634ef557110bde9eae393338 | /src/Lean/Environment.lean | 2997d4ffa212a8c7fc768f6058c142d5499b0d30 | ["Apache-2.0"] | permissive | shingtaklam1324/lean4 | 3d7efe0c8743a4e33d3c6f4adbe1300df2e71492 | 351285a2e8ad0cef37af05851cfabf31edfb5970 | refs/heads/master | 1,676,827,679,740 | 1,610,462,623,000 | 1,610,552,340,000 | null | 0 | 0 | null | null | null | null | UTF-8 | Lean | false | false | 30,215 | lean |
/-
Copyright (c) 2019 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
import Std.Data.HashMap
import Lean.Data.SMap
import Lean.Declaration
import Lean.LocalContext
import Lean.Util.Path
import Lean.Util.FindExpr
import Lean.Util.Profile
namespace Lean
/- Opaque environment extension state. -/
constant EnvExtensionStateSpec : PointedType.{0}
def EnvExtensionState : Type := EnvExtensionStateSpec.type
instance : Inhabited EnvExtensionState where
default := EnvExtensionStateSpec.val
def ModuleIdx := Nat
instance : Inhabited ModuleIdx := inferInstanceAs (Inhabited Nat)
abbrev ConstMap := SMap Name ConstantInfo
structure Import where
module : Name
runtimeOnly : Bool := false
instance : ToString Import := ⟨fun imp => toString imp.module ++ if imp.runtimeOnly then " (runtime)" else ""⟩
/--
A compacted region holds multiple Lean objects in a contiguous memory region, which can be read/written to/from disk.
Objects inside the region do not have reference counters and cannot be freed individually. The contents of .olean
files are compacted regions. -/
def CompactedRegion := USize
/-- Free a compacted region and its contents. No live references to the contents may exist at the time of invocation. -/
@[extern 2 "lean_compacted_region_free"]
unsafe constant CompactedRegion.free : CompactedRegion → IO Unit
/- Environment fields that are not used often. -/
structure EnvironmentHeader where
trustLevel : UInt32 := 0
quotInit : Bool := false
mainModule : Name := arbitrary
imports : Array Import := #[] -- direct imports
regions : Array CompactedRegion := #[] -- compacted regions of all imported modules
moduleNames : Array Name := #[] -- names of all imported modules
deriving Inhabited
open Std (HashMap)
structure Environment where
const2ModIdx : HashMap Name ModuleIdx
constants : ConstMap
extensions : Array EnvExtensionState
header : EnvironmentHeader := {}
deriving Inhabited
namespace Environment
def addAux (env : Environment) (cinfo : ConstantInfo) : Environment :=
{ env with constants := env.constants.insert cinfo.name cinfo }
@[export lean_environment_find]
def find? (env : Environment) (n : Name) : Option ConstantInfo :=
/- It is safe to use `find?'` because we never overwrite imported declarations. -/
env.constants.find?' n
def contains (env : Environment) (n : Name) : Bool :=
env.constants.contains n
def imports (env : Environment) : Array Import :=
env.header.imports
def allImportedModuleNames (env : Environment) : Array Name :=
env.header.moduleNames
@[export lean_environment_set_main_module]
def setMainModule (env : Environment) (m : Name) : Environment :=
{ env with header := { env.header with mainModule := m } }
@[export lean_environment_main_module]
def mainModule (env : Environment) : Name :=
env.header.mainModule
@[export lean_environment_mark_quot_init]
private def markQuotInit (env : Environment) : Environment :=
{ env with header := { env.header with quotInit := true } }
@[export lean_environment_quot_init]
private def isQuotInit (env : Environment) : Bool :=
env.header.quotInit
@[export lean_environment_trust_level]
private def getTrustLevel (env : Environment) : UInt32 :=
env.header.trustLevel
def getModuleIdxFor? (env : Environment) (c : Name) : Option ModuleIdx :=
env.const2ModIdx.find? c
def isConstructor (env : Environment) (c : Name) : Bool :=
match env.find? c with
| ConstantInfo.ctorInfo _ => true
| _ => false
end Environment
inductive KernelException where
| unknownConstant (env : Environment) (name : Name)
| alreadyDeclared (env : Environment) (name : Name)
| declTypeMismatch (env : Environment) (decl : Declaration) (givenType : Expr)
| declHasMVars (env : Environment) (name : Name) (expr : Expr)
| declHasFVars (env : Environment) (name : Name) (expr : Expr)
| funExpected (env : Environment) (lctx : LocalContext) (expr : Expr)
| typeExpected (env : Environment) (lctx : LocalContext) (expr : Expr)
| letTypeMismatch (env : Environment) (lctx : LocalContext) (name : Name) (givenType : Expr) (expectedType : Expr)
| exprTypeMismatch (env : Environment) (lctx : LocalContext) (expr : Expr) (expectedType : Expr)
| appTypeMismatch (env : Environment) (lctx : LocalContext) (app : Expr) (funType : Expr) (argType : Expr)
| invalidProj (env : Environment) (lctx : LocalContext) (proj : Expr)
| other (msg : String)
namespace Environment
/- Type check given declaration and add it to the environment -/
@[extern "lean_add_decl"]
constant addDecl (env : Environment) (decl : @& Declaration) : Except KernelException Environment
/- Compile the given declaration, it assumes the declaration has already been added to the environment using `addDecl`. -/
@[extern "lean_compile_decl"]
constant compileDecl (env : Environment) (opt : @& Options) (decl : @& Declaration) : Except KernelException Environment
def addAndCompile (env : Environment) (opt : Options) (decl : Declaration) : Except KernelException Environment := do
let env ← addDecl env decl
compileDecl env opt decl
end Environment
/- Interface for managing environment extensions. -/
structure EnvExtensionInterface where
ext : Type → Type
inhabitedExt {σ} : Inhabited σ → Inhabited (ext σ)
registerExt {σ} (mkInitial : IO σ) : IO (ext σ)
setState {σ} (e : ext σ) (env : Environment) : σ → Environment
modifyState {σ} (e : ext σ) (env : Environment) : (σ → σ) → Environment
getState {σ} (e : ext σ) (env : Environment) : σ
mkInitialExtStates : IO (Array EnvExtensionState)
instance : Inhabited EnvExtensionInterface where
default := {
ext := id,
inhabitedExt := id,
registerExt := fun mk => mk,
setState := fun _ env _ => env,
modifyState := fun _ env _ => env,
getState := fun ext _ => ext,
mkInitialExtStates := pure #[]
}
/- Unsafe implementation of `EnvExtensionInterface` -/
namespace EnvExtensionInterfaceUnsafe
structure Ext (σ : Type) where
idx : Nat
mkInitial : IO σ
deriving Inhabited
private def mkEnvExtensionsRef : IO (IO.Ref (Array (Ext EnvExtensionState))) := IO.mkRef #[]
@[builtinInit mkEnvExtensionsRef] private constant envExtensionsRef : IO.Ref (Array (Ext EnvExtensionState))
unsafe def setState {σ} (ext : Ext σ) (env : Environment) (s : σ) : Environment :=
{ env with extensions := env.extensions.set! ext.idx (unsafeCast s) }
@[inline] unsafe def modifyState {σ : Type} (ext : Ext σ) (env : Environment) (f : σ → σ) : Environment :=
{ env with
extensions := env.extensions.modify ext.idx fun s =>
let s : σ := unsafeCast s;
let s : σ := f s;
unsafeCast s }
unsafe def getState {σ} (ext : Ext σ) (env : Environment) : σ :=
let s : EnvExtensionState := env.extensions.get! ext.idx
unsafeCast s
unsafe def registerExt {σ} (mkInitial : IO σ) : IO (Ext σ) := do
let initializing ← IO.initializing
  unless initializing do throw (IO.userError "failed to register environment extension: extensions can only be registered during initialization")
let exts ← envExtensionsRef.get
let idx := exts.size
let ext : Ext σ := {
idx := idx,
mkInitial := mkInitial,
}
envExtensionsRef.modify fun exts => exts.push (unsafeCast ext)
pure ext
def mkInitialExtStates : IO (Array EnvExtensionState) := do
let exts ← envExtensionsRef.get
exts.mapM fun ext => ext.mkInitial
unsafe def imp : EnvExtensionInterface := {
ext := Ext,
inhabitedExt := fun _ => ⟨arbitrary⟩,
registerExt := registerExt,
setState := setState,
modifyState := modifyState,
getState := getState,
mkInitialExtStates := mkInitialExtStates
}
end EnvExtensionInterfaceUnsafe
@[implementedBy EnvExtensionInterfaceUnsafe.imp]
constant EnvExtensionInterfaceImp : EnvExtensionInterface
def EnvExtension (σ : Type) : Type := EnvExtensionInterfaceImp.ext σ
namespace EnvExtension
instance {σ} [s : Inhabited σ] : Inhabited (EnvExtension σ) := EnvExtensionInterfaceImp.inhabitedExt s
def setState {σ : Type} (ext : EnvExtension σ) (env : Environment) (s : σ) : Environment := EnvExtensionInterfaceImp.setState ext env s
def modifyState {σ : Type} (ext : EnvExtension σ) (env : Environment) (f : σ → σ) : Environment := EnvExtensionInterfaceImp.modifyState ext env f
def getState {σ : Type} (ext : EnvExtension σ) (env : Environment) : σ := EnvExtensionInterfaceImp.getState ext env
end EnvExtension
/- Environment extensions can only be registered during initialization.
Reasons:
1- Our implementation assumes the number of extensions does not change after an environment object is created.
2- We do not use any synchronization primitive to access `envExtensionsRef`. -/
def registerEnvExtension {σ : Type} (mkInitial : IO σ) : IO (EnvExtension σ) := EnvExtensionInterfaceImp.registerExt mkInitial
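/- Hypothetical usage sketch (`counterExt` is an illustrative name, not part of this module):
   a non-persistent extension holding a `Nat` counter would be registered during initialization,
   e.g.

     builtin_initialize counterExt : EnvExtension Nat ← registerEnvExtension (pure 0)

   and then read and updated with `counterExt.getState env` and
   `counterExt.modifyState env (fun n => n + 1)`. -/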
private def mkInitialExtensionStates : IO (Array EnvExtensionState) := EnvExtensionInterfaceImp.mkInitialExtStates
@[export lean_mk_empty_environment]
def mkEmptyEnvironment (trustLevel : UInt32 := 0) : IO Environment := do
let initializing ← IO.initializing
if initializing then throw (IO.userError "environment objects cannot be created during initialization")
let exts ← mkInitialExtensionStates
pure {
const2ModIdx := {},
constants := {},
header := { trustLevel := trustLevel },
extensions := exts
}
structure PersistentEnvExtensionState (α : Type) (σ : Type) where
importedEntries : Array (Array α) -- entries per imported module
state : σ
structure ImportM.Context where
env : Environment
opts : Options
abbrev ImportM := ReaderT Lean.ImportM.Context IO
/- An environment extension with support for storing/retrieving entries from a .olean file.
- α is the type of the entries that are stored in .olean files.
- β is the type of values used to update the state.
- σ is the actual state.
Remark: for most extensions α and β coincide.
Note that `addEntryFn` is not in `IO`. This is intentional, and allows us to write simple functions such as
```
def addAlias (env : Environment) (a : Name) (e : Name) : Environment :=
aliasExtension.addEntry env (a, e)
```
without using `IO`. We have many functions like `addAlias`.
`α` and `β` do not coincide for extensions where the data used to update the state contains, for example,
closures which we currently cannot store in files. -/
structure PersistentEnvExtension (α : Type) (β : Type) (σ : Type) where
toEnvExtension : EnvExtension (PersistentEnvExtensionState α σ)
name : Name
addImportedFn : Array (Array α) → ImportM σ
addEntryFn : σ → β → σ
exportEntriesFn : σ → Array α
statsFn : σ → Format
/- Opaque persistent environment extension entry. -/
constant EnvExtensionEntrySpec : PointedType.{0}
def EnvExtensionEntry : Type := EnvExtensionEntrySpec.type
instance : Inhabited EnvExtensionEntry := ⟨EnvExtensionEntrySpec.val⟩
instance {α σ} [Inhabited σ] : Inhabited (PersistentEnvExtensionState α σ) :=
⟨{importedEntries := #[], state := arbitrary }⟩
instance {α β σ} [Inhabited σ] : Inhabited (PersistentEnvExtension α β σ) where
default := {
toEnvExtension := arbitrary,
name := arbitrary,
addImportedFn := fun _ => arbitrary,
addEntryFn := fun s _ => s,
exportEntriesFn := fun _ => #[],
statsFn := fun _ => Format.nil
}
namespace PersistentEnvExtension
def getModuleEntries {α β σ : Type} (ext : PersistentEnvExtension α β σ) (env : Environment) (m : ModuleIdx) : Array α :=
(ext.toEnvExtension.getState env).importedEntries.get! m
def addEntry {α β σ : Type} (ext : PersistentEnvExtension α β σ) (env : Environment) (b : β) : Environment :=
ext.toEnvExtension.modifyState env fun s =>
let state := ext.addEntryFn s.state b;
{ s with state := state }
def getState {α β σ : Type} (ext : PersistentEnvExtension α β σ) (env : Environment) : σ :=
(ext.toEnvExtension.getState env).state
def setState {α β σ : Type} (ext : PersistentEnvExtension α β σ) (env : Environment) (s : σ) : Environment :=
ext.toEnvExtension.modifyState env $ fun ps => { ps with state := s }
def modifyState {α β σ : Type} (ext : PersistentEnvExtension α β σ) (env : Environment) (f : σ → σ) : Environment :=
ext.toEnvExtension.modifyState env $ fun ps => { ps with state := f (ps.state) }
end PersistentEnvExtension
builtin_initialize persistentEnvExtensionsRef : IO.Ref (Array (PersistentEnvExtension EnvExtensionEntry EnvExtensionEntry EnvExtensionState)) ← IO.mkRef #[]
structure PersistentEnvExtensionDescr (α β σ : Type) where
name : Name
mkInitial : IO σ
addImportedFn : Array (Array α) → ImportM σ
addEntryFn : σ → β → σ
exportEntriesFn : σ → Array α
statsFn : σ → Format := fun _ => Format.nil
unsafe def registerPersistentEnvExtensionUnsafe {α β σ : Type} [Inhabited σ] (descr : PersistentEnvExtensionDescr α β σ) : IO (PersistentEnvExtension α β σ) := do
let pExts ← persistentEnvExtensionsRef.get
if pExts.any (fun ext => ext.name == descr.name) then throw (IO.userError s!"invalid environment extension, '{descr.name}' has already been used")
let ext ← registerEnvExtension do
let initial ← descr.mkInitial
let s : PersistentEnvExtensionState α σ := {
importedEntries := #[],
state := initial
}
pure s
let pExt : PersistentEnvExtension α β σ := {
toEnvExtension := ext,
name := descr.name,
addImportedFn := descr.addImportedFn,
addEntryFn := descr.addEntryFn,
exportEntriesFn := descr.exportEntriesFn,
statsFn := descr.statsFn
}
persistentEnvExtensionsRef.modify fun pExts => pExts.push (unsafeCast pExt)
return pExt
@[implementedBy registerPersistentEnvExtensionUnsafe]
constant registerPersistentEnvExtension {α β σ : Type} [Inhabited σ] (descr : PersistentEnvExtensionDescr α β σ) : IO (PersistentEnvExtension α β σ)
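/- Hypothetical usage sketch (`myNamesExt` is an illustrative name, not part of this module):
   a persistent extension whose entries and update values are `Name`s and whose state is an
   `Array Name` could be registered during initialization as

     builtin_initialize myNamesExt : PersistentEnvExtension Name Name (Array Name) ←
       registerPersistentEnvExtension {
         name            := `myNames,
         mkInitial       := pure #[],
         addImportedFn   := fun as => pure (as.foldl Array.append #[]),
         addEntryFn      := fun s n => s.push n,
         exportEntriesFn := fun s => s
       }

   For the common case where the entry and update types coincide, prefer the
   `SimplePersistentEnvExtension` wrapper defined below. -/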
/- Simple PersistentEnvExtension that implements exportEntriesFn using a list of entries. -/
def SimplePersistentEnvExtension (α σ : Type) := PersistentEnvExtension α α (List α × σ)
@[specialize] def mkStateFromImportedEntries {α σ : Type} (addEntryFn : σ → α → σ) (initState : σ) (as : Array (Array α)) : σ :=
as.foldl (fun r es => es.foldl (fun r e => addEntryFn r e) r) initState
structure SimplePersistentEnvExtensionDescr (α σ : Type) where
name : Name
addEntryFn : σ → α → σ
addImportedFn : Array (Array α) → σ
toArrayFn : List α → Array α := fun es => es.toArray
def registerSimplePersistentEnvExtension {α σ : Type} [Inhabited σ] (descr : SimplePersistentEnvExtensionDescr α σ) : IO (SimplePersistentEnvExtension α σ) :=
registerPersistentEnvExtension {
name := descr.name,
mkInitial := pure ([], descr.addImportedFn #[]),
addImportedFn := fun as => pure ([], descr.addImportedFn as),
addEntryFn := fun s e => match s with
| (entries, s) => (e::entries, descr.addEntryFn s e),
exportEntriesFn := fun s => descr.toArrayFn s.1.reverse,
statsFn := fun s => format "number of local entries: " ++ format s.1.length
}
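/- Hypothetical usage sketch (`myNamesSimpleExt` is an illustrative name); the `namespacesExt`
   extension registered later in this file follows the same pattern:

     builtin_initialize myNamesSimpleExt : SimplePersistentEnvExtension Name NameSet ←
       registerSimplePersistentEnvExtension {
         name          := `myNamesSimple,
         addImportedFn := mkStateFromImportedEntries NameSet.insert {},
         addEntryFn    := fun s n => s.insert n
       }
-/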
namespace SimplePersistentEnvExtension
instance {α σ : Type} [Inhabited σ] : Inhabited (SimplePersistentEnvExtension α σ) :=
inferInstanceAs (Inhabited (PersistentEnvExtension α α (List α × σ)))
def getEntries {α σ : Type} (ext : SimplePersistentEnvExtension α σ) (env : Environment) : List α :=
(PersistentEnvExtension.getState ext env).1
def getState {α σ : Type} (ext : SimplePersistentEnvExtension α σ) (env : Environment) : σ :=
(PersistentEnvExtension.getState ext env).2
def setState {α σ : Type} (ext : SimplePersistentEnvExtension α σ) (env : Environment) (s : σ) : Environment :=
PersistentEnvExtension.modifyState ext env (fun ⟨entries, _⟩ => (entries, s))
def modifyState {α σ : Type} (ext : SimplePersistentEnvExtension α σ) (env : Environment) (f : σ → σ) : Environment :=
PersistentEnvExtension.modifyState ext env (fun ⟨entries, s⟩ => (entries, f s))
end SimplePersistentEnvExtension
/-- Environment extension for tagging declarations.
Declarations must only be tagged in the module where they were declared. -/
def TagDeclarationExtension := SimplePersistentEnvExtension Name NameSet
def mkTagDeclarationExtension (name : Name) : IO TagDeclarationExtension :=
registerSimplePersistentEnvExtension {
name := name,
addImportedFn := fun as => {},
addEntryFn := fun s n => s.insert n,
toArrayFn := fun es => es.toArray.qsort Name.quickLt
}
namespace TagDeclarationExtension
instance : Inhabited TagDeclarationExtension :=
inferInstanceAs (Inhabited (SimplePersistentEnvExtension Name NameSet))
def tag (ext : TagDeclarationExtension) (env : Environment) (n : Name) : Environment :=
ext.addEntry env n
def isTagged (ext : TagDeclarationExtension) (env : Environment) (n : Name) : Bool :=
match env.getModuleIdxFor? n with
| some modIdx => (ext.getModuleEntries env modIdx).binSearchContains n Name.quickLt
| none => (ext.getState env).contains n
end TagDeclarationExtension
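/- Hypothetical usage sketch (`myTagExt`, `tagFoo`, `isFooTagged` and `foo` are illustrative
   names): a tagging extension is registered during initialization, tags declarations of the
   current module, and can be queried later.

     builtin_initialize myTagExt : TagDeclarationExtension ← mkTagDeclarationExtension `myTag

     def tagFoo (env : Environment) : Environment :=
       myTagExt.tag env `foo        -- `foo` must be declared in the current module

     def isFooTagged (env : Environment) : Bool :=
       myTagExt.isTagged env `foo
-/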
/-- Environment extension for mapping declarations to values. -/
def MapDeclarationExtension (α : Type) := SimplePersistentEnvExtension (Name × α) (NameMap α)
def mkMapDeclarationExtension [Inhabited α] (name : Name) : IO (MapDeclarationExtension α) :=
registerSimplePersistentEnvExtension {
name := name,
addImportedFn := fun as => {},
    addEntryFn := fun s n => s.insert n.1 n.2,
toArrayFn := fun es => es.toArray.qsort (fun a b => Name.quickLt a.1 b.1)
}
namespace MapDeclarationExtension
instance : Inhabited (MapDeclarationExtension α) :=
inferInstanceAs (Inhabited (SimplePersistentEnvExtension ..))
def insert (ext : MapDeclarationExtension α) (env : Environment) (declName : Name) (val : α) : Environment :=
ext.addEntry env (declName, val)
def find? [Inhabited α] (ext : MapDeclarationExtension α) (env : Environment) (declName : Name) : Option α :=
match env.getModuleIdxFor? declName with
| some modIdx =>
match (ext.getModuleEntries env modIdx).binSearch (declName, arbitrary) (fun a b => Name.quickLt a.1 b.1) with
| some e => some e.2
| none => none
| none => (ext.getState env).find? declName
def contains [Inhabited α] (ext : MapDeclarationExtension α) (env : Environment) (declName : Name) : Bool :=
match env.getModuleIdxFor? declName with
| some modIdx => (ext.getModuleEntries env modIdx).binSearchContains (declName, arbitrary) (fun a b => Name.quickLt a.1 b.1)
| none => (ext.getState env).contains declName
end MapDeclarationExtension
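/- Hypothetical usage sketch (`myDocExt`, `addMyDoc` and `findMyDoc?` are illustrative names):
   map declarations to strings, inserting each entry in the module that declares the name.

     builtin_initialize myDocExt : MapDeclarationExtension String ← mkMapDeclarationExtension `myDoc

     def addMyDoc (env : Environment) (declName : Name) (doc : String) : Environment :=
       myDocExt.insert env declName doc

     def findMyDoc? (env : Environment) (declName : Name) : Option String :=
       myDocExt.find? env declName
-/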
/- Content of a .olean file.
We use `compact.cpp` to generate the image of this object in disk. -/
structure ModuleData where
imports : Array Import
constants : Array ConstantInfo
entries : Array (Name × Array EnvExtensionEntry)
instance : Inhabited ModuleData :=
⟨{imports := arbitrary, constants := arbitrary, entries := arbitrary }⟩
@[extern 3 "lean_save_module_data"]
constant saveModuleData (fname : @& String) (m : ModuleData) : IO Unit
@[extern 2 "lean_read_module_data"]
constant readModuleData (fname : @& String) : IO (ModuleData × CompactedRegion)
/--
Free compacted regions of imports. No live references to imported objects may exist at the time of invocation; in
particular, `env` should be the last reference to any `Environment` derived from these imports. -/
@[noinline, export lean_environment_free_regions]
unsafe def Environment.freeRegions (env : Environment) : IO Unit :=
/-
NOTE: This assumes `env` is not inferred as a borrowed parameter, and is freed after extracting the `header` field.
Otherwise, we would encounter undefined behavior when the constant map in `env`, which may reference objects in
compacted regions, is freed after the regions.
In the currently produced IR, we indeed see:
```
def Lean.Environment.freeRegions (x_1 : obj) (x_2 : obj) : obj :=
let x_3 : obj := proj[3] x_1;
inc x_3;
dec x_1;
...
```
TODO: statically check for this. -/
env.header.regions.forM CompactedRegion.free
def mkModuleData (env : Environment) : IO ModuleData := do
let pExts ← persistentEnvExtensionsRef.get
let entries : Array (Name × Array EnvExtensionEntry) := pExts.size.fold
(fun i result =>
let state := (pExts.get! i).getState env
let exportEntriesFn := (pExts.get! i).exportEntriesFn
let extName := (pExts.get! i).name
result.push (extName, exportEntriesFn state))
#[]
pure {
imports := env.header.imports,
constants := env.constants.foldStage2 (fun cs _ c => cs.push c) #[],
entries := entries
}
@[export lean_write_module]
def writeModule (env : Environment) (fname : String) : IO Unit := do
let modData ← mkModuleData env; saveModuleData fname modData
private partial def getEntriesFor (mod : ModuleData) (extId : Name) (i : Nat) : Array EnvExtensionEntry :=
if i < mod.entries.size then
let curr := mod.entries.get! i;
if curr.1 == extId then curr.2 else getEntriesFor mod extId (i+1)
else
#[]
private def setImportedEntries (env : Environment) (mods : Array ModuleData) : IO Environment := do
let mut env := env
let pExtDescrs ← persistentEnvExtensionsRef.get
for mod in mods do
for extDescr in pExtDescrs do
let entries := getEntriesFor mod extDescr.name 0
      env := extDescr.toEnvExtension.modifyState env fun s => { s with importedEntries := s.importedEntries.push entries }
return env
private def finalizePersistentExtensions (env : Environment) (opts : Options) : IO Environment := do
let mut env := env
let pExtDescrs ← persistentEnvExtensionsRef.get
for extDescr in pExtDescrs do
let s := extDescr.toEnvExtension.getState env
let newState ← extDescr.addImportedFn s.importedEntries { env := env, opts := opts }
    env := extDescr.toEnvExtension.setState env { s with state := newState }
return env
structure ImportState where
moduleNameSet : NameSet := {}
moduleNames : Array Name := #[]
moduleData : Array ModuleData := #[]
regions : Array CompactedRegion := #[]
@[export lean_import_modules]
partial def importModules (imports : List Import) (opts : Options) (trustLevel : UInt32 := 0) : IO Environment := profileitIO "import" opts ⟨0, 0⟩ do
let (_, s) ← importMods imports |>.run {}
  -- `s : ImportState` contains the imported module names, module data, and compacted regions
let mut modIdx : Nat := 0
let mut const2ModIdx : HashMap Name ModuleIdx := {}
let mut constants : ConstMap := SMap.empty
for mod in s.moduleData do
for cinfo in mod.constants do
const2ModIdx := const2ModIdx.insert cinfo.name modIdx
if constants.contains cinfo.name then throw (IO.userError s!"import failed, environment already contains '{cinfo.name}'")
constants := constants.insert cinfo.name cinfo
modIdx := modIdx + 1
constants := constants.switch
let exts ← mkInitialExtensionStates
let env : Environment := {
const2ModIdx := const2ModIdx,
constants := constants,
extensions := exts,
header := {
quotInit := !imports.isEmpty, -- We assume `core.lean` initializes quotient module
trustLevel := trustLevel,
imports := imports.toArray,
regions := s.regions,
moduleNames := s.moduleNames
}
}
let env ← setImportedEntries env s.moduleData
let env ← finalizePersistentExtensions env opts
pure env
where
importMods : List Import → StateRefT ImportState IO Unit
| [] => pure ()
| i::is => do
if i.runtimeOnly || (← get).moduleNameSet.contains i.module then
importMods is
else do
modify fun s => { s with moduleNameSet := s.moduleNameSet.insert i.module }
let mFile ← findOLean i.module
unless (← IO.fileExists mFile) do
throw $ IO.userError s!"object file '{mFile}' of module {i.module} does not exist"
let (mod, region) ← readModuleData mFile
importMods mod.imports.toList
modify fun s => { s with
moduleData := s.moduleData.push mod
regions := s.regions.push region
moduleNames := s.moduleNames.push i.module
}
importMods is
/--
Create environment object from imports and free compacted regions after calling `act`. No live references to the
environment object or imported objects may exist after `act` finishes. -/
unsafe def withImportModules {α : Type} (imports : List Import) (opts : Options) (trustLevel : UInt32 := 0) (x : Environment → IO α) : IO α := do
let env ← importModules imports opts trustLevel
try x env finally env.freeRegions
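/- Hypothetical usage sketch (`printNumConsts` is an illustrative name): import a module, run an
   action against the resulting environment, and let `withImportModules` free the compacted
   regions afterwards.

     unsafe def printNumConsts : IO Unit :=
       withImportModules [{ module := `Init }] {} 0 fun env =>
         IO.println s!"number of constants: {env.constants.size}"
-/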
builtin_initialize namespacesExt : SimplePersistentEnvExtension Name NameSet ←
registerSimplePersistentEnvExtension {
name := `namespaces,
addImportedFn := fun as => mkStateFromImportedEntries NameSet.insert {} as,
addEntryFn := fun s n => s.insert n
}
namespace Environment
def registerNamespace (env : Environment) (n : Name) : Environment :=
if (namespacesExt.getState env).contains n then env else namespacesExt.addEntry env n
def isNamespace (env : Environment) (n : Name) : Bool :=
(namespacesExt.getState env).contains n
def getNamespaceSet (env : Environment) : NameSet :=
namespacesExt.getState env
private def isNamespaceName : Name → Bool
| Name.str Name.anonymous _ _ => true
| Name.str p _ _ => isNamespaceName p
| _ => false
private def registerNamePrefixes : Environment → Name → Environment
| env, Name.str p _ _ => if isNamespaceName p then registerNamePrefixes (registerNamespace env p) p else env
| env, _ => env
@[export lean_environment_add]
def add (env : Environment) (cinfo : ConstantInfo) : Environment :=
let env := registerNamePrefixes env cinfo.name
env.addAux cinfo
@[export lean_display_stats]
def displayStats (env : Environment) : IO Unit := do
let pExtDescrs ← persistentEnvExtensionsRef.get
let numModules := ((pExtDescrs.get! 0).toEnvExtension.getState env).importedEntries.size;
IO.println ("direct imports: " ++ toString env.header.imports);
IO.println ("number of imported modules: " ++ toString numModules);
IO.println ("number of consts: " ++ toString env.constants.size);
IO.println ("number of imported consts: " ++ toString env.constants.stageSizes.1);
IO.println ("number of local consts: " ++ toString env.constants.stageSizes.2);
IO.println ("number of buckets for imported consts: " ++ toString env.constants.numBuckets);
IO.println ("trust level: " ++ toString env.header.trustLevel);
IO.println ("number of extensions: " ++ toString env.extensions.size);
pExtDescrs.forM $ fun extDescr => do
IO.println ("extension '" ++ toString extDescr.name ++ "'")
let s := extDescr.toEnvExtension.getState env
let fmt := extDescr.statsFn s.state
unless fmt.isNil do IO.println (" " ++ toString (Format.nest 2 (extDescr.statsFn s.state)))
IO.println (" number of imported entries: " ++ toString (s.importedEntries.foldl (fun sum es => sum + es.size) 0))
@[extern "lean_eval_const"]
unsafe constant evalConst (α) (env : @& Environment) (opts : @& Options) (constName : @& Name) : Except String α
private def throwUnexpectedType {α} (typeName : Name) (constName : Name) : ExceptT String Id α :=
throw ("unexpected type at '" ++ toString constName ++ "', `" ++ toString typeName ++ "` expected")
/-- Like `evalConst`, but first check that `constName` indeed is a declaration of type `typeName`.
This function is still unsafe because it cannot guarantee that `typeName` is in fact the name of the type `α`. -/
unsafe def evalConstCheck (α) (env : Environment) (opts : Options) (typeName : Name) (constName : Name) : ExceptT String Id α :=
match env.find? constName with
| none => throw ("unknow constant '" ++ toString constName ++ "'")
| some info =>
match info.type with
| Expr.const c _ _ =>
if c != typeName then throwUnexpectedType typeName constName
else env.evalConst α opts constName
| _ => throwUnexpectedType typeName constName
def hasUnsafe (env : Environment) (e : Expr) : Bool :=
let c? := e.find? $ fun e => match e with
| Expr.const c _ _ =>
match env.find? c with
| some cinfo => cinfo.isUnsafe
| none => false
| _ => false;
c?.isSome
end Environment
namespace Kernel
/- Kernel API -/
/--
Kernel isDefEq predicate. We use it mainly for debugging purposes.
Recall that the Kernel type checker does not support metavariables.
When implementing automation, consider using the `MetaM` methods. -/
@[extern "lean_kernel_is_def_eq"]
constant isDefEq (env : Environment) (lctx : LocalContext) (a b : Expr) : Bool
/--
Kernel WHNF function. We use it mainly for debugging purposes.
Recall that the Kernel type checker does not support metavariables.
When implementing automation, consider using the `MetaM` methods. -/
@[extern "lean_kernel_whnf"]
constant whnf (env : Environment) (lctx : LocalContext) (a : Expr) : Expr
end Kernel
class MonadEnv (m : Type → Type) where
getEnv : m Environment
modifyEnv : (Environment → Environment) → m Unit
export MonadEnv (getEnv modifyEnv)
instance (m n) [MonadEnv m] [MonadLift m n] : MonadEnv n := {
getEnv := liftM (getEnv : m Environment),
modifyEnv := fun f => liftM (modifyEnv f : m Unit)
}
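/- Hypothetical usage sketch (`containsDecl` is an illustrative name): any monad with a
   `MonadEnv` instance can query and update the environment, e.g.

     def containsDecl {m} [Monad m] [MonadEnv m] (declName : Name) : m Bool := do
       let env ← getEnv
       return env.contains declName
-/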
end Lean
|
2426b5814da9ffd064f040065a0cdfb5f8c05996
|
30b012bb72d640ec30c8fdd4c45fdfa67beb012c
|
/data/real/basic.lean
|
1fbfd9ea700f6e102fb97178888a78f575b24b3d
|
[
"Apache-2.0"
] |
permissive
|
kckennylau/mathlib
|
21fb810b701b10d6606d9002a4004f7672262e83
|
47b3477e20ffb5a06588dd3abb01fe0fe3205646
|
refs/heads/master
| 1,634,976,409,281
| 1,542,042,832,000
| 1,542,319,733,000
| 109,560,458
| 0
| 0
|
Apache-2.0
| 1,542,369,208,000
| 1,509,867,494,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 24,457
|
lean
|
/-
Copyright (c) 2018 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
The (classical) real numbers ℝ. This is a direct construction
from Cauchy sequences.
-/
import order.conditionally_complete_lattice data.real.cau_seq_completion
algebra.big_operators algebra.archimedean order.bounds
def real := @cau_seq.completion.Cauchy ℚ _ _ _ abs _
notation `ℝ` := real
namespace real
open cau_seq cau_seq.completion
def of_rat (x : ℚ) : ℝ := of_rat x
def mk (x : cau_seq ℚ abs) : ℝ := cau_seq.completion.mk x
def comm_ring_aux : comm_ring ℝ := cau_seq.completion.comm_ring
instance : comm_ring ℝ := { ..comm_ring_aux }
/- Extra instances to short-circuit type class resolution -/
instance : ring ℝ := by apply_instance
instance : comm_semiring ℝ := by apply_instance
instance : semiring ℝ := by apply_instance
instance : add_comm_group ℝ := by apply_instance
instance : add_group ℝ := by apply_instance
instance : add_comm_monoid ℝ := by apply_instance
instance : add_monoid ℝ := by apply_instance
instance : add_left_cancel_semigroup ℝ := by apply_instance
instance : add_right_cancel_semigroup ℝ := by apply_instance
instance : add_comm_semigroup ℝ := by apply_instance
instance : add_semigroup ℝ := by apply_instance
instance : comm_monoid ℝ := by apply_instance
instance : monoid ℝ := by apply_instance
instance : comm_semigroup ℝ := by apply_instance
instance : semigroup ℝ := by apply_instance
instance : inhabited ℝ := ⟨0⟩
theorem of_rat_sub (x y : ℚ) : of_rat (x - y) = of_rat x - of_rat y :=
congr_arg mk (const_sub _ _)
instance : has_lt ℝ :=
⟨λ x y, quotient.lift_on₂ x y (<) $
λ f₁ g₁ f₂ g₂ hf hg, propext $
⟨λ h, lt_of_eq_of_lt (setoid.symm hf) (lt_of_lt_of_eq h hg),
λ h, lt_of_eq_of_lt hf (lt_of_lt_of_eq h (setoid.symm hg))⟩⟩
@[simp] theorem mk_lt {f g : cau_seq ℚ abs} : mk f < mk g ↔ f < g := iff.rfl
theorem mk_eq {f g : cau_seq ℚ abs} : mk f = mk g ↔ f ≈ g := mk_eq
theorem quotient_mk_eq_mk (f : cau_seq ℚ abs) : ⟦f⟧ = mk f := rfl
theorem mk_eq_mk {f : cau_seq ℚ abs} : cau_seq.completion.mk f = mk f := rfl
@[simp] theorem mk_pos {f : cau_seq ℚ abs} : 0 < mk f ↔ pos f :=
iff_of_eq (congr_arg pos (sub_zero f))
protected def le (x y : ℝ) : Prop := x < y ∨ x = y
instance : has_le ℝ := ⟨real.le⟩
@[simp] theorem mk_le {f g : cau_seq ℚ abs} : mk f ≤ mk g ↔ f ≤ g :=
or_congr iff.rfl quotient.eq
theorem add_lt_add_iff_left {a b : ℝ} (c : ℝ) : c + a < c + b ↔ a < b :=
quotient.induction_on₃ a b c (λ f g h,
iff_of_eq (congr_arg pos $ by rw add_sub_add_left_eq_sub))
instance : linear_order ℝ :=
{ le := (≤), lt := (<),
le_refl := λ a, or.inr rfl,
le_trans := λ a b c, quotient.induction_on₃ a b c $
λ f g h, by simpa [quotient_mk_eq_mk] using le_trans,
lt_iff_le_not_le := λ a b, quotient.induction_on₂ a b $
λ f g, by simpa [quotient_mk_eq_mk] using lt_iff_le_not_le,
le_antisymm := λ a b, quotient.induction_on₂ a b $
λ f g, by simpa [mk_eq, quotient_mk_eq_mk] using @cau_seq.le_antisymm _ _ f g,
le_total := λ a b, quotient.induction_on₂ a b $
λ f g, by simpa [quotient_mk_eq_mk] using le_total f g }
instance : partial_order ℝ := by apply_instance
instance : preorder ℝ := by apply_instance
theorem of_rat_lt {x y : ℚ} : of_rat x < of_rat y ↔ x < y := const_lt
protected theorem zero_lt_one : (0 : ℝ) < 1 := of_rat_lt.2 zero_lt_one
protected theorem mul_pos {a b : ℝ} : 0 < a → 0 < b → 0 < a * b :=
quotient.induction_on₂ a b $ λ f g,
show pos (f - 0) → pos (g - 0) → pos (f * g - 0),
by simpa using cau_seq.mul_pos
instance : linear_ordered_comm_ring ℝ :=
{ add_le_add_left := λ a b h c,
(le_iff_le_iff_lt_iff_lt.2 $ real.add_lt_add_iff_left c).2 h,
zero_ne_one := ne_of_lt real.zero_lt_one,
mul_nonneg := λ a b a0 b0,
match a0, b0 with
| or.inl a0, or.inl b0 := le_of_lt (real.mul_pos a0 b0)
| or.inr a0, _ := by simp [a0.symm]
| _, or.inr b0 := by simp [b0.symm]
end,
mul_pos := @real.mul_pos,
zero_lt_one := real.zero_lt_one,
add_lt_add_left := λ a b h c, (real.add_lt_add_iff_left c).2 h,
..real.comm_ring, ..real.linear_order }
/- Extra instances to short-circuit type class resolution -/
instance : linear_ordered_ring ℝ := by apply_instance
instance : ordered_ring ℝ := by apply_instance
instance : linear_ordered_semiring ℝ := by apply_instance
instance : ordered_semiring ℝ := by apply_instance
instance : ordered_comm_group ℝ := by apply_instance
instance : ordered_cancel_comm_monoid ℝ := by apply_instance
instance : ordered_comm_monoid ℝ := by apply_instance
instance : domain ℝ := by apply_instance
local attribute [instance] classical.prop_decidable
noncomputable instance : discrete_linear_ordered_field ℝ :=
{ decidable_le := by apply_instance,
..real.linear_ordered_comm_ring,
..real.domain,
..cau_seq.completion.discrete_field }
/- Extra instances to short-circuit type class resolution -/
noncomputable instance : linear_ordered_field ℝ := by apply_instance
noncomputable instance : decidable_linear_ordered_comm_ring ℝ := by apply_instance
noncomputable instance : decidable_linear_ordered_semiring ℝ := by apply_instance
noncomputable instance : decidable_linear_ordered_comm_group ℝ := by apply_instance
noncomputable instance discrete_field : discrete_field ℝ := by apply_instance
noncomputable instance : field ℝ := by apply_instance
noncomputable instance : division_ring ℝ := by apply_instance
noncomputable instance : integral_domain ℝ := by apply_instance
noncomputable instance : nonzero_comm_ring ℝ := by apply_instance
noncomputable instance : decidable_linear_order ℝ := by apply_instance
noncomputable instance : lattice.distrib_lattice ℝ := by apply_instance
noncomputable instance : lattice.lattice ℝ := by apply_instance
noncomputable instance : lattice.semilattice_inf ℝ := by apply_instance
noncomputable instance : lattice.semilattice_sup ℝ := by apply_instance
noncomputable instance : lattice.has_inf ℝ := by apply_instance
noncomputable instance : lattice.has_sup ℝ := by apply_instance
lemma le_of_forall_epsilon_le {a b : real} (h : ∀ε, ε > 0 → a ≤ b + ε) : a ≤ b :=
le_of_forall_le_of_dense $ assume x hxb,
calc a ≤ b + (x - b) : h (x-b) $ sub_pos.2 hxb
... = x : by rw [add_comm]; simp
open rat
@[simp] theorem of_rat_eq_cast : ∀ x : ℚ, of_rat x = x :=
eq_cast of_rat rfl of_rat_add of_rat_mul
theorem le_mk_of_forall_le {x : ℝ} {f : cau_seq ℚ abs} :
(∃ i, ∀ j ≥ i, x ≤ f j) → x ≤ mk f :=
quotient.induction_on x $ λ g h, le_of_not_lt $
λ ⟨K, K0, hK⟩,
let ⟨i, H⟩ := exists_forall_ge_and h $
exists_forall_ge_and hK (f.cauchy₃ $ half_pos K0) in
begin
apply not_lt_of_le (H _ (le_refl _)).1,
rw ← of_rat_eq_cast,
refine ⟨_, half_pos K0, i, λ j ij, _⟩,
have := add_le_add (H _ ij).2.1
(le_of_lt (abs_lt.1 $ (H _ (le_refl _)).2.2 _ ij).1),
rwa [← sub_eq_add_neg, sub_self_div_two, sub_apply, sub_add_sub_cancel] at this
end
theorem mk_le_of_forall_le {f : cau_seq ℚ abs} {x : ℝ} :
(∃ i, ∀ j ≥ i, (f j : ℝ) ≤ x) → mk f ≤ x
| ⟨i, H⟩ := by rw [← neg_le_neg_iff, ← mk_eq_mk, mk_neg]; exact
le_mk_of_forall_le ⟨i, λ j ij, by simp [H _ ij]⟩
theorem mk_near_of_forall_near {f : cau_seq ℚ abs} {x : ℝ} {ε : ℝ}
(H : ∃ i, ∀ j ≥ i, abs ((f j : ℝ) - x) ≤ ε) : abs (mk f - x) ≤ ε :=
abs_sub_le_iff.2
⟨sub_le_iff_le_add'.2 $ mk_le_of_forall_le $
H.imp $ λ i h j ij, sub_le_iff_le_add'.1 (abs_sub_le_iff.1 $ h j ij).1,
sub_le.1 $ le_mk_of_forall_le $
H.imp $ λ i h j ij, sub_le.1 (abs_sub_le_iff.1 $ h j ij).2⟩
instance : archimedean ℝ :=
archimedean_iff_rat_le.2 $ λ x, quotient.induction_on x $ λ f,
let ⟨M, M0, H⟩ := f.bounded' 0 in
⟨M, mk_le_of_forall_le ⟨0, λ i _,
rat.cast_le.2 $ le_of_lt (abs_lt.1 (H i)).2⟩⟩
/- Mark `real` irreducible in order to prevent `auto_cases` from unfolding reals,
since users rarely want to consider real numbers as Cauchy sequences.
`comm_ring_aux` is also marked `irreducible` to ensure that there are no problems
with instances that are not definitionally equal, which making `real` irreducible could otherwise cause. -/
attribute [irreducible] real comm_ring_aux
noncomputable instance : floor_ring ℝ := archimedean.floor_ring _
theorem is_cau_seq_iff_lift {f : ℕ → ℚ} : is_cau_seq abs f ↔ is_cau_seq abs (λ i, (f i : ℝ)) :=
⟨λ H ε ε0,
let ⟨δ, δ0, δε⟩ := exists_pos_rat_lt ε0 in
(H _ δ0).imp $ λ i hi j ij, lt_trans
(by simpa using (@rat.cast_lt ℝ _ _ _).2 (hi _ ij)) δε,
λ H ε ε0, (H _ (rat.cast_pos.2 ε0)).imp $
λ i hi j ij, (@rat.cast_lt ℝ _ _ _).1 $ by simpa using hi _ ij⟩
theorem of_near (f : ℕ → ℚ) (x : ℝ)
(h : ∀ ε > 0, ∃ i, ∀ j ≥ i, abs ((f j : ℝ) - x) < ε) :
∃ h', real.mk ⟨f, h'⟩ = x :=
⟨is_cau_seq_iff_lift.2 (of_near _ (const abs x) h),
sub_eq_zero.1 $ abs_eq_zero.1 $
eq_of_le_of_forall_le_of_dense (abs_nonneg _) $ λ ε ε0,
mk_near_of_forall_near $
(h _ ε0).imp (λ i h j ij, le_of_lt (h j ij))⟩
theorem exists_floor (x : ℝ) : ∃ (ub : ℤ), (ub:ℝ) ≤ x ∧
∀ (z : ℤ), (z:ℝ) ≤ x → z ≤ ub :=
int.exists_greatest_of_bdd
(let ⟨n, hn⟩ := exists_int_gt x in ⟨n, λ z h',
int.cast_le.1 $ le_trans h' $ le_of_lt hn⟩)
(let ⟨n, hn⟩ := exists_int_lt x in ⟨n, le_of_lt hn⟩)
theorem exists_sup (S : set ℝ) : (∃ x, x ∈ S) → (∃ x, ∀ y ∈ S, y ≤ x) →
∃ x, ∀ y, x ≤ y ↔ ∀ z ∈ S, z ≤ y
| ⟨L, hL⟩ ⟨U, hU⟩ := begin
choose f hf using begin
refine λ d : ℕ, @int.exists_greatest_of_bdd
(λ n, ∃ y ∈ S, (n:ℝ) ≤ y * d) _ _ _,
{ cases exists_int_gt U with k hk,
refine ⟨k * d, λ z h, _⟩,
rcases h with ⟨y, yS, hy⟩,
refine int.cast_le.1 (le_trans hy _),
simp,
exact mul_le_mul_of_nonneg_right
(le_trans (hU _ yS) (le_of_lt hk)) (nat.cast_nonneg _) },
{ exact ⟨⌊L * d⌋, L, hL, floor_le _⟩ }
end,
have hf₁ : ∀ n > 0, ∃ y ∈ S, ((f n / n:ℚ):ℝ) ≤ y := λ n n0,
let ⟨y, yS, hy⟩ := (hf n).1 in
⟨y, yS, by simpa using (div_le_iff ((nat.cast_pos.2 n0):((_:ℝ) < _))).2 hy⟩,
have hf₂ : ∀ (n > 0) (y ∈ S), (y - (n:ℕ)⁻¹ : ℝ) < (f n / n:ℚ),
{ intros n n0 y yS,
have := lt_of_lt_of_le (sub_one_lt_floor _)
(int.cast_le.2 $ (hf n).2 _ ⟨y, yS, floor_le _⟩),
simp [-sub_eq_add_neg],
rwa [lt_div_iff ((nat.cast_pos.2 n0):((_:ℝ) < _)), sub_mul, _root_.inv_mul_cancel],
exact ne_of_gt (nat.cast_pos.2 n0) },
suffices hg, let g : cau_seq ℚ abs := ⟨λ n, f n / n, hg⟩,
refine ⟨mk g, λ y, ⟨λ h x xS, le_trans _ h, λ h, _⟩⟩,
{ refine le_of_forall_ge_of_dense (λ z xz, _),
cases exists_nat_gt (x - z)⁻¹ with K hK,
refine le_mk_of_forall_le ⟨K, λ n nK, _⟩,
replace xz := sub_pos.2 xz,
replace hK := le_trans (le_of_lt hK) (nat.cast_le.2 nK),
have n0 : 0 < n := nat.cast_pos.1 (lt_of_lt_of_le (inv_pos xz) hK),
refine le_trans _ (le_of_lt $ hf₂ _ n0 _ xS),
rwa [le_sub, inv_le ((nat.cast_pos.2 n0):((_:ℝ) < _)) xz] },
{ exact mk_le_of_forall_le ⟨1, λ n n1,
let ⟨x, xS, hx⟩ := hf₁ _ n1 in le_trans hx (h _ xS)⟩ },
intros ε ε0,
suffices : ∀ j k ≥ nat_ceil ε⁻¹, (f j / j - f k / k : ℚ) < ε,
{ refine ⟨_, λ j ij, abs_lt.2 ⟨_, this _ _ ij (le_refl _)⟩⟩,
rw [neg_lt, neg_sub], exact this _ _ (le_refl _) ij },
intros j k ij ik,
replace ij := le_trans (le_nat_ceil _) (nat.cast_le.2 ij),
replace ik := le_trans (le_nat_ceil _) (nat.cast_le.2 ik),
have j0 := nat.cast_pos.1 (lt_of_lt_of_le (inv_pos ε0) ij),
have k0 := nat.cast_pos.1 (lt_of_lt_of_le (inv_pos ε0) ik),
rcases hf₁ _ j0 with ⟨y, yS, hy⟩,
refine lt_of_lt_of_le ((@rat.cast_lt ℝ _ _ _).1 _)
((inv_le ε0 (nat.cast_pos.2 k0)).1 ik),
simpa using sub_lt_iff_lt_add'.2
(lt_of_le_of_lt hy $ sub_lt_iff_lt_add.1 $ hf₂ _ k0 _ yS)
end
noncomputable def Sup (S : set ℝ) : ℝ :=
if h : (∃ x, x ∈ S) ∧ (∃ x, ∀ y ∈ S, y ≤ x)
then classical.some (exists_sup S h.1 h.2) else 0
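/- Note that `Sup S` is (arbitrarily) defined to be `0` when `S` is empty or has no upper bound;
see `Sup_empty` and `Sup_of_not_bdd_above` below. -/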
theorem Sup_le (S : set ℝ) (h₁ : ∃ x, x ∈ S) (h₂ : ∃ x, ∀ y ∈ S, y ≤ x)
{y} : Sup S ≤ y ↔ ∀ z ∈ S, z ≤ y :=
by simp [Sup, h₁, h₂]; exact
classical.some_spec (exists_sup S h₁ h₂) y
theorem lt_Sup (S : set ℝ) (h₁ : ∃ x, x ∈ S) (h₂ : ∃ x, ∀ y ∈ S, y ≤ x)
{y} : y < Sup S ↔ ∃ z ∈ S, y < z :=
by simpa [not_forall] using not_congr (@Sup_le S h₁ h₂ y)
theorem le_Sup (S : set ℝ) (h₂ : ∃ x, ∀ y ∈ S, y ≤ x) {x} (xS : x ∈ S) : x ≤ Sup S :=
(Sup_le S ⟨_, xS⟩ h₂).1 (le_refl _) _ xS
theorem Sup_le_ub (S : set ℝ) (h₁ : ∃ x, x ∈ S) {ub} (h₂ : ∀ y ∈ S, y ≤ ub) : Sup S ≤ ub :=
(Sup_le S h₁ ⟨_, h₂⟩).2 h₂
protected lemma is_lub_Sup {s : set ℝ} {a b : ℝ} (ha : a ∈ s) (hb : b ∈ upper_bounds s) :
is_lub s (Sup s) :=
⟨λ x xs, real.le_Sup s ⟨_, hb⟩ xs,
λ u h, real.Sup_le_ub _ ⟨_, ha⟩ h⟩
noncomputable def Inf (S : set ℝ) : ℝ := -Sup {x | -x ∈ S}
theorem le_Inf (S : set ℝ) (h₁ : ∃ x, x ∈ S) (h₂ : ∃ x, ∀ y ∈ S, x ≤ y)
{y} : y ≤ Inf S ↔ ∀ z ∈ S, y ≤ z :=
begin
refine le_neg.trans ((Sup_le _ _ _).trans _),
{ cases h₁ with x xS, exact ⟨-x, by simp [xS]⟩ },
{ cases h₂ with ub h, exact ⟨-ub, λ y hy, le_neg.1 $ h _ hy⟩ },
split; intros H z hz,
{ exact neg_le_neg_iff.1 (H _ $ by simp [hz]) },
{ exact le_neg.2 (H _ hz) }
end
theorem Inf_lt (S : set ℝ) (h₁ : ∃ x, x ∈ S) (h₂ : ∃ x, ∀ y ∈ S, x ≤ y)
{y} : Inf S < y ↔ ∃ z ∈ S, z < y :=
by simpa [not_forall] using not_congr (@le_Inf S h₁ h₂ y)
theorem Inf_le (S : set ℝ) (h₂ : ∃ x, ∀ y ∈ S, x ≤ y) {x} (xS : x ∈ S) : Inf S ≤ x :=
(le_Inf S ⟨_, xS⟩ h₂).1 (le_refl _) _ xS
theorem lb_le_Inf (S : set ℝ) (h₁ : ∃ x, x ∈ S) {lb} (h₂ : ∀ y ∈ S, lb ≤ y) : lb ≤ Inf S :=
(le_Inf S h₁ ⟨_, h₂⟩).2 h₂
open lattice
noncomputable instance lattice : lattice ℝ := by apply_instance
noncomputable instance : conditionally_complete_linear_order ℝ :=
{ Sup := real.Sup,
Inf := real.Inf,
le_cSup :=
assume (s : set ℝ) (a : ℝ) (_ : bdd_above s) (_ : a ∈ s),
show a ≤ Sup s,
from le_Sup s ‹bdd_above s› ‹a ∈ s›,
cSup_le :=
assume (s : set ℝ) (a : ℝ) (_ : s ≠ ∅) (H : ∀b∈s, b ≤ a),
show Sup s ≤ a,
from Sup_le_ub s (set.exists_mem_of_ne_empty ‹s ≠ ∅›) H,
cInf_le :=
assume (s : set ℝ) (a : ℝ) (_ : bdd_below s) (_ : a ∈ s),
show Inf s ≤ a,
from Inf_le s ‹bdd_below s› ‹a ∈ s›,
le_cInf :=
assume (s : set ℝ) (a : ℝ) (_ : s ≠ ∅) (H : ∀b∈s, a ≤ b),
show a ≤ Inf s,
from lb_le_Inf s (set.exists_mem_of_ne_empty ‹s ≠ ∅›) H,
..real.linear_order, ..real.lattice}
theorem Sup_empty : lattice.Sup (∅ : set ℝ) = 0 := dif_neg $ by simp
theorem Sup_of_not_bdd_above {s : set ℝ} (hs : ¬ bdd_above s) : lattice.Sup s = 0 :=
dif_neg $ assume h, hs h.2
theorem Inf_empty : lattice.Inf (∅ : set ℝ) = 0 :=
show Inf ∅ = 0, by simp [Inf]; exact Sup_empty
theorem Inf_of_not_bdd_below {s : set ℝ} (hs : ¬ bdd_below s) : lattice.Inf s = 0 :=
have bdd_above {x | -x ∈ s} → bdd_below s, from
assume ⟨b, hb⟩, ⟨-b, assume x hxs, neg_le.2 $ hb _ $ by simp [hxs]⟩,
have ¬ bdd_above {x | -x ∈ s}, from mt this hs,
neg_eq_zero.2 $ Sup_of_not_bdd_above $ this
theorem cau_seq_converges (f : cau_seq ℝ abs) : ∃ x, f ≈ const abs x :=
begin
let S := {x : ℝ | const abs x < f},
have lb : ∃ x, x ∈ S := exists_lt f,
have ub' : ∀ x, f < const abs x → ∀ y ∈ S, y ≤ x :=
λ x h y yS, le_of_lt $ const_lt.1 $ cau_seq.lt_trans yS h,
have ub : ∃ x, ∀ y ∈ S, y ≤ x := (exists_gt f).imp ub',
refine ⟨Sup S,
((lt_total _ _).resolve_left (λ h, _)).resolve_right (λ h, _)⟩,
{ rcases h with ⟨ε, ε0, i, ih⟩,
refine not_lt_of_le (Sup_le_ub S lb (ub' _ _))
((sub_lt_self_iff _).2 (half_pos ε0)),
refine ⟨_, half_pos ε0, i, λ j ij, _⟩,
rw [sub_apply, const_apply, sub_right_comm,
le_sub_iff_add_le, add_halves],
exact ih _ ij },
{ rcases h with ⟨ε, ε0, i, ih⟩,
refine not_lt_of_le (le_Sup S ub _)
((lt_add_iff_pos_left _).2 (half_pos ε0)),
refine ⟨_, half_pos ε0, i, λ j ij, _⟩,
rw [sub_apply, const_apply, add_comm, ← sub_sub,
le_sub_iff_add_le, add_halves],
exact ih _ ij }
end
noncomputable instance : cau_seq.is_complete ℝ abs := ⟨cau_seq_converges⟩
theorem sqrt_exists : ∀ {x : ℝ}, 0 ≤ x → ∃ y, 0 ≤ y ∧ y * y = x :=
suffices H : ∀ {x : ℝ}, 0 < x → x ≤ 1 → ∃ y, 0 < y ∧ y * y = x, begin
intros x x0, cases x0,
cases le_total x 1 with x1 x1,
{ rcases H x0 x1 with ⟨y, y0, hy⟩,
exact ⟨y, le_of_lt y0, hy⟩ },
{ have := (inv_le_inv x0 zero_lt_one).2 x1,
rw inv_one at this,
rcases H (inv_pos x0) this with ⟨y, y0, hy⟩,
refine ⟨y⁻¹, le_of_lt (inv_pos y0), _⟩, rw [← mul_inv', hy, inv_inv'] },
{ exact ⟨0, by simp [x0.symm]⟩ }
end,
λ x x0 x1, begin
let S := {y | 0 < y ∧ y * y ≤ x},
have lb : x ∈ S := ⟨x0, by simpa using (mul_le_mul_right x0).2 x1⟩,
have ub : ∀ y ∈ S, (y:ℝ) ≤ 1,
{ intros y yS, cases yS with y0 yx,
refine (mul_self_le_mul_self_iff (le_of_lt y0) zero_le_one).2 _,
simpa using le_trans yx x1 },
have S0 : 0 < Sup S := lt_of_lt_of_le x0 (le_Sup _ ⟨_, ub⟩ lb),
refine ⟨Sup S, S0, le_antisymm (not_lt.1 $ λ h, _) (not_lt.1 $ λ h, _)⟩,
{ rw [← div_lt_iff S0, lt_Sup S ⟨_, lb⟩ ⟨_, ub⟩] at h,
rcases h with ⟨y, ⟨y0, yx⟩, hy⟩,
rw [div_lt_iff S0, ← div_lt_iff' y0, lt_Sup S ⟨_, lb⟩ ⟨_, ub⟩] at hy,
rcases hy with ⟨z, ⟨z0, zx⟩, hz⟩,
rw [div_lt_iff y0] at hz,
exact not_lt_of_lt
((mul_lt_mul_right y0).1 (lt_of_le_of_lt yx hz))
((mul_lt_mul_left z0).1 (lt_of_le_of_lt zx hz)) },
{ let s := Sup S, let y := s + (x - s * s) / 3,
replace h : 0 < x - s * s := sub_pos.2 h,
have _30 := bit1_pos zero_le_one,
have : s < y := (lt_add_iff_pos_right _).2 (div_pos h _30),
refine not_le_of_lt this (le_Sup S ⟨_, ub⟩ ⟨lt_trans S0 this, _⟩),
rw [add_mul_self_eq, add_assoc, ← le_sub_iff_add_le', ← add_mul,
← le_div_iff (div_pos h _30), div_div_cancel (ne_of_gt h)],
apply add_le_add,
{ simpa using (mul_le_mul_left (@two_pos ℝ _)).2 (Sup_le_ub _ ⟨_, lb⟩ ub) },
{ rw [div_le_one_iff_le _30],
refine le_trans (sub_le_self _ (mul_self_nonneg _)) (le_trans x1 _),
exact (le_add_iff_nonneg_left _).2 (le_of_lt two_pos) } }
end
def sqrt_aux (f : cau_seq ℚ abs) : ℕ → ℚ
| 0 := rat.mk_nat (f 0).num.to_nat.sqrt (f 0).denom.sqrt
| (n + 1) := let s := sqrt_aux n in max 0 $ (s + f (n+1) / s) / 2
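/- `sqrt_aux` is the Babylonian (Newton) iteration for square roots: given the current
approximation `s`, the next approximation is `(s + f (n+1) / s) / 2`, clamped below at `0`.
Starting from an integer square-root approximation of `f 0`, the iterates are intended to
converge to `sqrt (max 0 (mk f))`; see the commented-out `sqrt_aux_converges` statement below. -/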
theorem sqrt_aux_nonneg (f : cau_seq ℚ abs) : ∀ i : ℕ, 0 ≤ sqrt_aux f i
| 0 := by rw [sqrt_aux, mk_nat_eq, mk_eq_div];
apply div_nonneg'; exact int.cast_nonneg.2 (int.of_nat_nonneg _)
| (n + 1) := le_max_left _ _
/- TODO(Mario): finish the proof
theorem sqrt_aux_converges (f : cau_seq ℚ abs) : ∃ h x, 0 ≤ x ∧ x * x = max 0 (mk f) ∧
mk ⟨sqrt_aux f, h⟩ = x :=
begin
rcases sqrt_exists (le_max_left 0 (mk f)) with ⟨x, x0, hx⟩,
suffices : ∃ h, mk ⟨sqrt_aux f, h⟩ = x,
{ exact this.imp (λ h e, ⟨x, x0, hx, e⟩) },
apply of_near,
suffices : ∃ δ > 0, ∀ i, abs (↑(sqrt_aux f i) - x) < δ / 2 ^ i,
{ rcases this with ⟨δ, δ0, hδ⟩,
intros,
}
end -/
noncomputable def sqrt (x : ℝ) : ℝ :=
classical.some (sqrt_exists (le_max_left 0 x))
/-quotient.lift_on x
(λ f, mk ⟨sqrt_aux f, (sqrt_aux_converges f).fst⟩)
(λ f g e, begin
rcases sqrt_aux_converges f with ⟨hf, x, x0, xf, xs⟩,
rcases sqrt_aux_converges g with ⟨hg, y, y0, yg, ys⟩,
refine xs.trans (eq.trans _ ys.symm),
rw [← @mul_self_inj_of_nonneg ℝ _ x y x0 y0, xf, yg],
congr' 1, exact quotient.sound e
end)-/
theorem sqrt_prop (x : ℝ) : 0 ≤ sqrt x ∧ sqrt x * sqrt x = max 0 x :=
classical.some_spec (sqrt_exists (le_max_left 0 x))
/-quotient.induction_on x $ λ f,
by rcases sqrt_aux_converges f with ⟨hf, _, x0, xf, rfl⟩; exact ⟨x0, xf⟩-/
theorem sqrt_eq_zero_of_nonpos {x : ℝ} (h : x ≤ 0) : sqrt x = 0 :=
eq_zero_of_mul_self_eq_zero $ (sqrt_prop x).2.trans $ max_eq_left h
theorem sqrt_nonneg (x : ℝ) : 0 ≤ sqrt x := (sqrt_prop x).1
@[simp] theorem mul_self_sqrt {x : ℝ} (h : 0 ≤ x) : sqrt x * sqrt x = x :=
(sqrt_prop x).2.trans (max_eq_right h)
@[simp] theorem sqrt_mul_self {x : ℝ} (h : 0 ≤ x) : sqrt (x * x) = x :=
(mul_self_inj_of_nonneg (sqrt_nonneg _) h).1 (mul_self_sqrt (mul_self_nonneg _))
theorem sqrt_eq_iff_mul_self_eq {x y : ℝ} (hx : 0 ≤ x) (hy : 0 ≤ y) :
sqrt x = y ↔ y * y = x :=
⟨λ h, by rw [← h, mul_self_sqrt hx],
λ h, by rw [← h, sqrt_mul_self hy]⟩
@[simp] theorem sqr_sqrt {x : ℝ} (h : 0 ≤ x) : sqrt x ^ 2 = x :=
by rw [pow_two, mul_self_sqrt h]
@[simp] theorem sqrt_sqr {x : ℝ} (h : 0 ≤ x) : sqrt (x ^ 2) = x :=
by rw [pow_two, sqrt_mul_self h]
theorem sqrt_eq_iff_sqr_eq {x y : ℝ} (hx : 0 ≤ x) (hy : 0 ≤ y) :
sqrt x = y ↔ y ^ 2 = x :=
by rw [pow_two, sqrt_eq_iff_mul_self_eq hx hy]
theorem sqrt_mul_self_eq_abs (x : ℝ) : sqrt (x * x) = abs x :=
(le_total 0 x).elim
(λ h, (sqrt_mul_self h).trans (abs_of_nonneg h).symm)
(λ h, by rw [← neg_mul_neg,
sqrt_mul_self (neg_nonneg.2 h), abs_of_nonpos h])
theorem sqrt_sqr_eq_abs (x : ℝ) : sqrt (x ^ 2) = abs x :=
by rw [pow_two, sqrt_mul_self_eq_abs]
@[simp] theorem sqrt_zero : sqrt 0 = 0 :=
by simpa using sqrt_mul_self (le_refl _)
@[simp] theorem sqrt_one : sqrt 1 = 1 :=
by simpa using sqrt_mul_self zero_le_one
@[simp] theorem sqrt_le {x y : ℝ} (hx : 0 ≤ x) (hy : 0 ≤ y) : sqrt x ≤ sqrt y ↔ x ≤ y :=
by rw [mul_self_le_mul_self_iff (sqrt_nonneg _) (sqrt_nonneg _),
mul_self_sqrt hx, mul_self_sqrt hy]
@[simp] theorem sqrt_lt {x y : ℝ} (hx : 0 ≤ x) (hy : 0 ≤ y) : sqrt x < sqrt y ↔ x < y :=
lt_iff_lt_of_le_iff_le (sqrt_le hy hx)
@[simp] theorem sqrt_inj {x y : ℝ} (hx : 0 ≤ x) (hy : 0 ≤ y) : sqrt x = sqrt y ↔ x = y :=
by simp [le_antisymm_iff, hx, hy]
@[simp] theorem sqrt_eq_zero {x : ℝ} (h : 0 ≤ x) : sqrt x = 0 ↔ x = 0 :=
by simpa using sqrt_inj h (le_refl _)
theorem sqrt_eq_zero' {x : ℝ} : sqrt x = 0 ↔ x ≤ 0 :=
(le_total x 0).elim
(λ h, by simp [h, sqrt_eq_zero_of_nonpos])
(λ h, by simp [h]; simp [le_antisymm_iff, h])
@[simp] theorem sqrt_pos {x : ℝ} : 0 < sqrt x ↔ 0 < x :=
lt_iff_lt_of_le_iff_le (iff.trans
(by simp [le_antisymm_iff, sqrt_nonneg]) sqrt_eq_zero')
@[simp] theorem sqrt_mul' (x) {y : ℝ} (hy : 0 ≤ y) : sqrt (x * y) = sqrt x * sqrt y :=
begin
cases le_total 0 x with hx hx,
{ refine (mul_self_inj_of_nonneg _ (mul_nonneg _ _)).1 _; try {apply sqrt_nonneg},
rw [mul_self_sqrt (mul_nonneg hx hy), mul_assoc,
mul_left_comm (sqrt y), mul_self_sqrt hy, ← mul_assoc, mul_self_sqrt hx] },
{ rw [sqrt_eq_zero'.2 (mul_nonpos_of_nonpos_of_nonneg hx hy),
sqrt_eq_zero'.2 hx, zero_mul] }
end
@[simp] theorem sqrt_mul {x : ℝ} (hx : 0 ≤ x) (y : ℝ) : sqrt (x * y) = sqrt x * sqrt y :=
by rw [mul_comm, sqrt_mul' _ hx, mul_comm]
@[simp] theorem sqrt_inv (x : ℝ) : sqrt x⁻¹ = (sqrt x)⁻¹ :=
(le_or_lt x 0).elim
(λ h, by simp [sqrt_eq_zero'.2, inv_nonpos, h])
(λ h, by rw [
← mul_self_inj_of_nonneg (sqrt_nonneg _) (le_of_lt $ inv_pos $ sqrt_pos.2 h),
mul_self_sqrt (le_of_lt $ inv_pos h), ← mul_inv', mul_self_sqrt (le_of_lt h)])
@[simp] theorem sqrt_div {x : ℝ} (hx : 0 ≤ x) (y : ℝ) : sqrt (x / y) = sqrt x / sqrt y :=
by rw [division_def, sqrt_mul hx, sqrt_inv]; refl
attribute [irreducible] real.le
end real
|
ef6b42a514ef0c6a3281053e507cd5776109fd5b
|
77c5b91fae1b966ddd1db969ba37b6f0e4901e88
|
/src/data/list/cycle.lean
|
64ccc6baad5d546ad201a81704f0c5e989d848a7
|
[
"Apache-2.0"
] |
permissive
|
dexmagic/mathlib
|
ff48eefc56e2412429b31d4fddd41a976eb287ce
|
7a5d15a955a92a90e1d398b2281916b9c41270b2
|
refs/heads/master
| 1,693,481,322,046
| 1,633,360,193,000
| 1,633,360,193,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 24,222
|
lean
|
/-
Copyright (c) 2021 Yakov Pechersky. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yakov Pechersky
-/
import data.list.rotate
import data.finset.sort
import data.fintype.list
/-!
# Cycles of a list
Lists have an equivalence relation of whether they are rotational permutations of one another.
This relation is defined as `is_rotated`.
Based on this, we define the quotient of lists by the rotation relation, called `cycle`.
We also define a representation of concrete cycles, available when viewing them in a goal state or
via `#eval`, when over representable types. For example, the cycle `(2 1 4 3)` will be shown
as `c[1, 4, 3, 2]`. The representation of the cycle sorts the elements by the string value of the
underlying element. This representation also supports cycles that can contain duplicates.
-/
namespace list
variables {α : Type*} [decidable_eq α]
/-- Return the `z` such that `x :: z :: _` appears in `xs`, or `default` if there is no such `z`. -/
def next_or : Π (xs : list α) (x default : α), α
| [] x default := default
| [y] x default := default -- Handles the not-found and the wraparound case
| (y :: z :: xs) x default := if x = y then z else next_or (z :: xs) x default
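/- For example, with default value `0`:
 * `next_or [1, 2, 3] 2 0 = 3`
 * `next_or [1, 2, 3] 3 0 = 0` (the last element falls back to the default)
 * `next_or [1, 2, 3] 4 0 = 0` (an absent element falls back to the default)
These illustrative values follow directly from the defining equations above. -/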
@[simp] lemma next_or_nil (x d : α) : next_or [] x d = d := rfl
@[simp] lemma next_or_singleton (x y d : α) : next_or [y] x d = d := rfl
@[simp] lemma next_or_self_cons_cons (xs : list α) (x y d : α) :
next_or (x :: y :: xs) x d = y :=
if_pos rfl
lemma next_or_cons_of_ne (xs : list α) (y x d : α) (h : x ≠ y) :
next_or (y :: xs) x d = next_or xs x d :=
begin
cases xs with z zs,
{ refl },
{ exact if_neg h }
end
/-- `next_or` does not depend on the default value, if the next value appears. -/
lemma next_or_eq_next_or_of_mem_of_ne (xs : list α) (x d d' : α)
(x_mem : x ∈ xs) (x_ne : x ≠ xs.last (ne_nil_of_mem x_mem)) :
next_or xs x d = next_or xs x d' :=
begin
induction xs with y ys IH,
{ cases x_mem },
cases ys with z zs,
{ simp at x_mem x_ne, contradiction },
by_cases h : x = y,
{ rw [h, next_or_self_cons_cons, next_or_self_cons_cons] },
{ rw [next_or, next_or, IH];
simpa [h] using x_mem }
end
lemma mem_of_next_or_ne {xs : list α} {x d : α} (h : next_or xs x d ≠ d) :
x ∈ xs :=
begin
induction xs with y ys IH,
{ simpa using h },
cases ys with z zs,
{ simpa using h },
{ by_cases hx : x = y,
{ simp [hx] },
{ rw [next_or_cons_of_ne _ _ _ _ hx] at h,
simpa [hx] using IH h } }
end
lemma next_or_concat {xs : list α} {x : α} (d : α) (h : x ∉ xs) :
next_or (xs ++ [x]) x d = d :=
begin
induction xs with z zs IH,
{ simp },
{ obtain ⟨hz, hzs⟩ := not_or_distrib.mp (mt (mem_cons_iff _ _ _).mp h),
rw [cons_append, next_or_cons_of_ne _ _ _ _ hz, IH hzs] }
end
lemma next_or_mem {xs : list α} {x d : α} (hd : d ∈ xs) :
next_or xs x d ∈ xs :=
begin
revert hd,
suffices : ∀ (xs' : list α) (h : ∀ x ∈ xs, x ∈ xs') (hd : d ∈ xs'), next_or xs x d ∈ xs',
{ exact this xs (λ _, id) },
intros xs' hxs' hd,
induction xs with y ys ih,
{ exact hd },
cases ys with z zs,
{ exact hd },
rw next_or,
split_ifs with h,
{ exact hxs' _ (mem_cons_of_mem _ (mem_cons_self _ _)) },
{ exact ih (λ _ h, hxs' _ (mem_cons_of_mem _ h)) },
end
/--
Given an element `x : α` of `l : list α` such that `x ∈ l`, get the next
element of `l`. This works from head to tail (including a check for the last element),
so it will match on the first hit, ignoring later duplicates.
For example:
* `next [1, 2, 3] 2 _ = 3`
* `next [1, 2, 3] 3 _ = 1`
* `next [1, 2, 3, 2, 4] 2 _ = 3`
* `next [1, 2, 3, 2] 2 _ = 3`
* `next [1, 1, 2, 3, 2] 1 _ = 1`
-/
def next (l : list α) (x : α) (h : x ∈ l) : α :=
next_or l x (l.nth_le 0 (length_pos_of_mem h))
/--
Given an element `x : α` of `l : list α` such that `x ∈ l`, get the previous
element of `l`. This works from head to tail (including a check for the last element),
so it will match on the first hit, ignoring later duplicates.
* `prev [1, 2, 3] 2 _ = 1`
* `prev [1, 2, 3] 1 _ = 3`
* `prev [1, 2, 3, 2, 4] 2 _ = 1`
* `prev [1, 2, 3, 4, 2] 2 _ = 1`
* `prev [1, 1, 2] 1 _ = 2`
-/
def prev : Π (l : list α) (x : α) (h : x ∈ l), α
| [] _ h := by simpa using h
| [y] _ _ := y
| (y :: z :: xs) x h := if hx : x = y then (last (z :: xs) (cons_ne_nil _ _)) else
if x = z then y else prev (z :: xs) x (by simpa [hx] using h)
variables (l : list α) (x : α) (h : x ∈ l)
@[simp] lemma next_singleton (x y : α) (h : x ∈ [y]) :
next [y] x h = y := rfl
@[simp] lemma prev_singleton (x y : α) (h : x ∈ [y]) :
prev [y] x h = y := rfl
lemma next_cons_cons_eq' (y z : α) (h : x ∈ (y :: z :: l)) (hx : x = y) :
next (y :: z :: l) x h = z :=
by rw [next, next_or, if_pos hx]
@[simp] lemma next_cons_cons_eq (z : α) (h : x ∈ (x :: z :: l)) :
next (x :: z :: l) x h = z :=
next_cons_cons_eq' l x x z h rfl
lemma next_ne_head_ne_last (y : α) (h : x ∈ (y :: l)) (hy : x ≠ y)
(hx : x ≠ last (y :: l) (cons_ne_nil _ _)) :
next (y :: l) x h = next l x (by simpa [hy] using h) :=
begin
rw [next, next, next_or_cons_of_ne _ _ _ _ hy, next_or_eq_next_or_of_mem_of_ne],
{ rwa last_cons at hx },
{ simpa [hy] using h }
end
lemma next_cons_concat (y : α) (hy : x ≠ y) (hx : x ∉ l)
(h : x ∈ y :: l ++ [x] := mem_append_right _ (mem_singleton_self x)) :
next (y :: l ++ [x]) x h = y :=
begin
rw [next, next_or_concat],
{ refl },
{ simp [hy, hx] }
end
lemma next_last_cons (y : α) (h : x ∈ (y :: l)) (hy : x ≠ y)
(hx : x = last (y :: l) (cons_ne_nil _ _)) (hl : nodup l) :
next (y :: l) x h = y :=
begin
rw [next, nth_le, ←init_append_last (cons_ne_nil y l), hx, next_or_concat],
subst hx,
intro H,
obtain ⟨_ | k, hk, hk'⟩ := nth_le_of_mem H,
{ simpa [init_eq_take, nth_le_take', hy.symm] using hk' },
suffices : k.succ = l.length,
{ simpa [this] using hk },
cases l with hd tl,
{ simpa using hk },
{ rw nodup_iff_nth_le_inj at hl,
rw [length, nat.succ_inj'],
apply hl,
simpa [init_eq_take, nth_le_take', last_eq_nth_le] using hk' }
end
lemma prev_last_cons' (y : α) (h : x ∈ (y :: l)) (hx : x = y) :
prev (y :: l) x h = last (y :: l) (cons_ne_nil _ _) :=
begin
cases l;
simp [prev, hx]
end
@[simp] lemma prev_last_cons (h : x ∈ (x :: l)) :
prev (x :: l) x h = last (x :: l) (cons_ne_nil _ _) :=
prev_last_cons' l x x h rfl
lemma prev_cons_cons_eq' (y z : α) (h : x ∈ (y :: z :: l)) (hx : x = y) :
prev (y :: z :: l) x h = last (z :: l) (cons_ne_nil _ _) :=
by rw [prev, dif_pos hx]
@[simp] lemma prev_cons_cons_eq (z : α) (h : x ∈ (x :: z :: l)) :
prev (x :: z :: l) x h = last (z :: l) (cons_ne_nil _ _) :=
prev_cons_cons_eq' l x x z h rfl
lemma prev_cons_cons_of_ne' (y z : α) (h : x ∈ (y :: z :: l)) (hy : x ≠ y) (hz : x = z) :
prev (y :: z :: l) x h = y :=
begin
cases l,
{ simp [prev, hy, hz] },
{ rw [prev, dif_neg hy, if_pos hz] }
end
lemma prev_cons_cons_of_ne (y : α) (h : x ∈ (y :: x :: l)) (hy : x ≠ y) :
prev (y :: x :: l) x h = y :=
prev_cons_cons_of_ne' _ _ _ _ _ hy rfl
lemma prev_ne_cons_cons (y z : α) (h : x ∈ (y :: z :: l)) (hy : x ≠ y) (hz : x ≠ z) :
prev (y :: z :: l) x h = prev (z :: l) x (by simpa [hy] using h) :=
begin
cases l,
{ simpa [hy, hz] using h },
{ rw [prev, dif_neg hy, if_neg hz] }
end
include h
lemma next_mem : l.next x h ∈ l :=
next_or_mem (nth_le_mem _ _ _)
lemma prev_mem : l.prev x h ∈ l :=
begin
cases l with hd tl,
{ simpa using h },
induction tl with hd' tl hl generalizing hd,
{ simp },
{ by_cases hx : x = hd,
{ simp only [hx, prev_cons_cons_eq],
exact mem_cons_of_mem _ (last_mem _) },
{ rw [prev, dif_neg hx],
split_ifs with hm,
{ exact mem_cons_self _ _ },
{ exact mem_cons_of_mem _ (hl _ _) } } }
end
lemma next_nth_le (l : list α) (h : nodup l) (n : ℕ) (hn : n < l.length) :
next l (l.nth_le n hn) (nth_le_mem _ _ _) = l.nth_le ((n + 1) % l.length)
(nat.mod_lt _ (n.zero_le.trans_lt hn)) :=
begin
cases l with x l,
{ simpa using hn },
induction l with y l hl generalizing x n,
{ simp },
{ cases n,
{ simp },
{ have hn' : n.succ ≤ l.length.succ,
{ refine nat.succ_le_of_lt _,
simpa [nat.succ_lt_succ_iff] using hn },
have hx': (x :: y :: l).nth_le n.succ hn ≠ x,
{ intro H,
suffices : n.succ = 0,
{ simpa },
rw nodup_iff_nth_le_inj at h,
refine h _ _ hn nat.succ_pos' _,
simpa using H },
rcases hn'.eq_or_lt with hn''|hn'',
{ rw [next_last_cons],
{ simp [hn''] },
{ exact hx' },
{ simp [last_eq_nth_le, hn''] },
{ exact nodup_of_nodup_cons h } },
      { have : n < l.length := by simpa [nat.succ_lt_succ_iff] using hn'',
rw [next_ne_head_ne_last _ _ _ _ hx'],
{ simp [nat.mod_eq_of_lt (nat.succ_lt_succ (nat.succ_lt_succ this)),
hl _ _ (nodup_of_nodup_cons h), nat.mod_eq_of_lt (nat.succ_lt_succ this)] },
{ rw last_eq_nth_le,
intro H,
suffices : n.succ = l.length.succ,
{ exact absurd hn'' this.ge.not_lt },
rw nodup_iff_nth_le_inj at h,
refine h _ _ hn _ _,
{ simp },
{ simpa using H } } } } }
end
lemma prev_nth_le (l : list α) (h : nodup l) (n : ℕ) (hn : n < l.length) :
prev l (l.nth_le n hn) (nth_le_mem _ _ _) = l.nth_le ((n + (l.length - 1)) % l.length)
(nat.mod_lt _ (n.zero_le.trans_lt hn)) :=
begin
cases l with x l,
{ simpa using hn },
induction l with y l hl generalizing n x,
{ simp },
{ rcases n with _|_|n,
{ simpa [last_eq_nth_le, nat.mod_eq_of_lt (nat.succ_lt_succ l.length.lt_succ_self)] },
{ simp only [mem_cons_iff, nodup_cons] at h,
push_neg at h,
simp [add_comm, prev_cons_cons_of_ne, h.left.left.symm] },
{ rw [prev_ne_cons_cons],
{ convert hl _ _ (nodup_of_nodup_cons h) _ using 1,
have : ∀ k hk, (y :: l).nth_le k hk = (x :: y :: l).nth_le (k + 1) (nat.succ_lt_succ hk),
{ intros,
simpa },
rw [this],
congr,
simp only [nat.add_succ_sub_one, add_zero, length],
simp only [length, nat.succ_lt_succ_iff] at hn,
set k := l.length,
rw [nat.succ_add, ←nat.add_succ, nat.add_mod_right, nat.succ_add, ←nat.add_succ _ k,
nat.add_mod_right, nat.mod_eq_of_lt, nat.mod_eq_of_lt],
{ exact nat.lt_succ_of_lt hn },
{ exact nat.succ_lt_succ (nat.lt_succ_of_lt hn) } },
{ intro H,
suffices : n.succ.succ = 0,
{ simpa },
rw nodup_iff_nth_le_inj at h,
refine h _ _ hn nat.succ_pos' _,
simpa using H },
{ intro H,
suffices : n.succ.succ = 1,
{ simpa },
rw nodup_iff_nth_le_inj at h,
refine h _ _ hn (nat.succ_lt_succ nat.succ_pos') _,
simpa using H } } }
end
lemma pmap_next_eq_rotate_one (h : nodup l) :
l.pmap l.next (λ _ h, h) = l.rotate 1 :=
begin
apply list.ext_le,
{ simp },
{ intros,
rw [nth_le_pmap, nth_le_rotate, next_nth_le _ h] }
end
lemma pmap_prev_eq_rotate_length_sub_one (h : nodup l) :
l.pmap l.prev (λ _ h, h) = l.rotate (l.length - 1) :=
begin
apply list.ext_le,
{ simp },
{ intros n hn hn',
rw [nth_le_rotate, nth_le_pmap, prev_nth_le _ h] }
end
lemma prev_next (l : list α) (h : nodup l) (x : α) (hx : x ∈ l) :
prev l (next l x hx) (next_mem _ _ _) = x :=
begin
obtain ⟨n, hn, rfl⟩ := nth_le_of_mem hx,
simp only [next_nth_le, prev_nth_le, h, nat.mod_add_mod],
cases l with hd tl,
{ simp },
{ have : n < 1 + tl.length := by simpa [add_comm] using hn,
simp [add_left_comm, add_comm, add_assoc, nat.mod_eq_of_lt this] }
end
lemma next_prev (l : list α) (h : nodup l) (x : α) (hx : x ∈ l) :
next l (prev l x hx) (prev_mem _ _ _) = x :=
begin
obtain ⟨n, hn, rfl⟩ := nth_le_of_mem hx,
simp only [next_nth_le, prev_nth_le, h, nat.mod_add_mod],
cases l with hd tl,
{ simp },
{ have : n < 1 + tl.length := by simpa [add_comm] using hn,
simp [add_left_comm, add_comm, add_assoc, nat.mod_eq_of_lt this] }
end
lemma prev_reverse_eq_next (l : list α) (h : nodup l) (x : α) (hx : x ∈ l) :
prev l.reverse x (mem_reverse.mpr hx) = next l x hx :=
begin
obtain ⟨k, hk, rfl⟩ := nth_le_of_mem hx,
have lpos : 0 < l.length := k.zero_le.trans_lt hk,
have key : l.length - 1 - k < l.length :=
(nat.sub_le _ _).trans_lt (nat.sub_lt_self lpos nat.succ_pos'),
rw ←nth_le_pmap l.next (λ _ h, h) (by simpa using hk),
simp_rw [←nth_le_reverse l k (key.trans_le (by simp)), pmap_next_eq_rotate_one _ h],
rw ←nth_le_pmap l.reverse.prev (λ _ h, h),
{ simp_rw [pmap_prev_eq_rotate_length_sub_one _ (nodup_reverse.mpr h), rotate_reverse,
length_reverse, nat.mod_eq_of_lt (nat.sub_lt_self lpos nat.succ_pos'),
nat.sub_sub_self (nat.succ_le_of_lt lpos)],
rw ←nth_le_reverse,
{ simp [nat.sub_sub_self (nat.le_pred_of_lt hk)] },
{ simpa using (nat.sub_le _ _).trans_lt (nat.sub_lt_self lpos nat.succ_pos') } },
{ simpa using (nat.sub_le _ _).trans_lt (nat.sub_lt_self lpos nat.succ_pos') }
end
lemma next_reverse_eq_prev (l : list α) (h : nodup l) (x : α) (hx : x ∈ l) :
next l.reverse x (mem_reverse.mpr hx) = prev l x hx :=
begin
convert (prev_reverse_eq_next l.reverse (nodup_reverse.mpr h) x (mem_reverse.mpr hx)).symm,
exact (reverse_reverse l).symm
end
lemma is_rotated_next_eq {l l' : list α} (h : l ~r l') (hn : nodup l) {x : α} (hx : x ∈ l) :
l.next x hx = l'.next x (h.mem_iff.mp hx) :=
begin
obtain ⟨k, hk, rfl⟩ := nth_le_of_mem hx,
obtain ⟨n, rfl⟩ := id h,
rw [next_nth_le _ hn],
simp_rw ←nth_le_rotate' _ n k,
rw [next_nth_le _ (h.nodup_iff.mp hn), ←nth_le_rotate' _ n],
simp [add_assoc]
end
lemma is_rotated_prev_eq {l l' : list α} (h : l ~r l') (hn : nodup l) {x : α} (hx : x ∈ l) :
l.prev x hx = l'.prev x (h.mem_iff.mp hx) :=
begin
rw [←next_reverse_eq_prev _ hn, ←next_reverse_eq_prev _ (h.nodup_iff.mp hn)],
exact is_rotated_next_eq h.reverse (nodup_reverse.mpr hn) _
end
end list
open list
/--
`cycle α` is the quotient of `list α` by cyclic permutation.
Duplicates are allowed.
-/
def cycle (α : Type*) : Type* := quotient (is_rotated.setoid α)
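-- For example, `[1, 2, 3]` and `[2, 3, 1]` are rotations of one another, so they are
-- identified in `cycle ℕ`, while the reversal `[3, 2, 1]` is not a rotation of `[1, 2, 3]`
-- and gives a different cycle.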
namespace cycle
variables {α : Type*}
instance : has_coe (list α) (cycle α) := ⟨quot.mk _⟩
@[simp] lemma coe_eq_coe {l₁ l₂ : list α} : (l₁ : cycle α) = l₂ ↔ (l₁ ~r l₂) :=
@quotient.eq _ (is_rotated.setoid _) _ _
@[simp] lemma mk_eq_coe (l : list α) :
quot.mk _ l = (l : cycle α) := rfl
@[simp] lemma mk'_eq_coe (l : list α) :
quotient.mk' l = (l : cycle α) := rfl
instance : inhabited (cycle α) := ⟨(([] : list α) : cycle α)⟩
/--
For `x : α`, `s : cycle α`, `x ∈ s` indicates that `x` occurs at least once in `s`.
-/
def mem (a : α) (s : cycle α) : Prop :=
quot.lift_on s (λ l, a ∈ l) (λ l₁ l₂ (e : l₁ ~r l₂), propext $ e.mem_iff)
instance : has_mem α (cycle α) := ⟨mem⟩
@[simp] lemma mem_coe_iff {a : α} {l : list α} :
a ∈ (l : cycle α) ↔ a ∈ l := iff.rfl
instance [decidable_eq α] : decidable_eq (cycle α) :=
λ s₁ s₂, quotient.rec_on_subsingleton₂' s₁ s₂ (λ l₁ l₂,
decidable_of_iff' _ quotient.eq')
instance [decidable_eq α] (x : α) (s : cycle α) : decidable (x ∈ s) :=
quotient.rec_on_subsingleton' s (λ l, list.decidable_mem x l)
/--
Reverse a `s : cycle α` by reversing the underlying `list`.
-/
def reverse (s : cycle α) : cycle α :=
quot.map reverse (λ l₁ l₂ (e : l₁ ~r l₂), e.reverse) s
@[simp] lemma reverse_coe (l : list α) :
(l : cycle α).reverse = l.reverse := rfl
@[simp] lemma mem_reverse_iff {a : α} {s : cycle α} :
a ∈ s.reverse ↔ a ∈ s :=
quot.induction_on s (λ _, mem_reverse)
@[simp] lemma reverse_reverse (s : cycle α) :
s.reverse.reverse = s :=
quot.induction_on s (λ _, by simp)
/--
The length of the `s : cycle α`, which is the number of elements, counting duplicates.
-/
def length (s : cycle α) : ℕ :=
quot.lift_on s length (λ l₁ l₂ (e : l₁ ~r l₂), e.perm.length_eq)
@[simp] lemma length_coe (l : list α) :
length (l : cycle α) = l.length := rfl
@[simp] lemma length_reverse (s : cycle α) :
s.reverse.length = s.length :=
quot.induction_on s length_reverse
/--
A `s : cycle α` that has at most one element.
-/
def subsingleton (s : cycle α) : Prop :=
s.length ≤ 1
lemma length_subsingleton_iff {s : cycle α} :
subsingleton s ↔ length s ≤ 1 := iff.rfl
@[simp] lemma subsingleton_reverse_iff {s : cycle α} :
s.reverse.subsingleton ↔ s.subsingleton :=
by simp [length_subsingleton_iff]
lemma subsingleton.congr {s : cycle α} (h : subsingleton s) :
∀ ⦃x⦄ (hx : x ∈ s) ⦃y⦄ (hy : y ∈ s), x = y :=
begin
induction s using quot.induction_on with l,
simp only [length_subsingleton_iff, length_coe, mk_eq_coe, le_iff_lt_or_eq, nat.lt_add_one_iff,
length_eq_zero, length_eq_one, nat.not_lt_zero, false_or] at h,
rcases h with rfl|⟨z, rfl⟩;
simp
end
/--
A `s : cycle α` that is made up of at least two unique elements.
-/
def nontrivial (s : cycle α) : Prop := ∃ (x y : α) (h : x ≠ y), x ∈ s ∧ y ∈ s
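-- For example, the cycle obtained from `[1, 2]` is nontrivial, whereas the one obtained from
-- `[1, 1]` is not: it has length two, but `1` is its only element.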
@[simp] lemma nontrivial_coe_nodup_iff {l : list α} (hl : l.nodup) :
nontrivial (l : cycle α) ↔ 2 ≤ l.length :=
begin
rw nontrivial,
rcases l with (_ | ⟨hd, _ | ⟨hd', tl⟩⟩),
{ simp },
{ simp },
{ simp only [mem_cons_iff, exists_prop, mem_coe_iff, list.length, ne.def, nat.succ_le_succ_iff,
zero_le, iff_true],
refine ⟨hd, hd', _, by simp⟩,
simp only [not_or_distrib, mem_cons_iff, nodup_cons] at hl,
exact hl.left.left }
end
@[simp] lemma nontrivial_reverse_iff {s : cycle α} :
s.reverse.nontrivial ↔ s.nontrivial :=
by simp [nontrivial]
lemma length_nontrivial {s : cycle α} (h : nontrivial s) :
2 ≤ length s :=
begin
obtain ⟨x, y, hxy, hx, hy⟩ := h,
induction s using quot.induction_on with l,
rcases l with (_ | ⟨hd, _ | ⟨hd', tl⟩⟩),
{ simpa using hx },
{ simp only [mem_coe_iff, mk_eq_coe, mem_singleton] at hx hy,
simpa [hx, hy] using hxy },
{ simp [bit0] }
end
/--
The `s : cycle α` contains no duplicates.
-/
def nodup (s : cycle α) : Prop :=
quot.lift_on s nodup (λ l₁ l₂ (e : l₁ ~r l₂), propext $ e.nodup_iff)
@[simp] lemma nodup_coe_iff {l : list α} :
nodup (l : cycle α) ↔ l.nodup := iff.rfl
@[simp] lemma nodup_reverse_iff {s : cycle α} :
s.reverse.nodup ↔ s.nodup :=
quot.induction_on s (λ _, nodup_reverse)
lemma subsingleton.nodup {s : cycle α} (h : subsingleton s) :
nodup s :=
begin
induction s using quot.induction_on with l,
cases l with hd tl,
{ simp },
{ have : tl = [] := by simpa [subsingleton, length_eq_zero] using h,
simp [this] }
end
lemma nodup.nontrivial_iff {s : cycle α} (h : nodup s) :
nontrivial s ↔ ¬ subsingleton s :=
begin
rw length_subsingleton_iff,
induction s using quotient.induction_on',
simp only [mk'_eq_coe, nodup_coe_iff] at h,
simp [h, nat.succ_le_iff]
end
/--
The `s : cycle α` as a `multiset α`.
-/
def to_multiset (s : cycle α) : multiset α :=
quotient.lift_on' s (λ l, (l : multiset α)) (λ l₁ l₂ (h : l₁ ~r l₂), multiset.coe_eq_coe.mpr h.perm)
/--
The lift of `list.map`.
-/
def map {β : Type*} (f : α → β) : cycle α → cycle β :=
quotient.map' (list.map f) $ λ l₁ l₂ h, h.map _
/--
The `multiset` of lists that can make the cycle.
-/
def lists (s : cycle α) : multiset (list α) :=
quotient.lift_on' s
(λ l, (l.cyclic_permutations : multiset (list α))) $
λ l₁ l₂ (h : l₁ ~r l₂), by simpa using h.cyclic_permutations.perm
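-- For example, the cycle obtained from `[1, 2, 3]` has `lists` given by the multiset
-- `{[1, 2, 3], [2, 3, 1], [3, 1, 2]}` of its cyclic permutations; by `mem_lists_iff_coe_eq`
-- below, these are exactly the lists that coerce to this cycle.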
@[simp] lemma mem_lists_iff_coe_eq {s : cycle α} {l : list α} :
l ∈ s.lists ↔ (l : cycle α) = s :=
begin
induction s using quotient.induction_on',
rw [lists, quotient.lift_on'_mk'],
simp
end
section decidable
variable [decidable_eq α]
/--
Auxiliary decidability algorithm for lists that contain at least two unique elements.
-/
def decidable_nontrivial_coe : Π (l : list α), decidable (nontrivial (l : cycle α))
| [] := is_false (by simp [nontrivial])
| [x] := is_false (by simp [nontrivial])
| (x :: y :: l) := if h : x = y
then @decidable_of_iff' _ (nontrivial ((x :: l) : cycle α))
(by simp [h, nontrivial])
(decidable_nontrivial_coe (x :: l))
else is_true ⟨x, y, h, by simp, by simp⟩
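-- The procedure above scans for an element distinct from the head: on `x :: y :: l` with
-- `x = y` it drops the duplicate `y` and recurses on `x :: l`, and it succeeds as soon as it
-- meets an element different from `x`.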
instance {s : cycle α} : decidable (nontrivial s) :=
quot.rec_on_subsingleton s decidable_nontrivial_coe
instance {s : cycle α} : decidable (nodup s) :=
quot.rec_on_subsingleton s (λ (l : list α), list.nodup_decidable l)
instance fintype_nodup_cycle [fintype α] : fintype {s : cycle α // s.nodup} :=
fintype.of_surjective (λ (l : {l : list α // l.nodup}), ⟨l.val, by simpa using l.prop⟩) (λ ⟨s, hs⟩,
begin
induction s using quotient.induction_on',
exact ⟨⟨s, hs⟩, by simp⟩
end)
instance fintype_nodup_nontrivial_cycle [fintype α] :
fintype {s : cycle α // s.nodup ∧ s.nontrivial} :=
fintype.subtype (((finset.univ : finset {s : cycle α // s.nodup}).map
(function.embedding.subtype _)).filter cycle.nontrivial)
(by simp)
/--
The `s : cycle α` as a `finset α`.
-/
def to_finset (s : cycle α) : finset α :=
s.to_multiset.to_finset
/-- Given a `s : cycle α` such that `nodup s`, retrieve the next element after `x ∈ s`. -/
def next : Π (s : cycle α) (hs : nodup s) (x : α) (hx : x ∈ s), α :=
λ s, quot.hrec_on s (λ l hn x hx, next l x hx)
(λ l₁ l₂ (h : l₁ ~r l₂),
function.hfunext (propext h.nodup_iff) (λ h₁ h₂ he, function.hfunext rfl
(λ x y hxy, function.hfunext (propext (by simpa [eq_of_heq hxy] using h.mem_iff))
(λ hm hm' he', heq_of_eq (by simpa [eq_of_heq hxy] using is_rotated_next_eq h h₁ _)))))
/-- Given a `s : cycle α` such that `nodup s`, retrieve the previous element before `x ∈ s`. -/
def prev : Π (s : cycle α) (hs : nodup s) (x : α) (hx : x ∈ s), α :=
λ s, quot.hrec_on s (λ l hn x hx, prev l x hx)
(λ l₁ l₂ (h : l₁ ~r l₂),
function.hfunext (propext h.nodup_iff) (λ h₁ h₂ he, function.hfunext rfl
(λ x y hxy, function.hfunext (propext (by simpa [eq_of_heq hxy] using h.mem_iff))
(λ hm hm' he', heq_of_eq (by simpa [eq_of_heq hxy] using is_rotated_prev_eq h h₁ _)))))
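-- For example, on the cycle obtained from `[1, 2, 3]`, `next` sends `1 ↦ 2`, `2 ↦ 3` and
-- wraps around with `3 ↦ 1`, while `prev` walks the other way, e.g. it sends `1 ↦ 3`.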
@[simp] lemma prev_reverse_eq_next (s : cycle α) (hs : nodup s) (x : α) (hx : x ∈ s) :
s.reverse.prev (nodup_reverse_iff.mpr hs) x (mem_reverse_iff.mpr hx) = s.next hs x hx :=
(quotient.induction_on' s prev_reverse_eq_next) hs x hx
@[simp] lemma next_reverse_eq_prev (s : cycle α) (hs : nodup s) (x : α) (hx : x ∈ s) :
s.reverse.next (nodup_reverse_iff.mpr hs) x (mem_reverse_iff.mpr hx) = s.prev hs x hx :=
by simp [←prev_reverse_eq_next]
@[simp] lemma next_mem (s : cycle α) (hs : nodup s) (x : α) (hx : x ∈ s) :
s.next hs x hx ∈ s :=
begin
induction s using quot.induction_on,
exact next_mem _ _ _
end
lemma prev_mem (s : cycle α) (hs : nodup s) (x : α) (hx : x ∈ s) :
s.prev hs x hx ∈ s :=
by { rw [←next_reverse_eq_prev, ←mem_reverse_iff], exact next_mem _ _ _ _ }
@[simp] lemma prev_next (s : cycle α) (hs : nodup s) (x : α) (hx : x ∈ s) :
s.prev hs (s.next hs x hx) (next_mem s hs x hx) = x :=
(quotient.induction_on' s prev_next) hs x hx
@[simp] lemma next_prev (s : cycle α) (hs : nodup s) (x : α) (hx : x ∈ s) :
s.next hs (s.prev hs x hx) (prev_mem s hs x hx) = x :=
(quotient.induction_on' s next_prev) hs x hx
end decidable
/--
We define a representation of concrete cycles, available when viewing them in a goal state or
via `#eval`, when over representable types. For example, the cycle `(2 1 4 3)` will be shown
as `c[1, 4, 3, 2]`. The representative shown is the lexicographically smallest rotation of the
cycle, comparing elements by their string representations. This representation also supports
cycles that can contain duplicates.
-/
instance [has_repr α] : has_repr (cycle α) :=
⟨λ s, "c[" ++ string.intercalate ", " ((s.map repr).lists.sort (≤)).head ++ "]"⟩
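-- A quick check of the representation described above (an added illustration; it assumes the
-- list-to-cycle coercion elaborates on a literal as written). Per the doc comment, this
-- should print `c[1, 4, 3, 2]`:
#eval (↑([2, 1, 4, 3] : list ℕ) : cycle ℕ)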
end cycle
|
f82b25264a5ac148dcaf4d1ec182895c5ed23f34
|
a9d0fb7b0e4f802bd3857b803e6c5c23d87fef91
|
/library/data/set/classical_inverse.lean
|
292f15ab67dd043ba6df59e51d63cc5b21dcc97d
|
[
"Apache-2.0"
] |
permissive
|
soonhokong/lean-osx
|
4a954262c780e404c1369d6c06516161d07fcb40
|
3670278342d2f4faa49d95b46d86642d7875b47c
|
refs/heads/master
| 1,611,410,334,552
| 1,474,425,686,000
| 1,474,425,686,000
| 12,043,103
| 5
| 1
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 3,111
|
lean
|
/-
Copyright (c) 2014 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Andrew Zipperer
Using classical logic, this file defines an inverse function.
-/
import .function .map
open eq.ops classical
namespace set
variables {X Y : Type}
noncomputable definition inv_fun (f : X → Y) (a : set X) (dflt : X) (y : Y) : X :=
if H : ∃₀ x ∈ a, f x = y then some H else dflt
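/-
Informally: if some `x ∈ a` satisfies `f x = y`, then `inv_fun f a dflt y` is one such
(noncomputably chosen) preimage; otherwise it is `dflt`. For instance, taking `f` to be
doubling on the whole type with default `0`, the value at `4` is `2` (the unique preimage),
while the value at `3` is `0` because no preimage exists.
-/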
theorem inv_fun_pos {f : X → Y} {a : set X} {dflt : X} {y : Y}
(H : ∃₀ x ∈ a, f x = y) : (inv_fun f a dflt y ∈ a) ∧ (f (inv_fun f a dflt y) = y) :=
have H1 : inv_fun f a dflt y = some H, from dif_pos H,
H1⁻¹ ▸ some_spec H
theorem inv_fun_neg {f : X → Y} {a : set X} {dflt : X} {y : Y}
(H : ¬ ∃₀ x ∈ a, f x = y) : inv_fun f a dflt y = dflt :=
dif_neg H
variables {f : X → Y} {a : set X} {b : set Y}
theorem maps_to_inv_fun {dflt : X} (dflta : dflt ∈ a) :
maps_to (inv_fun f a dflt) b a :=
let f' := inv_fun f a dflt in
take y,
assume yb : y ∈ b,
show f' y ∈ a, from
by_cases
(assume H : ∃₀ x ∈ a, f x = y,
and.left (inv_fun_pos H))
(assume H : ¬ ∃₀ x ∈ a, f x = y,
(inv_fun_neg H)⁻¹ ▸ dflta)
theorem left_inv_on_inv_fun_of_inj_on (dflt : X) (H : inj_on f a) :
left_inv_on (inv_fun f a dflt) f a :=
let f' := inv_fun f a dflt in
take x,
assume xa : x ∈ a,
have H1 : ∃₀ x' ∈ a, f x' = f x, from exists.intro x (and.intro xa rfl),
have H2 : f' (f x) ∈ a ∧ f (f' (f x)) = f x, from inv_fun_pos H1,
show f' (f x) = x, from H (and.left H2) xa (and.right H2)
theorem surj_on_inv_fun_of_inj_on (dflt : X) (mapsto : maps_to f a b) (H : inj_on f a) :
surj_on (inv_fun f a dflt) b a :=
surj_on_of_right_inv_on mapsto (left_inv_on_inv_fun_of_inj_on dflt H)
theorem right_inv_on_inv_fun_of_surj_on (dflt : X) (H : surj_on f a b) :
right_inv_on (inv_fun f a dflt) f b :=
let f' := inv_fun f a dflt in
take y,
assume yb: y ∈ b,
obtain x (Hx : x ∈ a ∧ f x = y), from H yb,
have Hy : f' y ∈ a ∧ f (f' y) = y, from inv_fun_pos (exists.intro x Hx),
and.right Hy
theorem inj_on_inv_fun (dflt : X) (H : surj_on f a b) :
inj_on (inv_fun f a dflt) b :=
inj_on_of_left_inv_on (right_inv_on_inv_fun_of_surj_on dflt H)
end set
open set
namespace map
variables {X Y : Type} {a : set X} {b : set Y}
protected noncomputable definition inverse (f : map a b) {dflt : X} (dflta : dflt ∈ a) :=
map.mk (inv_fun f a dflt) (@maps_to_inv_fun _ _ _ _ b _ dflta)
theorem left_inverse_inverse {f : map a b} {dflt : X} (dflta : dflt ∈ a) (H : map.injective f) :
map.left_inverse (map.inverse f dflta) f :=
left_inv_on_inv_fun_of_inj_on dflt H
theorem right_inverse_inverse {f : map a b} {dflt : X} (dflta : dflt ∈ a) (H : map.surjective f) :
map.right_inverse (map.inverse f dflta) f :=
right_inv_on_inv_fun_of_surj_on dflt H
theorem is_inverse_inverse {f : map a b} {dflt : X} (dflta : dflt ∈ a) (H : map.bijective f) :
map.is_inverse (map.inverse f dflta) f :=
and.intro
(left_inverse_inverse dflta (and.left H))
(right_inverse_inverse dflta (and.right H))
end map
|
66c13762f56c16f8d4b9c8ec80f94907027df2eb
|
952248371e69ccae722eb20bfe6815d8641554a8
|
/test/multiplicative.lean
|
0147e7e3d8547a0693fe36e62fb2711c187fa7cb
|
[] |
no_license
|
robertylewis/lean_polya
|
5fd079031bf7114449d58d68ccd8c3bed9bcbc97
|
1da14d60a55ad6cd8af8017b1b64990fccb66ab7
|
refs/heads/master
| 1,647,212,226,179
| 1,558,108,354,000
| 1,558,108,354,000
| 89,933,264
| 1
| 2
| null | 1,560,964,118,000
| 1,493,650,551,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 7,898
|
lean
|
import control
open polya tactic
variables x y z u v w r : ℚ
#exit
example (h1 : u > 0) (h2 : u < 1*v) (h3 : z > 0) (h4 : 1*z + 1*1 < 1*w) (h5 : rat.pow (1*u + 1*v + 1*z) 3 ≥ 1* rat.pow (1*u + 1*v + 1*w + 1*1) 5) : false :=
by polya h1 h2 h3 h4 h5
example (h1 : u > 1*1) (h2 : u < 1*v) (h3 : rat.pow u 15 > 1*rat.pow v 17) : false :=
by polya h1 h2 h3
-- reconstruction fails
/-example (h1 : u > 1*1) (h2 : u < 1*v) (h3 : rat.pow u 15 > 1*rat.pow v 19007) : false :=
by polya h1 h2 h3
-/
example (h1 : x > 0) (h2 : x < 1*1) (h3 : rat.pow x 2 ≥ 1*x) : false :=
by polya h1 h2 h3
/-
0 \leq n, \; n < (K / 2)x, \; 0 < C, \; 0 < \varepsilon < 1 \;\Longrightarrow\; \left(1 + \frac{\varepsilon}{3 (C + 3)}\right) \cdot n < K x
-/
section
variables n K C eps : ℚ
example (h1 : 0 ≤ n) (h2 : n < (1/2)*K*x) (h3 : 0 < C) (h4 : 0 < eps) (h5 : eps < 1) (h6 : 1 + (1/3)*eps*rat.pow (C+3) (-1)*n < K*x) : false :=
by polya h1 h2 h3 h4 h5 h6
end
/-
Tests from the old Polya repository.
-/
-- line 44
example (h1 : u > 0) (h2 : u < 1*v) (h3 : v < 1*1) (h4 : 1 ≤ (1/2)*x) (h5 : x ≤ 1*y) (h6 : ((rat.pow u 2)*x) ≥ (1/2)*(v*rat.pow y 2))
: false :=
by polya h1 h2 h3 h4 h5 h6 --h7 h8
-- line 50
example (h1 : x > 1*1) (h2 : (1 + rat.pow y 2)*x < 1*(1 + rat.pow y 2)) : false :=
by polya h1 h2
-- line 56
example (h1 : x > 0) (h2 : x < 1*1) (h3 : rat.pow (1*1 + (-1)*x) (-1) ≤ 1*(rat.pow (1*1 + (-1)*rat.pow x 2) (-1))) : false :=
by
polya h1 h2 h3
-- line 62
example (h1 : u > 0) (h2 : u < 1*v) (h3 : z > 0) (h4 : 1*z + 1*1 < 1*w) (h5 : rat.pow (1*u + 1*v + 1*z) 3 ≥ 1* rat.pow (1*u + 1*v + 1*w + 1*1) 5) : false :=
by polya h1 h2 h3 h4 h5
-- line 68
example (h1 : u > 0) (h2 : u < 1*v) (h3 : z > 0) (h4 : 1*z + 1*1 < 1*w) (h5 : rat.pow (1*u + 1*v + 1*z) 33 ≥ 1* rat.pow (1*u + 1*v + 1*w + 1*1) 55) : false :=
by polya h1 h2 h3 h4 h5
-- line 74
-- see top of file. goes forever, normalization problem?
/-example (h1 : u > 0) (h2 : u < 1*(rat.pow (1*rat.pow v 2 + 23*1) 3)) (h3 : z > 0) (h4 : 1*z + 1*1 < 1*w)
(h5 : rat.pow (1*u + (1*rat.pow (1*rat.pow v 2 + 23*1) 3) + 1*z) 3 ≥ 1*(rat.pow (1*u + 1*rat.pow (1*rat.pow v 2 + 23*1) 3 + 1*w + 1*1) 5)) : false :=
by polya h1 h2 h3 h4 h5-/
-- line 263
-- fails, sign inference too weak
example (h1 : x > 0) (h2 : y < 1*z) (h3 : x * y < 1*(x * z)) : false :=
by polya h1 h2 h3
-- line 295
example (h1 : x * (y+z) ≤ 0) (h2 : y + z > 0) (h3 : x ≥ 0) (h4 : x*w > 0) : false :=
by polya h1 h2 h3 h4
-- line 299
example (h1 : x > 0) (h2 : x < 3*y) (h3 : u < 1*v) (h4 : v < 0) (h5 : 1 < 1*rat.pow v 2) (h6 : rat.pow v 2 < 1*x) (h7 : 1*(u*rat.pow (3*y) 2) + 1*1 ≥ 1*(1*(rat.pow x 2*v) + 1*x)) : false :=
by polya h1 h2 h3 h4 h5 h6
-- line 304 hangs?
/-example (h1 : x > 0) (h2 : x < y) (h3 : 0 < u) (h4 : u < v) (h5 : 0 < w + z) (h6 : w + z < r - 1)
(h7 : u + (rat.pow (1+x) 2)*(2*w + 2*z + 3) < 2*v + rat.pow (1+y) 2 * (2*r + 1)) : false :=
by polya h1 h2 h3 h4 h5 h6 h7-/
local infix `**`:1000 := rat.pow
-- line 309
example (h1 : x + rat.pow y (-1) < 2) (h2 : y < 0) (h3 : y * rat.pow x (-1) > 1) (h4 : -2 ≤ x) (h5 : x < 2) (h6 : -2 ≤ y) (h7 : y ≤ 2) (h8 : rat.pow x 2 * rat.pow y (-1) > 1 - x) : false :=
by polya h1 h2 h3 h4 h5 h6 h7 h8
-- line 314
example (h1 : 0 < u) (h2 : u < v) (h3 : 1 < x) (h4 : x < y) (h5 : 0 < w) (h6 : w < z) (h7 : u + x*w < v+(rat.pow y 2)*z) : false :=
by polya h1 h2 h3 h4 h5 h6 h7
-- line 319
example (h1 : x + rat.pow y (-1) < 2) (h2 : y < 0) (h3 : y * rat.pow x (-1) > 1) (h4 : -2 ≤ x) (h5 : x ≤ 2) (h6 : y ≤ 2) (h7 : -2 ≤ y) (h8 : rat.pow x 2 * rat.pow y (-1) > 1 - x) : false :=
by polya h1 h2 h3 h4 h5 h6 h7 h8
-- 323
example (h1 : 0 < x) (h2 : x < y) (h3 : 0 < u) (h4 : u < v) (h5 : 0 < w + z) (h6 : w + z < r - 1)
(h7 : u + rat.pow (1 + x) 2 * (2 * w + 2 * z + 3) >= 2 * v + rat.pow (1 + y) 2 * (2 * r + 1)) : false :=
by polya h1 h2 h3 h4 h5 h6 h7
-- 328
example (h1 : 0 < x) (h2 : x < 3 * y) (h3 : u < v) (h3' : v < 0) (h4 : 1 < v**2) (h5 : v**2 < x) (h6 : u * (3 * y)**2 + 1 >= x**2 * v + x) : false :=
by polya h1 h2 h3 h3' h4 h5 h6
--337
example (h1 : 1 < x) (h2 : 1 < y) (h3 : 1 < z) (h4 : 1 >= x * (1 + z * y)) : false :=
by polya h1 h2 h3 h4
-- 346
example (h1 : x < 1) (h2 : 1 < y) (h3 : x * y > 1) (h4 : u + x >= y + 1) (h5 : x**2 * y < 2 - u * x * y) : false :=
by polya h1 h2 h3 h4 h5
-- 350
example (h1 : x**21 > 0) (h2 : x**3 < 1) (h3 : y**55 > 0) (h4 : y < 1) (h5 : a + b < a * b) : false :=
by polya h1 h2 h3 h4 h5
-- 354
example (h1 : 0 < x) (h2 : y < z) (h3 : y < 0) (h4 : z > 0) (h5 : x * y ≥ x * z) : false :=
by polya h1 h2 h3 h4 h5
--359
example (h1 : 0 < z) (h2 : y < z) (h3 : y = 0) (h4 : z > 0) (h5 : x * y ≥ x * z) : false :=
by polya h1 h2 h3 h4 h5
-- 371
example (h1 : x > 1) (h2 : z = y**2) (h3 : 1+z*x < 1+z) : false :=
by polya h1 h2 h3
-- 384
example (h1 : x = z) (h2 : y = w) (h3 : x > 0) (h4 : y > 0) (h5 : x * y ≠ z * w) : false :=
by polya h1 h2 h3 h4 h5
-- 389
example (h1 : x > 2*y) (h2 : x = 3*y) (h3 : y ≤ 0) : false :=
by polya h1 h2 h3
example (h1 : x > 0) (h2 : 1 < 1 * x) (h3 : 1 < 1 * (rat.pow x (-1))) : false :=
by polya h1 h2 h3
example (h1 : x > 0) (h2 : x < 1*1) (h3 : rat.pow (1*1 + (-1)*x) (-1) ≤ 1*(rat.pow (1*1 + (-1)*rat.pow x 2) (-1))) : false :=
by
polya h1 h2 h3
example (h1 : x > 1*1) (h1' : y ≥ 0) (h2 : (1 + y)*x < 1*(1 + y)) : false :=
by polya h1 h1' h2
example (e1 : x > 0) /-(e2' : x*y > 0)-/ (e2' : y > 0) (e2 : x > 1*y) (e3 : z > 1*x) (e3' : z > 0) (e4 : y > 1*z) : false :=
by polya e1 e2' e2 e3 e3' e4
example (e1 : x > 0) (e2 : y > 0) (e2' : z > 0) (e3 : y > 1*z) (e4 : x > 1*z) (e5 : x*y<1*(rat.pow z 2)) : false :=
by polya e1 e2 e2' e3 e4 e5
/-set_option trace.app_builder true
example (h0 : (1 : ℚ) > 0) (h1 : x > 1*1) (h1' : y ≥ 0) (h2 : (1 + y)*x < 1*(1 + y)) : false :=
by do
exps ← monad.mapm get_local [`h0, `h1, `h1', `h2],
bb ← add_proofs_to_blackboard blackboard.mk_empty exps,
bb.trace_exprs,
trace "-----",
bb.trace,
trace "-----",
(_, bb) ← return $ add_new_ineqs bb,
(_, bb) ← return $ prod_form.add_new_ineqs bb,
--(_, bb) ← return $ add_new_ineqs bb,
--(_, bb) ← return $ prod_form.add_new_ineqs bb,
trace "-----",
bb.trace,
trace $ ("contr found", bb.contr_found),
pf ← bb.contr.reconstruct,
apply pf
#exit
example (h0 : (1 : ℚ) > 0) (h1 : x > 1*1) (h2 : (1 + rat.pow y 2)*x < 1*(1 + rat.pow y 2)) (ht : rat.pow y 2 ≥ 0) : false :=
by do
exps ← monad.mapm get_local [`h0, `h1, `h2, `ht],
bb ← add_proofs_to_blackboard blackboard.mk_empty exps,
bb.trace_exprs,
trace "-----",
bb.trace,
trace "-----",
(_, bb) ← return $ add_new_ineqs bb,
(_, bb) ← return $ prod_form.add_new_ineqs bb,
(_, bb) ← return $ add_new_ineqs bb,
(_, bb) ← return $ prod_form.add_new_ineqs bb,
trace "-----",
bb.trace,
trace $ ("contr found", bb.contr_found),
pf ← bb.contr.reconstruct,
apply pf-/
-- left off with : bug at prod_form.to_expr??
example (e1 : x > 0) /-(e2' : x*y > 0)-/ (e2' : y > 0) (e2 : x > 1*y) (e3 : z > 1*x) (e3' : z > 0) (e4 : y > 1*z) : false :=
by do
exps ← monad.mapm get_local [`e1, `e2, `e2', `e3, `e3', `e4],
bb ← add_proofs_to_blackboard blackboard.mk_empty exps,
bb.trace_exprs,
trace "-----",
bb.trace,
trace "-----",
(_, bb) ← return $ prod_form.add_new_ineqs bb,
trace "-----",
bb.trace,
trace $ ("contr found", bb.contr_found),
pf ← bb.contr.reconstruct,
apply pf
example (e1 : x > 0) (e2 : y > 0) (e2' : z > 0) (e3 : y > 1*z) (e4 : x > 1*z) (e5 : x*y<1*(rat.pow z 2)) (e6 : rat.pow z 2 > 0) (e7 : x*y > 0) : false :=
by do
exps ← monad.mapm get_local [`e1, `e2, `e2', `e3, `e4, `e5, `e6, `e7],
bb ← add_proofs_to_blackboard blackboard.mk_empty exps,
bb.trace_exprs,
--(l, _) ← return $ get_add_defs bb,
--trace l,
(_, bb) ← return $ prod_form.add_new_ineqs bb,
--bb.trace,
trace $ ("contr found", bb.contr_found),
pf ← bb.contr.reconstruct,
apply pf
|
b23fbeca5f8fbbcafdf19a2e2a9d6f61a9f2d76c
|
a339bc2ac96174381fb610f4b2e1ba42df2be819
|
/hott/types/pointed.hlean
|
73feb5d0e973640d177747e253d32df6a209f94c
|
[
"Apache-2.0"
] |
permissive
|
kalfsvag/lean2
|
25b2dccc07a98e5aa20f9a11229831f9d3edf2e7
|
4d4a0c7c53a9922c5f630f6f8ebdccf7ddef2cc7
|
refs/heads/master
| 1,610,513,122,164
| 1,483,135,198,000
| 1,483,135,198,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 40,184
|
hlean
|
/-
Copyright (c) 2014-2016 Jakob von Raumer. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jakob von Raumer, Floris van Doorn
Ported from Coq HoTT
The basic definitions are in init.pointed
-/
import .nat.basic ..arity ..prop_trunc
open is_trunc eq prod sigma nat equiv option is_equiv bool unit algebra sigma.ops sum
namespace pointed
variables {A B : Type}
definition pointed_loop [instance] [constructor] (a : A) : pointed (a = a) :=
pointed.mk idp
definition pointed_fun_closed [constructor] (f : A → B) [H : pointed A] : pointed B :=
pointed.mk (f pt)
definition loop [reducible] [constructor] (A : Type*) : Type* :=
pointed.mk' (point A = point A)
definition loopn [reducible] : ℕ → Type* → Type*
| loopn 0 X := X
| loopn (n+1) X := loop (loopn n X)
notation `Ω` := loop
notation `Ω[`:95 n:0 `]`:0 := loopn n
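-- Unfolding the recursion: `Ω[2] A` is definitionally `Ω (Ω A)`, the pointed type of 2-loops
-- `idp = idp` in the loop space `Ω A`, based at `idp`.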
namespace ops
-- this is in a separate namespace because it caused type class inference to loop in some places
definition is_trunc_pointed_MK [instance] [priority 1100] (n : ℕ₋₂) {A : Type} (a : A)
[H : is_trunc n A] : is_trunc n (pointed.MK A a) :=
H
end ops
definition is_trunc_loop [instance] [priority 1100] (A : Type*)
(n : ℕ₋₂) [H : is_trunc (n.+1) A] : is_trunc n (Ω A) :=
!is_trunc_eq
definition loopn_zero_eq [unfold_full] (A : Type*)
: Ω[0] A = A := rfl
definition loopn_succ_eq [unfold_full] (k : ℕ) (A : Type*)
: Ω[succ k] A = Ω (Ω[k] A) := rfl
definition rfln [constructor] [reducible] {n : ℕ} {A : Type*} : Ω[n] A := pt
definition refln [constructor] [reducible] (n : ℕ) (A : Type*) : Ω[n] A := pt
definition refln_eq_refl [unfold_full] (A : Type*) (n : ℕ) : rfln = rfl :> Ω[succ n] A := rfl
definition loopn_space [unfold 3] (A : Type) [H : pointed A] (n : ℕ) : Type :=
Ω[n] (pointed.mk' A)
definition loop_mul {k : ℕ} {A : Type*} (mul : A → A → A) : Ω[k] A → Ω[k] A → Ω[k] A :=
begin cases k with k, exact mul, exact concat end
definition pType_eq {A B : Type*} (f : A ≃ B) (p : f pt = pt) : A = B :=
begin
cases A with A a, cases B with B b, esimp at *,
fapply apdt011 @pType.mk,
{ apply ua f},
{ rewrite [cast_ua, p]},
end
definition pType_eq_elim {A B : Type*} (p : A = B :> Type*)
: Σ(p : carrier A = carrier B :> Type), Point A =[p] Point B :=
by induction p; exact ⟨idp, idpo⟩
protected definition pType.sigma_char.{u} : pType.{u} ≃ Σ(X : Type.{u}), X :=
begin
fapply equiv.MK,
{ intro x, induction x with X x, exact ⟨X, x⟩},
{ intro x, induction x with X x, exact pointed.MK X x},
{ intro x, induction x with X x, reflexivity},
{ intro x, induction x with X x, reflexivity},
end
definition add_point [constructor] (A : Type) : Type* :=
pointed.Mk (none : option A)
postfix `₊`:(max+1) := add_point
-- the inclusion A → A₊ is called "some", the extra point "pt" or "none" ("@none A")
end pointed
namespace pointed
/- truncated pointed types -/
definition ptrunctype_eq {n : ℕ₋₂} {A B : n-Type*}
(p : A = B :> Type) (q : Point A =[p] Point B) : A = B :=
begin
induction A with A HA a, induction B with B HB b, esimp at *,
induction q, esimp,
refine ap010 (ptrunctype.mk A) _ a,
exact !is_prop.elim
end
definition ptrunctype_eq_of_pType_eq {n : ℕ₋₂} {A B : n-Type*} (p : A = B :> Type*)
: A = B :=
begin
cases pType_eq_elim p with q r,
exact ptrunctype_eq q r
end
definition is_trunc_ptrunctype [instance] {n : ℕ₋₂} (A : n-Type*) : is_trunc n A :=
trunctype.struct A
end pointed open pointed
namespace pointed
variables {A B C D : Type*} {f g h : A →* B}
/- categorical properties of pointed maps -/
definition pmap_of_map [constructor] {A B : Type} (f : A → B) (a : A) :
pointed.MK A a →* pointed.MK B (f a) :=
pmap.mk f idp
definition pid [constructor] [refl] (A : Type*) : A →* A :=
pmap.mk id idp
definition pcompose [constructor] [trans] (g : B →* C) (f : A →* B) : A →* C :=
pmap.mk (λa, g (f a)) (ap g (respect_pt f) ⬝ respect_pt g)
infixr ` ∘* `:60 := pcompose
definition passoc (h : C →* D) (g : B →* C) (f : A →* B) : (h ∘* g) ∘* f ~* h ∘* (g ∘* f) :=
begin
fconstructor, intro a, reflexivity,
cases A, cases B, cases C, cases D, cases f with f pf, cases g with g pg, cases h with h ph,
esimp at *,
induction pf, induction pg, induction ph, reflexivity
end
definition pid_pcompose [constructor] (f : A →* B) : pid B ∘* f ~* f :=
begin
fconstructor,
{ intro a, reflexivity},
{ reflexivity}
end
definition pcompose_pid [constructor] (f : A →* B) : f ∘* pid A ~* f :=
begin
fconstructor,
{ intro a, reflexivity},
{ reflexivity}
end
/- equivalences and equalities -/
definition pmap.sigma_char [constructor] {A B : Type*} : (A →* B) ≃ Σ(f : A → B), f pt = pt :=
begin
fapply equiv.MK : intros f,
{ exact ⟨f , respect_pt f⟩ },
all_goals cases f with f p,
{ exact pmap.mk f p },
all_goals reflexivity
end
definition pmap_eq (r : Πa, f a = g a) (s : respect_pt f = (r pt) ⬝ respect_pt g) : f = g :=
begin
cases f with f p, cases g with g q,
esimp at *,
fapply apd011 pmap.mk,
{ exact eq_of_homotopy r},
{ apply concato_eq, apply eq_pathover_Fl, apply inv_con_eq_of_eq_con,
rewrite [ap_eq_apd10, apd10_eq_of_homotopy, s]}
end
definition pmap_eq_of_homotopy {A B : Type*} {f g : A →* B} [is_set B] (p : f ~ g) : f = g :=
pmap_eq p !is_set.elim
definition pmap_equiv_left (A : Type) (B : Type*) : A₊ →* B ≃ (A → B) :=
begin
fapply equiv.MK,
{ intro f a, cases f with f p, exact f (some a)},
{ intro f, fconstructor,
intro a, cases a, exact pt, exact f a,
reflexivity},
{ intro f, reflexivity},
{ intro f, cases f with f p, esimp, fapply pmap_eq,
{ intro a, cases a; all_goals (esimp at *), exact p⁻¹},
{ esimp, exact !con.left_inv⁻¹}},
end
definition pmap_equiv_right (A : Type*) (B : Type)
: (Σ(b : B), A →* (pointed.Mk b)) ≃ (A → B) :=
begin
fapply equiv.MK,
{ intro u a, exact pmap.to_fun u.2 a},
{ intro f, refine ⟨f pt, _⟩, fapply pmap.mk,
intro a, esimp, exact f a,
reflexivity},
{ intro f, reflexivity},
{ intro u, cases u with b f, cases f with f p, esimp at *, induction p,
reflexivity}
end
-- the pointed version of this equivalence, `pmap_pbool_pequiv`, is defined further below
definition pmap_pbool_equiv [constructor] (B : Type*) : (pbool →* B) ≃ B :=
begin
fapply equiv.MK,
{ intro f, cases f with f p, exact f tt},
{ intro b, fconstructor,
intro u, cases u, exact pt, exact b,
reflexivity},
{ intro b, reflexivity},
{ intro f, cases f with f p, esimp, fapply pmap_eq,
{ intro a, cases a; all_goals (esimp at *), exact p⁻¹},
{ esimp, exact !con.left_inv⁻¹}},
end
/- some specific pointed maps -/
-- The constant pointed map between any two types
definition pconst [constructor] (A B : Type*) : A →* B :=
pmap.mk (λ a, Point B) idp
-- the pointed type of pointed maps
definition ppmap [constructor] (A B : Type*) : Type* :=
pType.mk (A →* B) (pconst A B)
definition pcast [constructor] {A B : Type*} (p : A = B) : A →* B :=
pmap.mk (cast (ap pType.carrier p)) (by induction p; reflexivity)
definition pinverse [constructor] {X : Type*} : Ω X →* Ω X :=
pmap.mk eq.inverse idp
definition ap1 [constructor] (f : A →* B) : Ω A →* Ω B :=
begin
fconstructor,
{ intro p, exact !respect_pt⁻¹ ⬝ ap f p ⬝ !respect_pt},
{ esimp, apply con.left_inv}
end
definition apn (n : ℕ) (f : A →* B) : Ω[n] A →* Ω[n] B :=
begin
induction n with n IH,
{ exact f},
{ esimp [loopn], exact ap1 IH}
end
prefix `Ω→`:(max+5) := ap1
notation `Ω→[`:95 n:0 `]`:0 := apn n
definition ptransport [constructor] {A : Type} (B : A → Type*) {a a' : A} (p : a = a')
: B a →* B a' :=
pmap.mk (transport B p) (apdt (λa, Point (B a)) p)
definition pmap_of_eq_pt [constructor] {A : Type} {a a' : A} (p : a = a') :
pointed.MK A a →* pointed.MK A a' :=
pmap.mk id p
definition pbool_pmap [constructor] {A : Type*} (a : A) : pbool →* A :=
pmap.mk (bool.rec pt a) idp
-- properties of pointed maps
definition apn_zero [unfold_full] (f : A →* B) : Ω→[0] f = f := idp
definition apn_succ [unfold_full] (n : ℕ) (f : A →* B) : Ω→[n + 1] f = Ω→ (Ω→[n] f) := idp
theorem ap1_con (f : A →* B) (p q : Ω A) : ap1 f (p ⬝ q) = ap1 f p ⬝ ap1 f q :=
begin
rewrite [▸*,ap_con, +con.assoc, con_inv_cancel_left], repeat apply whisker_left
end
theorem ap1_inv (f : A →* B) (p : Ω A) : ap1 f p⁻¹ = (ap1 f p)⁻¹ :=
begin
rewrite [▸*,ap_inv, +con_inv, inv_inv, +con.assoc], repeat apply whisker_left
end
definition pinverse_con [constructor] {X : Type*} (p q : Ω X)
: pinverse (p ⬝ q) = pinverse q ⬝ pinverse p :=
!con_inv
definition pinverse_inv [constructor] {X : Type*} (p : Ω X)
: pinverse p⁻¹ = (pinverse p)⁻¹ :=
idp
theorem apn_con (n : ℕ) (f : A →* B) (p q : Ω[n+1] A)
: apn (n+1) f (p ⬝ q) = apn (n+1) f p ⬝ apn (n+1) f q :=
by rewrite [+apn_succ, ap1_con]
theorem apn_inv (n : ℕ) (f : A →* B) (p : Ω[n+1] A) : apn (n+1) f p⁻¹ = (apn (n+1) f p)⁻¹ :=
by rewrite [+apn_succ, ap1_inv]
definition is_equiv_ap1 (f : A →* B) [is_equiv f] : is_equiv (ap1 f) :=
begin
induction B with B b, induction f with f pf, esimp at *, cases pf, esimp,
apply is_equiv.homotopy_closed (ap f),
intro p, exact !idp_con⁻¹
end
definition is_equiv_apn (n : ℕ) (f : A →* B) [H : is_equiv f]
: is_equiv (apn n f) :=
begin
induction n with n IH,
{ exact H},
{ exact is_equiv_ap1 (apn n f)}
end
definition is_equiv_pcast [instance] {A B : Type*} (p : A = B) : is_equiv (pcast p) :=
!is_equiv_cast
/- categorical properties of pointed homotopies -/
protected definition phomotopy.refl [constructor] [refl] (f : A →* B) : f ~* f :=
begin
fconstructor,
{ intro a, exact idp},
{ apply idp_con}
end
protected definition phomotopy.rfl [constructor] {f : A →* B} : f ~* f :=
phomotopy.refl f
protected definition phomotopy.trans [constructor] [trans] (p : f ~* g) (q : g ~* h)
: f ~* h :=
phomotopy.mk (λa, p a ⬝ q a)
abstract begin
induction f, induction g, induction p with p p', induction q with q q', esimp at *,
induction p', induction q', esimp, apply con.assoc
end end
protected definition phomotopy.symm [constructor] [symm] (p : f ~* g) : g ~* f :=
phomotopy.mk (λa, (p a)⁻¹)
abstract begin
induction f, induction p with p p', esimp at *,
induction p', esimp, apply inv_con_cancel_left
end end
infix ` ⬝* `:75 := phomotopy.trans
postfix `⁻¹*`:(max+1) := phomotopy.symm
/- equalities and equivalences relating pointed homotopies -/
definition phomotopy.sigma_char [constructor] {A B : Type*} (f g : A →* B)
: (f ~* g) ≃ Σ(p : f ~ g), p pt ⬝ respect_pt g = respect_pt f :=
begin
fapply equiv.MK : intros h,
{ exact ⟨h , to_homotopy_pt h⟩ },
all_goals cases h with h p,
{ exact phomotopy.mk h p },
all_goals reflexivity
end
definition is_trunc_pmap [instance] (n : ℕ₋₂) (A B : Type*) [is_trunc n B] :
is_trunc n (A →* B) :=
is_trunc_equiv_closed_rev _ !pmap.sigma_char
definition is_trunc_ppmap [instance] (n : ℕ₋₂) {A B : Type*} [is_trunc n B] :
is_trunc n (ppmap A B) :=
!is_trunc_pmap
definition phomotopy_of_eq [constructor] {A B : Type*} {f g : A →* B} (p : f = g) : f ~* g :=
phomotopy.mk (ap010 pmap.to_fun p) begin induction p, apply idp_con end
definition pconcat_eq [constructor] {A B : Type*} {f g h : A →* B} (p : f ~* g) (q : g = h)
: f ~* h :=
p ⬝* phomotopy_of_eq q
definition eq_pconcat [constructor] {A B : Type*} {f g h : A →* B} (p : f = g) (q : g ~* h)
: f ~* h :=
phomotopy_of_eq p ⬝* q
infix ` ⬝*p `:75 := pconcat_eq
infix ` ⬝p* `:75 := eq_pconcat
definition eq_of_phomotopy (p : f ~* g) : f = g :=
begin
fapply pmap_eq,
{ intro a, exact p a},
{ exact !to_homotopy_pt⁻¹}
end
definition pmap_eq_equiv {A B : Type*} (f g : A →* B) : (f = g) ≃ (f ~* g) :=
calc (f = g) ≃ pmap.sigma_char f = pmap.sigma_char g
: eq_equiv_fn_eq pmap.sigma_char f g
... ≃ Σ(p : pmap.to_fun f = pmap.to_fun g),
pathover (λh, h pt = pt) (respect_pt f) p (respect_pt g)
: sigma_eq_equiv _ _
... ≃ Σ(p : pmap.to_fun f = pmap.to_fun g), respect_pt f = ap (λh, h pt) p ⬝ respect_pt g
: sigma_equiv_sigma_right (λp, eq_pathover_equiv_Fl p (respect_pt f)
(respect_pt g))
... ≃ Σ(p : pmap.to_fun f = pmap.to_fun g), respect_pt f = ap10 p pt ⬝ respect_pt g
: sigma_equiv_sigma_right
(λp, equiv_eq_closed_right _ (whisker_right _ (ap_eq_apd10 p _)))
... ≃ Σ(p : pmap.to_fun f ~ pmap.to_fun g), respect_pt f = p pt ⬝ respect_pt g
: sigma_equiv_sigma_left' eq_equiv_homotopy
... ≃ Σ(p : pmap.to_fun f ~ pmap.to_fun g), p pt ⬝ respect_pt g = respect_pt f
: sigma_equiv_sigma_right (λp, eq_equiv_eq_symm _ _)
... ≃ (f ~* g) : phomotopy.sigma_char f g
/-
Pointed maps respecting pointed homotopies.
In general we need function extensionality for `pap`,
but for particular `F` we can construct it without function extensionality.
This is preferred, because such pointed homotopies compute.
-/
definition pap (F : (A →* B) → (C →* D)) {f g : A →* B} (p : f ~* g) : F f ~* F g :=
phomotopy.mk (ap010 (λf, pmap.to_fun (F f)) (eq_of_phomotopy p))
begin cases eq_of_phomotopy p, apply idp_con end
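-- Examples of the funext-free approach mentioned above: `ap1_phomotopy` below and
-- `pwhisker_left` / `pwhisker_right` further down build the pointed homotopy directly with
-- `phomotopy.mk`, so they compute, whereas `pap` goes through `eq_of_phomotopy`.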
definition ap1_phomotopy {f g : A →* B} (p : f ~* g)
: ap1 f ~* ap1 g :=
begin
induction p with p q, induction f with f pf, induction g with g pg, induction B with B b,
esimp at *, induction q, induction pg,
fapply phomotopy.mk,
{ intro l, esimp, refine _ ⬝ !idp_con⁻¹, refine !con.assoc ⬝ _, apply inv_con_eq_of_eq_con,
apply ap_con_eq_con_ap},
{ induction A with A a, unfold [ap_con_eq_con_ap], generalize p a, generalize g a, intro b q,
induction q, reflexivity}
end
definition apn_phomotopy {f g : A →* B} (n : ℕ) (p : f ~* g) : apn n f ~* apn n g :=
begin
induction n with n IH,
{ exact p},
{ exact ap1_phomotopy IH}
end
/- pointed homotopies between the given pointed maps -/
definition ap1_pid [constructor] {A : Type*} : ap1 (pid A) ~* pid (Ω A) :=
begin
fapply phomotopy.mk,
{ intro p, esimp, refine !idp_con ⬝ !ap_id},
{ reflexivity}
end
definition ap1_pinverse {A : Type*} : ap1 (@pinverse A) ~* @pinverse (Ω A) :=
begin
fapply phomotopy.mk,
{ intro p, esimp, refine !idp_con ⬝ _, exact !inv_eq_inv2⁻¹ },
{ reflexivity}
end
definition ap1_pcompose (g : B →* C) (f : A →* B) : ap1 (g ∘* f) ~* ap1 g ∘* ap1 f :=
begin
induction B, induction C, induction g with g pg, induction f with f pf, esimp at *,
induction pg, induction pf,
fconstructor,
{ intro p, esimp, apply whisker_left, exact ap_compose g f p ⬝ ap (ap g) !idp_con⁻¹},
{ reflexivity}
end
definition ap1_pcompose_pinverse (f : A →* B) : ap1 f ∘* pinverse ~* pinverse ∘* ap1 f :=
begin
fconstructor,
{ intro p, esimp, refine !con.assoc ⬝ _ ⬝ !con_inv⁻¹, apply whisker_left,
refine whisker_right _ !ap_inv ⬝ _ ⬝ !con_inv⁻¹, apply whisker_left,
exact !inv_inv⁻¹},
{ induction B with B b, induction f with f pf, esimp at *, induction pf, reflexivity},
end
definition ap1_pconst (A B : Type*) : Ω→(pconst A B) ~* pconst (Ω A) (Ω B) :=
phomotopy.mk (λp, idp_con _ ⬝ ap_constant p pt) rfl
definition ptransport_change_eq [constructor] {A : Type} (B : A → Type*) {a a' : A} {p q : a = a'}
(r : p = q) : ptransport B p ~* ptransport B q :=
phomotopy.mk (λb, ap (λp, transport B p b) r) begin induction r, apply idp_con end
definition pnatural_square {A B : Type} (X : B → Type*) {f g : A → B}
(h : Πa, X (f a) →* X (g a)) {a a' : A} (p : a = a') :
h a' ∘* ptransport X (ap f p) ~* ptransport X (ap g p) ∘* h a :=
by induction p; exact !pcompose_pid ⬝* !pid_pcompose⁻¹*
definition apn_pid [constructor] {A : Type*} (n : ℕ) : apn n (pid A) ~* pid (Ω[n] A) :=
begin
induction n with n IH,
{ reflexivity},
{ exact ap1_phomotopy IH ⬝* ap1_pid}
end
definition apn_pconst (A B : Type*) (n : ℕ) :
apn n (pconst A B) ~* pconst (Ω[n] A) (Ω[n] B) :=
begin
induction n with n IH,
{ reflexivity },
{ exact ap1_phomotopy IH ⬝* !ap1_pconst }
end
definition apn_pcompose (n : ℕ) (g : B →* C) (f : A →* B) :
apn n (g ∘* f) ~* apn n g ∘* apn n f :=
begin
induction n with n IH,
{ reflexivity},
{ refine ap1_phomotopy IH ⬝* _, apply ap1_pcompose}
end
definition pcast_idp [constructor] {A : Type*} : pcast (idpath A) ~* pid A :=
by reflexivity
definition pinverse_pinverse (A : Type*) : pinverse ∘* pinverse ~* pid (Ω A) :=
begin
fapply phomotopy.mk,
{ apply inv_inv},
{ reflexivity}
end
definition pcast_ap_loop [constructor] {A B : Type*} (p : A = B) :
pcast (ap Ω p) ~* ap1 (pcast p) :=
begin
fapply phomotopy.mk,
{ intro a, induction p, esimp, exact (!idp_con ⬝ !ap_id)⁻¹},
{ induction p, reflexivity}
end
definition ap1_pmap_of_map [constructor] {A B : Type} (f : A → B) (a : A) :
ap1 (pmap_of_map f a) ~* pmap_of_map (ap f) (idpath a) :=
begin
fapply phomotopy.mk,
{ intro a, esimp, apply idp_con},
{ reflexivity}
end
definition pcast_commute [constructor] {A : Type} {B C : A → Type*} (f : Πa, B a →* C a)
{a₁ a₂ : A} (p : a₁ = a₂) : pcast (ap C p) ∘* f a₁ ~* f a₂ ∘* pcast (ap B p) :=
phomotopy.mk
begin induction p, reflexivity end
begin induction p, esimp, refine !idp_con ⬝ !idp_con ⬝ !ap_id⁻¹ end
/- pointed equivalences -/
/- constructors / projections + variants -/
definition pequiv_of_pmap [constructor] (f : A →* B) (H : is_equiv f) : A ≃* B :=
pequiv.mk f _ (respect_pt f)
definition pequiv_of_equiv [constructor] (f : A ≃ B) (H : f pt = pt) : A ≃* B :=
pequiv.mk f _ H
protected definition pequiv.MK [constructor] (f : A →* B) (g : B → A)
(gf : Πa, g (f a) = a) (fg : Πb, f (g b) = b) : A ≃* B :=
pequiv.mk f (adjointify f g fg gf) (respect_pt f)
definition equiv_of_pequiv [constructor] (f : A ≃* B) : A ≃ B :=
equiv.mk f _
definition to_pinv [constructor] (f : A ≃* B) : B →* A :=
pmap.mk f⁻¹ ((ap f⁻¹ (respect_pt f))⁻¹ ⬝ left_inv f pt)
definition to_pmap_pequiv_of_pmap {A B : Type*} (f : A →* B) (H : is_equiv f)
: pequiv.to_pmap (pequiv_of_pmap f H) = f :=
by cases f; reflexivity
/-
A version of pequiv.MK with stronger conditions.
The advantage of defining a pointed equivalence with this definition is that there is a
pointed homotopy between the inverse of the resulting equivalence and the given pointed map g.
This is not the case when using `pequiv.MK` (if g is a pointed map),
which will only give an ordinary homotopy.
-/
protected definition pequiv.MK2 [constructor] (f : A →* B) (g : B →* A)
(gf : g ∘* f ~* !pid) (fg : f ∘* g ~* !pid) : A ≃* B :=
pequiv.MK f g gf fg
definition to_pmap_pequiv_MK2 [constructor] (f : A →* B) (g : B →* A)
(gf : g ∘* f ~* !pid) (fg : f ∘* g ~* !pid) : pequiv.MK2 f g gf fg ~* f :=
phomotopy.mk (λb, idp) !idp_con
definition to_pinv_pequiv_MK2 [constructor] (f : A →* B) (g : B →* A)
(gf : g ∘* f ~* !pid) (fg : f ∘* g ~* !pid) : to_pinv (pequiv.MK2 f g gf fg) ~* g :=
phomotopy.mk (λb, idp)
abstract [irreducible] begin
esimp,
note H := to_homotopy_pt gf, note H2 := to_homotopy_pt fg,
note H3 := eq_top_of_square (natural_square (to_homotopy fg) (respect_pt f)),
rewrite [▸* at *, H, H3, H2, ap_id, - +con.assoc, ap_compose' f g, con_inv,
- ap_inv, - +ap_con g],
apply whisker_right, apply ap02 g,
rewrite [ap_con, - + con.assoc, +ap_inv, +inv_con_cancel_right, con.left_inv],
end end
/- categorical properties of pointed equivalences -/
protected definition pequiv.refl [refl] [constructor] (A : Type*) : A ≃* A :=
pequiv_of_pmap !pid !is_equiv_id
protected definition pequiv.rfl [constructor] : A ≃* A :=
pequiv.refl A
protected definition pequiv.symm [symm] [constructor] (f : A ≃* B) : B ≃* A :=
pequiv_of_pmap (to_pinv f) !is_equiv_inv
protected definition pequiv.trans [trans] [constructor] (f : A ≃* B) (g : B ≃* C) : A ≃* C :=
pequiv_of_pmap (g ∘* f) !is_equiv_compose
definition pequiv_compose {A B C : Type*} (g : B ≃* C) (f : A ≃* B) : A ≃* C :=
pequiv_of_pmap (g ∘* f) (is_equiv_compose g f)
infixr ` ∘*ᵉ `:60 := pequiv_compose
postfix `⁻¹ᵉ*`:(max + 1) := pequiv.symm
infix ` ⬝e* `:75 := pequiv.trans
/- more on pointed equivalences -/
definition pequiv_ap [constructor] {A : Type} (B : A → Type*) {a a' : A} (p : a = a')
: B a ≃* B a' :=
pequiv_of_pmap (ptransport B p) !is_equiv_tr
definition to_pmap_pequiv_trans {A B C : Type*} (f : A ≃* B) (g : B ≃* C)
: pequiv.to_pmap (f ⬝e* g) = g ∘* f :=
!to_pmap_pequiv_of_pmap
definition pequiv_change_fun [constructor] (f : A ≃* B) (f' : A →* B) (Heq : f ~ f') : A ≃* B :=
pequiv_of_pmap f' (is_equiv.homotopy_closed f Heq)
definition pequiv_change_inv [constructor] (f : A ≃* B) (f' : B →* A) (Heq : to_pinv f ~ f')
: A ≃* B :=
pequiv.MK f f' (to_left_inv (equiv_change_inv f Heq)) (to_right_inv (equiv_change_inv f Heq))
definition pequiv_rect' (f : A ≃* B) (P : A → B → Type)
(g : Πb, P (f⁻¹ b) b) (a : A) : P a (f a) :=
left_inv f a ▸ g (f a)
definition pua {A B : Type*} (f : A ≃* B) : A = B :=
pType_eq (equiv_of_pequiv f) !respect_pt
definition pequiv_of_eq [constructor] {A B : Type*} (p : A = B) : A ≃* B :=
pequiv_of_pmap (pcast p) !is_equiv_tr
definition peconcat_eq {A B C : Type*} (p : A ≃* B) (q : B = C) : A ≃* C :=
p ⬝e* pequiv_of_eq q
definition eq_peconcat {A B C : Type*} (p : A = B) (q : B ≃* C) : A ≃* C :=
pequiv_of_eq p ⬝e* q
definition eq_of_pequiv {A B : Type*} (p : A ≃* B) : A = B :=
pType_eq (equiv_of_pequiv p) !respect_pt
definition peap {A B : Type*} (F : Type* → Type*) (p : A ≃* B) : F A ≃* F B :=
pequiv_of_pmap (pcast (ap F (eq_of_pequiv p))) begin cases eq_of_pequiv p, apply is_equiv_id end
infix ` ⬝e*p `:75 := peconcat_eq
infix ` ⬝pe* `:75 := eq_peconcat
definition pequiv_of_eq_commute [constructor] {A : Type} {B C : A → Type*} (f : Πa, B a →* C a)
{a₁ a₂ : A} (p : a₁ = a₂) : pequiv_of_eq (ap C p) ∘* f a₁ ~* f a₂ ∘* pequiv_of_eq (ap B p) :=
pcast_commute f p
/-
The theorem pequiv_eq, which gives a condition for two pointed equivalences to be equal,
is in types.equiv to avoid circular imports.
-/
/- computation rules of pointed homotopies, possibly combined with pointed equivalences -/
definition pwhisker_left [constructor] (h : B →* C) (p : f ~* g) : h ∘* f ~* h ∘* g :=
phomotopy.mk (λa, ap h (p a))
abstract begin
induction A, induction B, induction C,
induction f with f pf, induction g with g pg, induction h with h ph,
induction p with p p', esimp at *, induction ph, induction pg, induction p', reflexivity
end end
definition pwhisker_right [constructor] (h : C →* A) (p : f ~* g) : f ∘* h ~* g ∘* h :=
phomotopy.mk (λa, p (h a))
abstract begin
induction A, induction B, induction C,
induction f with f pf, induction g with g pg, induction h with h ph,
induction p with p p', esimp at *, induction ph, induction pg, induction p', esimp,
exact !idp_con⁻¹
end end
definition pconcat2 [constructor] {A B C : Type*} {h i : B →* C} {f g : A →* B}
(q : h ~* i) (p : f ~* g) : h ∘* f ~* i ∘* g :=
pwhisker_left _ p ⬝* pwhisker_right _ q
definition pleft_inv (f : A ≃* B) : f⁻¹ᵉ* ∘* f ~* pid A :=
phomotopy.mk (left_inv f)
abstract begin
esimp, symmetry, apply con_inv_cancel_left
end end
definition pright_inv (f : A ≃* B) : f ∘* f⁻¹ᵉ* ~* pid B :=
phomotopy.mk (right_inv f)
abstract begin
induction f with f H p, esimp,
rewrite [ap_con, +ap_inv, -adj f, -ap_compose],
note q := natural_square (right_inv f) p,
rewrite [ap_id at q],
apply eq_bot_of_square,
exact q
end end
definition pcancel_left (f : B ≃* C) {g h : A →* B} (p : f ∘* g ~* f ∘* h) : g ~* h :=
begin
refine _⁻¹* ⬝* pwhisker_left f⁻¹ᵉ* p ⬝* _:
refine !passoc⁻¹* ⬝* _:
refine pwhisker_right _ (pleft_inv f) ⬝* _:
apply pid_pcompose
end
definition pcancel_right (f : A ≃* B) {g h : B →* C} (p : g ∘* f ~* h ∘* f) : g ~* h :=
begin
refine _⁻¹* ⬝* pwhisker_right f⁻¹ᵉ* p ⬝* _:
refine !passoc ⬝* _:
refine pwhisker_left _ (pright_inv f) ⬝* _:
apply pcompose_pid
end
definition phomotopy_pinv_right_of_phomotopy {f : A ≃* B} {g : B →* C} {h : A →* C}
(p : g ∘* f ~* h) : g ~* h ∘* f⁻¹ᵉ* :=
begin
refine _ ⬝* pwhisker_right _ p, symmetry,
refine !passoc ⬝* _,
refine pwhisker_left _ (pright_inv f) ⬝* _,
apply pcompose_pid
end
definition phomotopy_of_pinv_right_phomotopy {f : B ≃* A} {g : B →* C} {h : A →* C}
(p : g ∘* f⁻¹ᵉ* ~* h) : g ~* h ∘* f :=
begin
refine _ ⬝* pwhisker_right _ p, symmetry,
refine !passoc ⬝* _,
refine pwhisker_left _ (pleft_inv f) ⬝* _,
apply pcompose_pid
end
definition pinv_right_phomotopy_of_phomotopy {f : A ≃* B} {g : B →* C} {h : A →* C}
(p : h ~* g ∘* f) : h ∘* f⁻¹ᵉ* ~* g :=
(phomotopy_pinv_right_of_phomotopy p⁻¹*)⁻¹*
definition phomotopy_of_phomotopy_pinv_right {f : B ≃* A} {g : B →* C} {h : A →* C}
(p : h ~* g ∘* f⁻¹ᵉ*) : h ∘* f ~* g :=
(phomotopy_of_pinv_right_phomotopy p⁻¹*)⁻¹*
definition phomotopy_pinv_left_of_phomotopy {f : B ≃* C} {g : A →* B} {h : A →* C}
(p : f ∘* g ~* h) : g ~* f⁻¹ᵉ* ∘* h :=
begin
refine _ ⬝* pwhisker_left _ p, symmetry,
refine !passoc⁻¹* ⬝* _,
refine pwhisker_right _ (pleft_inv f) ⬝* _,
apply pid_pcompose
end
definition phomotopy_of_pinv_left_phomotopy {f : C ≃* B} {g : A →* B} {h : A →* C}
(p : f⁻¹ᵉ* ∘* g ~* h) : g ~* f ∘* h :=
begin
refine _ ⬝* pwhisker_left _ p, symmetry,
refine !passoc⁻¹* ⬝* _,
refine pwhisker_right _ (pright_inv f) ⬝* _,
apply pid_pcompose
end
definition pinv_left_phomotopy_of_phomotopy {f : B ≃* C} {g : A →* B} {h : A →* C}
(p : h ~* f ∘* g) : f⁻¹ᵉ* ∘* h ~* g :=
(phomotopy_pinv_left_of_phomotopy p⁻¹*)⁻¹*
definition phomotopy_of_phomotopy_pinv_left {f : C ≃* B} {g : A →* B} {h : A →* C}
(p : h ~* f⁻¹ᵉ* ∘* g) : f ∘* h ~* g :=
(phomotopy_of_pinv_left_phomotopy p⁻¹*)⁻¹*
definition pcompose2 {A B C : Type*} {g g' : B →* C} {f f' : A →* B} (p : f ~* f') (q : g ~* g') :
g ∘* f ~* g' ∘* f' :=
pwhisker_right f q ⬝* pwhisker_left g' p
infixr ` ◾* `:80 := pcompose2
definition phomotopy_pinv_of_phomotopy_pid {A B : Type*} {f : A →* B} {g : B ≃* A}
(p : g ∘* f ~* pid A) : f ~* g⁻¹ᵉ* :=
phomotopy_pinv_left_of_phomotopy p ⬝* !pcompose_pid
definition phomotopy_pinv_of_phomotopy_pid' {A B : Type*} {f : A →* B} {g : B ≃* A}
(p : f ∘* g ~* pid B) : f ~* g⁻¹ᵉ* :=
phomotopy_pinv_right_of_phomotopy p ⬝* !pid_pcompose
definition pinv_phomotopy_of_pid_phomotopy {A B : Type*} {f : A →* B} {g : B ≃* A}
(p : pid A ~* g ∘* f) : g⁻¹ᵉ* ~* f :=
(phomotopy_pinv_of_phomotopy_pid p⁻¹*)⁻¹*
definition pinv_phomotopy_of_pid_phomotopy' {A B : Type*} {f : A →* B} {g : B ≃* A}
(p : pid B ~* f ∘* g) : g⁻¹ᵉ* ~* f :=
(phomotopy_pinv_of_phomotopy_pid' p⁻¹*)⁻¹*
definition pinv_pinv {A B : Type*} (f : A ≃* B) : (f⁻¹ᵉ*)⁻¹ᵉ* ~* f :=
(phomotopy_pinv_of_phomotopy_pid (pleft_inv f))⁻¹*
definition pinv2 {A B : Type*} {f f' : A ≃* B} (p : f ~* f') : f⁻¹ᵉ* ~* f'⁻¹ᵉ* :=
phomotopy_pinv_of_phomotopy_pid (pinv_right_phomotopy_of_phomotopy (!pid_pcompose ⬝* p)⁻¹*)
postfix [parsing_only] `⁻²*`:(max+10) := pinv2
definition trans_pinv {A B C : Type*} (f : A ≃* B) (g : B ≃* C) :
(f ⬝e* g)⁻¹ᵉ* ~* f⁻¹ᵉ* ∘* g⁻¹ᵉ* :=
begin
refine (phomotopy_pinv_of_phomotopy_pid _)⁻¹*,
refine !passoc ⬝* _,
refine pwhisker_left _ (!passoc⁻¹* ⬝* pwhisker_right _ !pright_inv ⬝* !pid_pcompose) ⬝* _,
apply pright_inv
end
definition pinv_trans_pinv_left {A B C : Type*} (f : B ≃* A) (g : B ≃* C) :
(f⁻¹ᵉ* ⬝e* g)⁻¹ᵉ* ~* f ∘* g⁻¹ᵉ* :=
!trans_pinv ⬝* pwhisker_right _ !pinv_pinv
definition pinv_trans_pinv_right {A B C : Type*} (f : A ≃* B) (g : C ≃* B) :
(f ⬝e* g⁻¹ᵉ*)⁻¹ᵉ* ~* f⁻¹ᵉ* ∘* g :=
!trans_pinv ⬝* pwhisker_left _ !pinv_pinv
definition pinv_trans_pinv_pinv {A B C : Type*} (f : B ≃* A) (g : C ≃* B) :
(f⁻¹ᵉ* ⬝e* g⁻¹ᵉ*)⁻¹ᵉ* ~* f ∘* g :=
!trans_pinv ⬝* !pinv_pinv ◾* !pinv_pinv
definition pinv_pcompose_cancel_left {A B C : Type*} (g : B ≃* C) (f : A →* B) :
g⁻¹ᵉ* ∘* (g ∘* f) ~* f :=
!passoc⁻¹* ⬝* pwhisker_right f !pleft_inv ⬝* !pid_pcompose
definition pcompose_pinv_cancel_left {A B C : Type*} (g : C ≃* B) (f : A →* B) :
g ∘* (g⁻¹ᵉ* ∘* f) ~* f :=
!passoc⁻¹* ⬝* pwhisker_right f !pright_inv ⬝* !pid_pcompose
definition pinv_pcompose_cancel_right {A B C : Type*} (g : B →* C) (f : B ≃* A) :
(g ∘* f⁻¹ᵉ*) ∘* f ~* g :=
!passoc ⬝* pwhisker_left g !pleft_inv ⬝* !pcompose_pid
definition pcompose_pinv_cancel_right {A B C : Type*} (g : B →* C) (f : A ≃* B) :
(g ∘* f) ∘* f⁻¹ᵉ* ~* g :=
!passoc ⬝* pwhisker_left g !pright_inv ⬝* !pcompose_pid
/- pointed equivalences between particular pointed types -/
definition loopn_pequiv_loopn [constructor] (n : ℕ) (f : A ≃* B) : Ω[n] A ≃* Ω[n] B :=
pequiv.MK2 (apn n f) (apn n f⁻¹ᵉ*)
abstract begin
induction n with n IH,
{ apply pleft_inv},
{ replace nat.succ n with n + 1,
rewrite [+apn_succ],
refine !ap1_pcompose⁻¹* ⬝* _,
refine ap1_phomotopy IH ⬝* _,
apply ap1_pid}
end end
abstract begin
induction n with n IH,
{ apply pright_inv},
{ replace nat.succ n with n + 1,
rewrite [+apn_succ],
refine !ap1_pcompose⁻¹* ⬝* _,
refine ap1_phomotopy IH ⬝* _,
apply ap1_pid}
end end
definition loop_pequiv_loop [constructor] (f : A ≃* B) : Ω A ≃* Ω B :=
loopn_pequiv_loopn 1 f
definition to_pmap_loopn_pequiv_loopn [constructor] (n : ℕ) (f : A ≃* B)
: loopn_pequiv_loopn n f ~* apn n f :=
!to_pmap_pequiv_MK2
definition to_pinv_loopn_pequiv_loopn [constructor] (n : ℕ) (f : A ≃* B)
: (loopn_pequiv_loopn n f)⁻¹ᵉ* ~* apn n f⁻¹ᵉ* :=
!to_pinv_pequiv_MK2
definition loopn_pequiv_loopn_con (n : ℕ) (f : A ≃* B) (p q : Ω[n+1] A)
: loopn_pequiv_loopn (n+1) f (p ⬝ q) =
loopn_pequiv_loopn (n+1) f p ⬝ loopn_pequiv_loopn (n+1) f q :=
ap1_con (loopn_pequiv_loopn n f) p q
definition loop_pequiv_loop_con {A B : Type*} (f : A ≃* B) (p q : Ω A)
: loop_pequiv_loop f (p ⬝ q) = loop_pequiv_loop f p ⬝ loop_pequiv_loop f q :=
loopn_pequiv_loopn_con 0 f p q
definition loopn_pequiv_loopn_rfl (n : ℕ) (A : Type*) :
loopn_pequiv_loopn n (pequiv.refl A) ~* pequiv.refl (Ω[n] A) :=
begin
exact !to_pmap_loopn_pequiv_loopn ⬝* apn_pid n,
end
definition loop_pequiv_loop_rfl (A : Type*) :
loop_pequiv_loop (pequiv.refl A) ~* pequiv.refl (Ω A) :=
loopn_pequiv_loopn_rfl 1 A
definition pmap_functor [constructor] {A A' B B' : Type*} (f : A' →* A) (g : B →* B') :
ppmap A B →* ppmap A' B' :=
pmap.mk (λh, g ∘* h ∘* f)
abstract begin
fapply pmap_eq,
{ esimp, intro a, exact respect_pt g},
{ rewrite [▸*, ap_constant], apply idp_con}
end end
definition pequiv_pinverse (A : Type*) : Ω A ≃* Ω A :=
pequiv_of_pmap pinverse !is_equiv_eq_inverse
definition pequiv_of_eq_pt [constructor] {A : Type} {a a' : A} (p : a = a') :
pointed.MK A a ≃* pointed.MK A a' :=
pequiv_of_pmap (pmap_of_eq_pt p) !is_equiv_id
definition pointed_eta_pequiv [constructor] (A : Type*) : A ≃* pointed.MK A pt :=
pequiv.mk id !is_equiv_id idp
/- every pointed map is homotopic to one of the form `pmap_of_map _ _`, up to some
pointed equivalences -/
definition phomotopy_pmap_of_map {A B : Type*} (f : A →* B) :
(pointed_eta_pequiv B ⬝e* (pequiv_of_eq_pt (respect_pt f))⁻¹ᵉ*) ∘* f ∘*
(pointed_eta_pequiv A)⁻¹ᵉ* ~* pmap_of_map f pt :=
begin
fapply phomotopy.mk,
{ reflexivity},
{ esimp [pequiv.trans, pequiv.symm],
exact !con.right_inv⁻¹ ⬝ ((!idp_con⁻¹ ⬝ !ap_id⁻¹) ◾ (!ap_id⁻¹⁻² ⬝ !idp_con⁻¹)), }
end
/- -- TODO
definition pmap_pequiv_pmap {A A' B B' : Type*} (f : A ≃* A') (g : B ≃* B') :
ppmap A B ≃* ppmap A' B' :=
pequiv.MK (pmap_functor f⁻¹ᵉ* g) (pmap_functor f g⁻¹ᵉ*)
abstract begin
intro a, esimp, apply pmap_eq,
{ esimp, },
{ }
end end
abstract begin
end end
-/
/- properties of iterated loop space -/
variable (A)
definition loopn_succ_in (n : ℕ) : Ω[succ n] A ≃* Ω[n] (Ω A) :=
begin
induction n with n IH,
{ reflexivity},
{ exact loop_pequiv_loop IH}
end
definition loopn_add (n m : ℕ) : Ω[n] (Ω[m] A) ≃* Ω[m+n] (A) :=
begin
induction n with n IH,
{ reflexivity},
{ exact loop_pequiv_loop IH}
end
definition loopn_succ_out (n : ℕ) : Ω[succ n] A ≃* Ω(Ω[n] A) :=
by reflexivity
variable {A}
theorem loopn_succ_in_con {n : ℕ} (p q : Ω[succ (succ n)] A) :
loopn_succ_in A (succ n) (p ⬝ q) =
loopn_succ_in A (succ n) p ⬝ loopn_succ_in A (succ n) q :=
!loop_pequiv_loop_con
definition loopn_loop_irrel (p : point A = point A) : Ω(pointed.Mk p) = Ω[2] A :=
begin
intros, fapply pType_eq,
{ esimp, transitivity _,
apply eq_equiv_fn_eq_of_equiv (equiv_eq_closed_right _ p⁻¹),
esimp, apply eq_equiv_eq_closed, apply con.right_inv, apply con.right_inv},
{ esimp, apply con.left_inv}
end
definition loopn_space_loop_irrel (n : ℕ) (p : point A = point A)
: Ω[succ n](pointed.Mk p) = Ω[succ (succ n)] A :> pType :=
calc
Ω[succ n](pointed.Mk p) = Ω[n](Ω (pointed.Mk p)) : eq_of_pequiv !loopn_succ_in
... = Ω[n] (Ω[2] A) : loopn_loop_irrel
... = Ω[2+n] A : eq_of_pequiv !loopn_add
... = Ω[n+2] A : by rewrite [algebra.add.comm]
definition apn_succ_phomotopy_in (n : ℕ) (f : A →* B) :
loopn_succ_in B n ∘* Ω→[n + 1] f ~* Ω→[n] (Ω→ f) ∘* loopn_succ_in A n :=
begin
induction n with n IH,
{ reflexivity},
{ exact !ap1_pcompose⁻¹* ⬝* ap1_phomotopy IH ⬝* !ap1_pcompose}
end
definition loopn_succ_in_natural {A B : Type*} (n : ℕ) (f : A →* B) :
loopn_succ_in B n ∘* Ω→[n+1] f ~* Ω→[n] (Ω→ f) ∘* loopn_succ_in A n :=
!apn_succ_phomotopy_in
definition loopn_succ_in_inv_natural {A B : Type*} (n : ℕ) (f : A →* B) :
Ω→[n + 1] f ∘* (loopn_succ_in A n)⁻¹ᵉ* ~* (loopn_succ_in B n)⁻¹ᵉ* ∘* Ω→[n] (Ω→ f):=
begin
apply pinv_right_phomotopy_of_phomotopy,
refine _ ⬝* !passoc⁻¹*,
apply phomotopy_pinv_left_of_phomotopy,
apply apn_succ_phomotopy_in
end
/- properties of ppmap, the pointed type of pointed maps -/
definition ppcompose_left [constructor] (g : B →* C) : ppmap A B →* ppmap A C :=
pmap.mk (pcompose g) (eq_of_phomotopy (phomotopy.mk (λa, respect_pt g) (idp_con _)⁻¹))
definition is_pequiv_ppcompose_left [instance] [constructor] (g : B →* C) [H : is_equiv g] :
is_equiv (@ppcompose_left A B C g) :=
begin
fapply is_equiv.adjointify,
{ exact (ppcompose_left (pequiv_of_pmap g H)⁻¹ᵉ*) },
all_goals (intros f; esimp; apply eq_of_phomotopy),
{ exact calc g ∘* ((pequiv_of_pmap g H)⁻¹ᵉ* ∘* f)
~* (g ∘* (pequiv_of_pmap g H)⁻¹ᵉ*) ∘* f : passoc
... ~* pid _ ∘* f : pwhisker_right f (pright_inv (pequiv_of_pmap g H))
... ~* f : pid_pcompose f },
{ exact calc (pequiv_of_pmap g H)⁻¹ᵉ* ∘* (g ∘* f)
~* ((pequiv_of_pmap g H)⁻¹ᵉ* ∘* g) ∘* f : passoc
... ~* pid _ ∘* f : pwhisker_right f (pleft_inv (pequiv_of_pmap g H))
... ~* f : pid_pcompose f }
end
definition pequiv_ppcompose_left [constructor] (g : B ≃* C) : ppmap A B ≃* ppmap A C :=
pequiv_of_pmap (ppcompose_left g) _
definition pcompose_pconst [constructor] (f : B →* C) : f ∘* pconst A B ~* pconst A C :=
phomotopy.mk (λa, respect_pt f) (idp_con _)⁻¹
definition pconst_pcompose [constructor] (f : A →* B) : pconst B C ∘* f ~* pconst A C :=
phomotopy.mk (λa, rfl) (ap_constant _ _)⁻¹
definition ppcompose_right [constructor] (f : A →* B) : ppmap B C →* ppmap A C :=
begin
fconstructor,
{ intro g, exact g ∘* f },
{ apply eq_of_phomotopy, esimp, apply pconst_pcompose }
end
definition pequiv_ppcompose_right [constructor] (f : A ≃* B) : ppmap B C ≃* ppmap A C :=
begin
fapply pequiv.MK,
{ exact ppcompose_right f },
{ exact ppcompose_right f⁻¹ᵉ* },
{ intro g, apply eq_of_phomotopy, refine !passoc ⬝* _,
refine pwhisker_left g !pright_inv ⬝* !pcompose_pid, },
{ intro g, apply eq_of_phomotopy, refine !passoc ⬝* _,
refine pwhisker_left g !pleft_inv ⬝* !pcompose_pid, },
end
definition loop_pmap_commute (A B : Type*) : Ω(ppmap A B) ≃* (ppmap A (Ω B)) :=
pequiv_of_equiv
(calc Ω(ppmap A B) ≃ (pconst A B ~* pconst A B) : pmap_eq_equiv _ _
... ≃ Σ(p : pconst A B ~ pconst A B), p pt ⬝ rfl = rfl : phomotopy.sigma_char
... ≃ (A →* Ω B) : pmap.sigma_char)
(by reflexivity)
definition papply [constructor] {A : Type*} (B : Type*) (a : A) : ppmap A B →* B :=
pmap.mk (λ(f : A →* B), f a) idp
definition papply_pcompose [constructor] {A : Type*} (B : Type*) (a : A) : ppmap A B →* B :=
pmap.mk (λ(f : A →* B), f a) idp
definition pmap_pbool_pequiv [constructor] (B : Type*) : ppmap pbool B ≃* B :=
begin
fapply pequiv.MK,
{ exact papply B tt },
{ exact pbool_pmap },
{ intro f, fapply pmap_eq,
{ intro b, cases b, exact !respect_pt⁻¹, reflexivity },
{ exact !con.left_inv⁻¹ }},
{ intro b, reflexivity },
end
definition papn_pt [constructor] (n : ℕ) (A B : Type*) : ppmap A B →* ppmap (Ω[n] A) (Ω[n] B) :=
pmap.mk (λf, apn n f) (eq_of_phomotopy !apn_pconst)
definition papn_fun [constructor] {n : ℕ} {A : Type*} (B : Type*) (p : Ω[n] A) :
ppmap A B →* Ω[n] B :=
papply _ p ∘* papn_pt n A B
end pointed
|
0c3cc8897f3d12f37b63cd6f0141cee5e5abb72e
|
302c785c90d40ad3d6be43d33bc6a558354cc2cf
|
/src/algebra/module/linear_map.lean
|
408f1b028bbce2d9c8120298e8b60022c85b2b64
|
[
"Apache-2.0"
] |
permissive
|
ilitzroth/mathlib
|
ea647e67f1fdfd19a0f7bdc5504e8acec6180011
|
5254ef14e3465f6504306132fe3ba9cec9ffff16
|
refs/heads/master
| 1,680,086,661,182
| 1,617,715,647,000
| 1,617,715,647,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 18,981
|
lean
|
/-
Copyright (c) 2020 Anne Baanen. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Nathaniel Thomas, Jeremy Avigad, Johannes Hölzl, Mario Carneiro, Anne Baanen
-/
import algebra.group.hom
import algebra.module.basic
import algebra.group_action_hom
/-!
# Linear maps and linear equivalences
In this file we define
* `linear_map R M M₂`, `M →ₗ[R] M₂` : a linear map between two R-`semimodule`s.
* `is_linear_map R f` : predicate saying that `f : M → M₂` is a linear map.
* `linear_equiv R M M₂`, `M ≃ₗ[R] M₂`: an invertible linear map
## Tags
linear map, linear equiv, linear equivalences, linear isomorphism, linear isomorphic
-/
open function
open_locale big_operators
universes u u' v w x y z
variables {R : Type u} {k : Type u'} {S : Type v} {M : Type w} {M₂ : Type x} {M₃ : Type y}
{ι : Type z}
/-- A map `f` between semimodules over a semiring is linear if it satisfies the two properties
`f (x + y) = f x + f y` and `f (c • x) = c • f x`. The predicate `is_linear_map R f` asserts this
property. A bundled version is available with `linear_map`, and should be favored over
`is_linear_map` most of the time. -/
structure is_linear_map (R : Type u) {M : Type v} {M₂ : Type w}
[semiring R] [add_comm_monoid M] [add_comm_monoid M₂] [semimodule R M] [semimodule R M₂]
(f : M → M₂) : Prop :=
(map_add : ∀ x y, f (x + y) = f x + f y)
(map_smul : ∀ (c : R) x, f (c • x) = c • f x)
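/- A hedged illustration (not part of the original file): the identity map on any semimodule
satisfies the unbundled `is_linear_map` predicate; both fields are closed by `rfl`. -/
example [semiring R] [add_comm_monoid M] [semimodule R M] :
  is_linear_map R (λ x : M, x) :=
⟨λ x y, rfl, λ c x, rfl⟩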
section
set_option old_structure_cmd true
/-- A map `f` between semimodules over a semiring is linear if it satisfies the two properties
`f (x + y) = f x + f y` and `f (c • x) = c • f x`. Elements of `linear_map R M M₂` (available under
the notation `M →ₗ[R] M₂`) are bundled versions of such maps. An unbundled version is available with
the predicate `is_linear_map`, but it should be avoided most of the time. -/
structure linear_map (R : Type u) (M : Type v) (M₂ : Type w)
[semiring R] [add_comm_monoid M] [add_comm_monoid M₂] [semimodule R M] [semimodule R M₂]
extends add_hom M M₂, M →[R] M₂
end
/-- The `add_hom` underlying a `linear_map`. -/
add_decl_doc linear_map.to_add_hom
/-- The `mul_action_hom` underlying a `linear_map`. -/
add_decl_doc linear_map.to_mul_action_hom
infixr ` →ₗ `:25 := linear_map _
notation M ` →ₗ[`:25 R:25 `] `:0 M₂:0 := linear_map R M M₂
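/- A hedged illustration (not part of the original file): the same identity map, but bundled as a
term of `M →ₗ[R] M` using the notation just introduced (compare `linear_map.id` below). -/
example [semiring R] [add_comm_monoid M] [semimodule R M] : M →ₗ[R] M :=
⟨λ x, x, λ x y, rfl, λ c x, rfl⟩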
namespace linear_map
section add_comm_monoid
variables [semiring R] [add_comm_monoid M] [add_comm_monoid M₂] [add_comm_monoid M₃]
section
variables [semimodule R M] [semimodule R M₂]
/-- The `distrib_mul_action_hom` underlying a `linear_map`. -/
def to_distrib_mul_action_hom (f : M →ₗ[R] M₂) : distrib_mul_action_hom R M M₂ :=
{ map_zero' := zero_smul R (0 : M) ▸ zero_smul R (f.to_fun 0) ▸ f.map_smul' 0 0, ..f }
instance : has_coe_to_fun (M →ₗ[R] M₂) := ⟨_, to_fun⟩
initialize_simps_projections linear_map (to_fun → apply)
@[simp] lemma coe_mk (f : M → M₂) (h₁ h₂) :
((linear_map.mk f h₁ h₂ : M →ₗ[R] M₂) : M → M₂) = f := rfl
/-- Identity map as a `linear_map` -/
def id : M →ₗ[R] M :=
⟨id, λ _ _, rfl, λ _ _, rfl⟩
lemma id_apply (x : M) :
@id R M _ _ _ x = x := rfl
@[simp, norm_cast] lemma id_coe : ((linear_map.id : M →ₗ[R] M) : M → M) = _root_.id :=
by { ext x, refl }
end
section
variables [semimodule R M] [semimodule R M₂]
variables (f g : M →ₗ[R] M₂)
@[simp] lemma to_fun_eq_coe : f.to_fun = ⇑f := rfl
theorem is_linear : is_linear_map R f := ⟨f.2, f.3⟩
variables {f g}
theorem coe_injective : injective (λ f : M →ₗ[R] M₂, show M → M₂, from f) :=
by rintro ⟨f, _⟩ ⟨g, _⟩ ⟨h⟩; congr
@[ext] theorem ext (H : ∀ x, f x = g x) : f = g :=
coe_injective $ funext H
protected lemma congr_arg : Π {x x' : M}, x = x' → f x = f x'
| _ _ rfl := rfl
/-- If two linear maps are equal, they are equal at each point. -/
protected lemma congr_fun (h : f = g) (x : M) : f x = g x := h ▸ rfl
theorem ext_iff : f = g ↔ ∀ x, f x = g x :=
⟨by { rintro rfl x, refl }, ext⟩
@[simp] lemma mk_coe (f : M →ₗ[R] M₂) (h₁ h₂) :
(linear_map.mk f h₁ h₂ : M →ₗ[R] M₂) = f := ext $ λ _, rfl
variables (f g)
@[simp] lemma map_add (x y : M) : f (x + y) = f x + f y := f.map_add' x y
@[simp] lemma map_smul (c : R) (x : M) : f (c • x) = c • f x := f.map_smul' c x
@[simp] lemma map_zero : f 0 = 0 :=
f.to_distrib_mul_action_hom.map_zero
variables (M M₂)
/--
A typeclass for `has_scalar` structures which can be moved through a `linear_map`.
This typeclass is generated automatically from an `is_scalar_tower` instance, but exists so that
we can also add an instance for `add_comm_group.int_module`, allowing `z •` to be moved even if
`R` does not support negation.
-/
class compatible_smul (R S : Type*) [semiring S] [has_scalar R M]
[semimodule S M] [has_scalar R M₂] [semimodule S M₂] :=
(map_smul : ∀ (f : M →ₗ[S] M₂) (c : R) (x : M), f (c • x) = c • f x)
variables {M M₂}
@[priority 100]
instance compatible_smul.is_scalar_tower
{R S : Type*} [semiring S] [has_scalar R S]
[has_scalar R M] [semimodule S M] [is_scalar_tower R S M]
[has_scalar R M₂] [semimodule S M₂] [is_scalar_tower R S M₂] : compatible_smul M M₂ R S :=
⟨λ f c x, by rw [← smul_one_smul S c x, ← smul_one_smul S c (f x), map_smul]⟩
@[simp, priority 900]
lemma map_smul_of_tower {R S : Type*} [semiring S] [has_scalar R M]
[semimodule S M] [has_scalar R M₂] [semimodule S M₂]
[compatible_smul M M₂ R S] (f : M →ₗ[S] M₂) (c : R) (x : M) :
f (c • x) = c • f x :=
compatible_smul.map_smul f c x
instance : is_add_monoid_hom f :=
{ map_add := map_add f,
map_zero := map_zero f }
/-- convert a linear map to an additive map -/
def to_add_monoid_hom : M →+ M₂ :=
{ to_fun := f,
map_zero' := f.map_zero,
map_add' := f.map_add }
@[simp] lemma to_add_monoid_hom_coe : ⇑f.to_add_monoid_hom = f := rfl
variable (R)
/-- If `M` and `M₂` are both `R`-semimodules and `S`-semimodules and `R`-semimodule structures
are defined by an action of `R` on `S` (formally, we have two scalar towers), then any `S`-linear
map from `M` to `M₂` is `R`-linear.
See also `linear_map.map_smul_of_tower`. -/
def restrict_scalars {S : Type*} [semiring S] [semimodule S M] [semimodule S M₂]
[compatible_smul M M₂ R S] (f : M →ₗ[S] M₂) : M →ₗ[R] M₂ :=
{ to_fun := f,
map_add' := f.map_add,
map_smul' := f.map_smul_of_tower }
variable {R}
@[simp] lemma map_sum {ι} {t : finset ι} {g : ι → M} :
f (∑ i in t, g i) = (∑ i in t, f (g i)) :=
f.to_add_monoid_hom.map_sum _ _
theorem to_add_monoid_hom_injective :
function.injective (to_add_monoid_hom : (M →ₗ[R] M₂) → (M →+ M₂)) :=
λ f g h, ext $ add_monoid_hom.congr_fun h
/-- If two `R`-linear maps from `R` are equal on `1`, then they are equal. -/
@[ext] theorem ext_ring {f g : R →ₗ[R] M} (h : f 1 = g 1) : f = g :=
ext $ λ x, by rw [← mul_one x, ← smul_eq_mul, f.map_smul, g.map_smul, h]
theorem ext_ring_iff {f g : R →ₗ[R] M} : f = g ↔ f 1 = g 1 :=
⟨λ h, h ▸ rfl, ext_ring⟩
end
section
variables {semimodule_M : semimodule R M} {semimodule_M₂ : semimodule R M₂}
{semimodule_M₃ : semimodule R M₃}
variables (f : M₂ →ₗ[R] M₃) (g : M →ₗ[R] M₂)
/-- Composition of two linear maps is a linear map -/
def comp : M →ₗ[R] M₃ := ⟨f ∘ g, by simp, by simp⟩
lemma comp_apply (x : M) : f.comp g x = f (g x) := rfl
@[simp, norm_cast] lemma coe_comp : (f.comp g : M → M₃) = f ∘ g := rfl
@[simp] theorem comp_id : f.comp id = f :=
linear_map.ext $ λ x, rfl
@[simp] theorem id_comp : id.comp f = f :=
linear_map.ext $ λ x, rfl
end
/-- If a function `g` is a left and right inverse of a linear map `f`, then `g` is linear itself. -/
def inverse [semimodule R M] [semimodule R M₂]
(f : M →ₗ[R] M₂) (g : M₂ → M) (h₁ : left_inverse g f) (h₂ : right_inverse g f) :
M₂ →ₗ[R] M :=
by dsimp [left_inverse, function.right_inverse] at h₁ h₂; exact
⟨g, λ x y, by { rw [← h₁ (g (x + y)), ← h₁ (g x + g y)]; simp [h₂] },
λ a b, by { rw [← h₁ (g (a • b)), ← h₁ (a • g b)]; simp [h₂] }⟩
end add_comm_monoid
section add_comm_group
variables [semiring R] [add_comm_group M] [add_comm_group M₂]
variables {semimodule_M : semimodule R M} {semimodule_M₂ : semimodule R M₂}
variables (f : M →ₗ[R] M₂)
@[simp] lemma map_neg (x : M) : f (- x) = - f x :=
f.to_add_monoid_hom.map_neg x
@[simp] lemma map_sub (x y : M) : f (x - y) = f x - f y :=
f.to_add_monoid_hom.map_sub x y
instance : is_add_group_hom f :=
{ map_add := map_add f }
instance compatible_smul.int_module
{S : Type*} [semiring S] [semimodule ℤ M]
[semimodule S M] [semimodule ℤ M₂] [semimodule S M₂] : compatible_smul M M₂ ℤ S :=
⟨λ f c x, begin
induction c using int.induction_on,
case hz : { simp },
case hp : n ih { simpa [add_smul] using ih },
case hn : n ih { simpa [sub_smul] using ih }
end⟩
end add_comm_group
end linear_map
namespace is_linear_map
section add_comm_monoid
variables [semiring R] [add_comm_monoid M] [add_comm_monoid M₂]
variables [semimodule R M] [semimodule R M₂]
include R
/-- Convert an `is_linear_map` predicate to a `linear_map` -/
def mk' (f : M → M₂) (H : is_linear_map R f) : M →ₗ M₂ := ⟨f, H.1, H.2⟩
@[simp] theorem mk'_apply {f : M → M₂} (H : is_linear_map R f) (x : M) :
mk' f H x = f x := rfl
lemma is_linear_map_smul {R M : Type*} [comm_semiring R] [add_comm_monoid M] [semimodule R M]
(c : R) :
is_linear_map R (λ (z : M), c • z) :=
begin
refine is_linear_map.mk (smul_add c) _,
intros _ _,
simp only [smul_smul, mul_comm]
end
lemma is_linear_map_smul' {R M : Type*} [semiring R] [add_comm_monoid M] [semimodule R M] (a : M) :
is_linear_map R (λ (c : R), c • a) :=
is_linear_map.mk (λ x y, add_smul x y a) (λ x y, mul_smul x y a)
variables {f : M → M₂} (lin : is_linear_map R f)
include M M₂ lin
lemma map_zero : f (0 : M) = (0 : M₂) := (lin.mk' f).map_zero
end add_comm_monoid
section add_comm_group
variables [semiring R] [add_comm_group M] [add_comm_group M₂]
variables [semimodule R M] [semimodule R M₂]
include R
lemma is_linear_map_neg :
is_linear_map R (λ (z : M), -z) :=
is_linear_map.mk neg_add (λ x y, (smul_neg x y).symm)
variables {f : M → M₂} (lin : is_linear_map R f)
include M M₂ lin
lemma map_neg (x : M) : f (- x) = - f x := (lin.mk' f).map_neg x
lemma map_sub (x y) : f (x - y) = f x - f y := (lin.mk' f).map_sub x y
end add_comm_group
end is_linear_map
/-- Linear endomorphisms of a module, with associated ring structure
`linear_map.endomorphism_semiring` and algebra structure `module.endomorphism_algebra`. -/
abbreviation module.End (R : Type u) (M : Type v)
[semiring R] [add_comm_monoid M] [semimodule R M] := M →ₗ[R] M
/-- Reinterpret an additive homomorphism as a `ℤ`-linear map. -/
def add_monoid_hom.to_int_linear_map [add_comm_group M] [add_comm_group M₂] (f : M →+ M₂) :
M →ₗ[ℤ] M₂ :=
⟨f, f.map_add, f.map_int_module_smul⟩
/-- Reinterpret an additive homomorphism as a `ℚ`-linear map. -/
def add_monoid_hom.to_rat_linear_map [add_comm_group M] [vector_space ℚ M]
[add_comm_group M₂] [vector_space ℚ M₂] (f : M →+ M₂) :
M →ₗ[ℚ] M₂ :=
{ map_smul' := f.map_rat_module_smul, ..f }
/-! ### Linear equivalences -/
section
set_option old_structure_cmd true
/-- A linear equivalence is an invertible linear map. -/
@[nolint has_inhabited_instance]
structure linear_equiv (R : Type u) (M : Type v) (M₂ : Type w)
[semiring R] [add_comm_monoid M] [add_comm_monoid M₂] [semimodule R M] [semimodule R M₂]
extends M →ₗ[R] M₂, M ≃+ M₂
end
attribute [nolint doc_blame] linear_equiv.to_linear_map
attribute [nolint doc_blame] linear_equiv.to_add_equiv
infix ` ≃ₗ ` := linear_equiv _
notation M ` ≃ₗ[`:50 R `] ` M₂ := linear_equiv R M M₂
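/- A hedged illustration (not part of the original file): the identity map together with its
(equally trivial) inverse, packaged as a linear equivalence via the flattened constructor. -/
example [semiring R] [add_comm_monoid M] [semimodule R M] : M ≃ₗ[R] M :=
⟨λ x, x, λ x y, rfl, λ c x, rfl, λ x, x, λ x, rfl, λ x, rfl⟩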
namespace linear_equiv
section add_comm_monoid
variables {M₄ : Type*}
variables [semiring R]
variables [add_comm_monoid M] [add_comm_monoid M₂] [add_comm_monoid M₃] [add_comm_monoid M₄]
section
variables [semimodule R M] [semimodule R M₂] [semimodule R M₃]
include R
instance : has_coe (M ≃ₗ[R] M₂) (M →ₗ[R] M₂) := ⟨to_linear_map⟩
-- see Note [function coercion]
instance : has_coe_to_fun (M ≃ₗ[R] M₂) := ⟨_, λ f, f.to_fun⟩
@[simp] lemma coe_mk {to_fun inv_fun map_add map_smul left_inv right_inv } :
⇑(⟨to_fun, map_add, map_smul, inv_fun, left_inv, right_inv⟩ : M ≃ₗ[R] M₂) = to_fun :=
rfl
-- This exists for compatibility, previously `≃ₗ[R]` extended `≃` instead of `≃+`.
@[nolint doc_blame]
def to_equiv : (M ≃ₗ[R] M₂) → M ≃ M₂ := λ f, f.to_add_equiv.to_equiv
lemma to_equiv_injective : function.injective (to_equiv : (M ≃ₗ[R] M₂) → M ≃ M₂) :=
λ ⟨_, _, _, _, _, _⟩ ⟨_, _, _, _, _, _⟩ h, linear_equiv.mk.inj_eq.mpr (equiv.mk.inj h)
@[simp] lemma to_equiv_inj {e₁ e₂ : M ≃ₗ[R] M₂} : e₁.to_equiv = e₂.to_equiv ↔ e₁ = e₂ :=
to_equiv_injective.eq_iff
lemma to_linear_map_injective : function.injective (coe : (M ≃ₗ[R] M₂) → (M →ₗ[R] M₂)) :=
λ e₁ e₂ H, to_equiv_injective $ equiv.ext $ linear_map.congr_fun H
@[simp, norm_cast] lemma to_linear_map_inj {e₁ e₂ : M ≃ₗ[R] M₂} :
(e₁ : M →ₗ[R] M₂) = e₂ ↔ e₁ = e₂ :=
to_linear_map_injective.eq_iff
end
section
variables {semimodule_M : semimodule R M} {semimodule_M₂ : semimodule R M₂}
variables (e e' : M ≃ₗ[R] M₂)
lemma to_linear_map_eq_coe : e.to_linear_map = ↑e := rfl
@[simp, norm_cast] theorem coe_coe : ⇑(e : M →ₗ[R] M₂) = e := rfl
@[simp] lemma coe_to_equiv : ⇑e.to_equiv = e := rfl
@[simp] lemma coe_to_linear_map : ⇑e.to_linear_map = e := rfl
@[simp] lemma to_fun_eq_coe : e.to_fun = e := rfl
section
variables {e e'}
@[ext] lemma ext (h : ∀ x, e x = e' x) : e = e' :=
to_equiv_injective (equiv.ext h)
protected lemma congr_arg : Π {x x' : M}, x = x' → e x = e x'
| _ _ rfl := rfl
protected lemma congr_fun (h : e = e') (x : M) : e x = e' x := h ▸ rfl
lemma ext_iff : e = e' ↔ ∀ x, e x = e' x :=
⟨λ h x, h ▸ rfl, ext⟩
end
section
variables (M R)
/-- The identity map is a linear equivalence. -/
@[refl]
def refl [semimodule R M] : M ≃ₗ[R] M := { .. linear_map.id, .. equiv.refl M }
end
@[simp] lemma refl_apply [semimodule R M] (x : M) : refl R M x = x := rfl
/-- Linear equivalences are symmetric. -/
@[symm]
def symm : M₂ ≃ₗ[R] M :=
{ .. e.to_linear_map.inverse e.inv_fun e.left_inv e.right_inv,
.. e.to_equiv.symm }
/-- See Note [custom simps projection] -/
def simps.inv_fun [semimodule R M] [semimodule R M₂] (e : M ≃ₗ[R] M₂) : M₂ → M := e.symm
initialize_simps_projections linear_equiv (to_fun → apply, inv_fun → symm_apply)
@[simp] lemma inv_fun_eq_symm : e.inv_fun = e.symm := rfl
variables {semimodule_M₃ : semimodule R M₃} (e₁ : M ≃ₗ[R] M₂) (e₂ : M₂ ≃ₗ[R] M₃)
/-- Linear equivalences are transitive. -/
@[trans]
def trans : M ≃ₗ[R] M₃ :=
{ .. e₂.to_linear_map.comp e₁.to_linear_map,
.. e₁.to_equiv.trans e₂.to_equiv }
@[simp] lemma coe_to_add_equiv : ⇑(e.to_add_equiv) = e := rfl
/-- The two paths coercion can take to an `add_monoid_hom` are equivalent -/
lemma to_add_monoid_hom_commutes :
e.to_linear_map.to_add_monoid_hom = e.to_add_equiv.to_add_monoid_hom :=
rfl
@[simp] theorem trans_apply (c : M) :
(e₁.trans e₂) c = e₂ (e₁ c) := rfl
@[simp] theorem apply_symm_apply (c : M₂) : e (e.symm c) = c := e.6 c
@[simp] theorem symm_apply_apply (b : M) : e.symm (e b) = b := e.5 b
@[simp] lemma symm_trans_apply (c : M₃) : (e₁.trans e₂).symm c = e₁.symm (e₂.symm c) := rfl
@[simp] lemma trans_refl : e.trans (refl R M₂) = e := to_equiv_injective e.to_equiv.trans_refl
@[simp] lemma refl_trans : (refl R M).trans e = e := to_equiv_injective e.to_equiv.refl_trans
lemma symm_apply_eq {x y} : e.symm x = y ↔ x = e y := e.to_equiv.symm_apply_eq
lemma eq_symm_apply {x y} : y = e.symm x ↔ e y = x := e.to_equiv.eq_symm_apply
@[simp] lemma trans_symm [semimodule R M] [semimodule R M₂] (f : M ≃ₗ[R] M₂) :
f.trans f.symm = linear_equiv.refl R M :=
by { ext x, simp }
@[simp] lemma symm_trans [semimodule R M] [semimodule R M₂] (f : M ≃ₗ[R] M₂) :
f.symm.trans f = linear_equiv.refl R M₂ :=
by { ext x, simp }
@[simp, norm_cast] lemma refl_to_linear_map [semimodule R M] :
(linear_equiv.refl R M : M →ₗ[R] M) = linear_map.id :=
rfl
@[simp, norm_cast]
lemma comp_coe [semimodule R M] [semimodule R M₂] [semimodule R M₃] (f : M ≃ₗ[R] M₂)
(f' : M₂ ≃ₗ[R] M₃) : (f' : M₂ →ₗ[R] M₃).comp (f : M →ₗ[R] M₂) = (f.trans f' : M →ₗ[R] M₃) :=
rfl
@[simp] lemma mk_coe (h₁ h₂ f h₃ h₄) :
(linear_equiv.mk e h₁ h₂ f h₃ h₄ : M ≃ₗ[R] M₂) = e := ext $ λ _, rfl
@[simp] theorem map_add (a b : M) : e (a + b) = e a + e b := e.map_add' a b
@[simp] theorem map_zero : e 0 = 0 := e.to_linear_map.map_zero
@[simp] theorem map_smul (c : R) (x : M) : e (c • x) = c • e x := e.map_smul' c x
@[simp] lemma map_sum {s : finset ι} (u : ι → M) : e (∑ i in s, u i) = ∑ i in s, e (u i) :=
e.to_linear_map.map_sum
@[simp] theorem map_eq_zero_iff {x : M} : e x = 0 ↔ x = 0 :=
e.to_add_equiv.map_eq_zero_iff
theorem map_ne_zero_iff {x : M} : e x ≠ 0 ↔ x ≠ 0 :=
e.to_add_equiv.map_ne_zero_iff
@[simp] theorem symm_symm : e.symm.symm = e := by { cases e, refl }
lemma symm_bijective [semimodule R M] [semimodule R M₂] :
function.bijective (symm : (M ≃ₗ[R] M₂) → (M₂ ≃ₗ[R] M)) :=
equiv.bijective ⟨symm, symm, symm_symm, symm_symm⟩
@[simp] lemma mk_coe' (f h₁ h₂ h₃ h₄) :
(linear_equiv.mk f h₁ h₂ ⇑e h₃ h₄ : M₂ ≃ₗ[R] M) = e.symm :=
symm_bijective.injective $ ext $ λ x, rfl
@[simp] theorem symm_mk (f h₁ h₂ h₃ h₄) :
(⟨e, h₁, h₂, f, h₃, h₄⟩ : M ≃ₗ[R] M₂).symm =
{ to_fun := f, inv_fun := e,
..(⟨e, h₁, h₂, f, h₃, h₄⟩ : M ≃ₗ[R] M₂).symm } := rfl
protected lemma bijective : function.bijective e := e.to_equiv.bijective
protected lemma injective : function.injective e := e.to_equiv.injective
protected lemma surjective : function.surjective e := e.to_equiv.surjective
protected lemma image_eq_preimage (s : set M) : e '' s = e.symm ⁻¹' s :=
e.to_equiv.image_eq_preimage s
end
/-- An involutive linear map is a linear equivalence. -/
def of_involutive [semimodule R M] (f : M →ₗ[R] M) (hf : involutive f) : M ≃ₗ[R] M :=
{ .. f, .. hf.to_equiv f }
@[simp] lemma coe_of_involutive [semimodule R M] (f : M →ₗ[R] M) (hf : involutive f) :
⇑(of_involutive f hf) = f :=
rfl
end add_comm_monoid
end linear_equiv
|
eb720baa526eb4dc6579782f49bb78d03690c47c
|
74addaa0e41490cbaf2abd313a764c96df57b05d
|
/Mathlib/data/pnat/prime_auto.lean
|
ce6c012a4923ef1687f073f08c73d3f1039c6f60
|
[] |
no_license
|
AurelienSaue/Mathlib4_auto
|
f538cfd0980f65a6361eadea39e6fc639e9dae14
|
590df64109b08190abe22358fabc3eae000943f2
|
refs/heads/master
| 1,683,906,849,776
| 1,622,564,669,000
| 1,622,564,669,000
| 371,723,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 7,036
|
lean
|
/-
Copyright (c) 2017 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Mario Carneiro, Neil Strickland
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.data.nat.prime
import Mathlib.data.pnat.basic
import Mathlib.PostPort
namespace Mathlib
/-!
# Primality and GCD on pnat
This file extends the theory of `ℕ+` with `gcd`, `lcm` and `prime` functions, analogous to those on
`nat`.
-/
namespace nat.primes
protected instance coe_pnat : has_coe primes ℕ+ :=
has_coe.mk fun (p : primes) => { val := ↑p, property := sorry }
theorem coe_pnat_nat (p : primes) : ↑↑p = ↑p := rfl
theorem coe_pnat_inj (p : primes) (q : primes) : ↑p = ↑q → p = q := sorry
end nat.primes
namespace pnat
/-- The greatest common divisor (gcd) of two positive natural numbers,
viewed as positive natural number. -/
def gcd (n : ℕ+) (m : ℕ+) : ℕ+ := { val := nat.gcd ↑n ↑m, property := sorry }
/-- The least common multiple (lcm) of two positive natural numbers,
viewed as positive natural number. -/
def lcm (n : ℕ+) (m : ℕ+) : ℕ+ := { val := nat.lcm ↑n ↑m, property := sorry }
@[simp] theorem gcd_coe (n : ℕ+) (m : ℕ+) : ↑(gcd n m) = nat.gcd ↑n ↑m := rfl
@[simp] theorem lcm_coe (n : ℕ+) (m : ℕ+) : ↑(lcm n m) = nat.lcm ↑n ↑m := rfl
theorem gcd_dvd_left (n : ℕ+) (m : ℕ+) : gcd n m ∣ n := iff.mpr dvd_iff (nat.gcd_dvd_left ↑n ↑m)
theorem gcd_dvd_right (n : ℕ+) (m : ℕ+) : gcd n m ∣ m := iff.mpr dvd_iff (nat.gcd_dvd_right ↑n ↑m)
theorem dvd_gcd {m : ℕ+} {n : ℕ+} {k : ℕ+} (hm : k ∣ m) (hn : k ∣ n) : k ∣ gcd m n :=
iff.mpr dvd_iff (nat.dvd_gcd (iff.mp dvd_iff hm) (iff.mp dvd_iff hn))
theorem dvd_lcm_left (n : ℕ+) (m : ℕ+) : n ∣ lcm n m := iff.mpr dvd_iff (nat.dvd_lcm_left ↑n ↑m)
theorem dvd_lcm_right (n : ℕ+) (m : ℕ+) : m ∣ lcm n m := iff.mpr dvd_iff (nat.dvd_lcm_right ↑n ↑m)
theorem lcm_dvd {m : ℕ+} {n : ℕ+} {k : ℕ+} (hm : m ∣ k) (hn : n ∣ k) : lcm m n ∣ k :=
iff.mpr dvd_iff (nat.lcm_dvd (iff.mp dvd_iff hm) (iff.mp dvd_iff hn))
theorem gcd_mul_lcm (n : ℕ+) (m : ℕ+) : gcd n m * lcm n m = n * m :=
subtype.eq (nat.gcd_mul_lcm ↑n ↑m)
theorem eq_one_of_lt_two {n : ℕ+} : n < bit0 1 → n = 1 :=
fun (h : n < bit0 1) =>
le_antisymm
(id
(fun (h : n < 1 + 1) => eq.mp (Eq._oldrec (Eq.refl (n < 1 + 1)) (propext lt_add_one_iff)) h)
h)
(one_le n)
/-! ### Prime numbers -/
/-- Primality predicate for `ℕ+`, defined in terms of `nat.prime`. -/
def prime (p : ℕ+) := nat.prime ↑p
theorem prime.one_lt {p : ℕ+} : prime p → 1 < p := nat.prime.one_lt
theorem prime_two : prime (bit0 1) := nat.prime_two
theorem dvd_prime {p : ℕ+} {m : ℕ+} (pp : prime p) : m ∣ p ↔ m = 1 ∨ m = p := sorry
theorem prime.ne_one {p : ℕ+} : prime p → p ≠ 1 := sorry
@[simp] theorem not_prime_one : ¬prime 1 := nat.not_prime_one
theorem prime.not_dvd_one {p : ℕ+} : prime p → ¬p ∣ 1 :=
fun (pp : prime p) =>
eq.mpr (id (Eq._oldrec (Eq.refl (¬p ∣ 1)) (propext dvd_iff))) (nat.prime.not_dvd_one pp)
theorem exists_prime_and_dvd {n : ℕ+} : bit0 1 ≤ n → ∃ (p : ℕ+), prime p ∧ p ∣ n := sorry
/-! ### Coprime numbers and gcd -/
/-- Two pnats are coprime if their gcd is 1. -/
def coprime (m : ℕ+) (n : ℕ+) := gcd m n = 1
@[simp] theorem coprime_coe {m : ℕ+} {n : ℕ+} : nat.coprime ↑m ↑n ↔ coprime m n := sorry
theorem coprime.mul {k : ℕ+} {m : ℕ+} {n : ℕ+} : coprime m k → coprime n k → coprime (m * n) k :=
sorry
theorem coprime.mul_right {k : ℕ+} {m : ℕ+} {n : ℕ+} :
coprime k m → coprime k n → coprime k (m * n) :=
sorry
theorem gcd_comm {m : ℕ+} {n : ℕ+} : gcd m n = gcd n m := sorry
theorem gcd_eq_left_iff_dvd {m : ℕ+} {n : ℕ+} : m ∣ n ↔ gcd m n = m := sorry
theorem gcd_eq_right_iff_dvd {m : ℕ+} {n : ℕ+} : m ∣ n ↔ gcd n m = m :=
eq.mpr (id (Eq._oldrec (Eq.refl (m ∣ n ↔ gcd n m = m)) gcd_comm)) gcd_eq_left_iff_dvd
theorem coprime.gcd_mul_left_cancel (m : ℕ+) {n : ℕ+} {k : ℕ+} :
coprime k n → gcd (k * m) n = gcd m n :=
sorry
theorem coprime.gcd_mul_right_cancel (m : ℕ+) {n : ℕ+} {k : ℕ+} :
coprime k n → gcd (m * k) n = gcd m n :=
eq.mpr (id (Eq._oldrec (Eq.refl (coprime k n → gcd (m * k) n = gcd m n)) (mul_comm m k)))
(coprime.gcd_mul_left_cancel m)
theorem coprime.gcd_mul_left_cancel_right (m : ℕ+) {n : ℕ+} {k : ℕ+} :
coprime k m → gcd m (k * n) = gcd m n :=
sorry
theorem coprime.gcd_mul_right_cancel_right (m : ℕ+) {n : ℕ+} {k : ℕ+} :
coprime k m → gcd m (n * k) = gcd m n :=
eq.mpr (id (Eq._oldrec (Eq.refl (coprime k m → gcd m (n * k) = gcd m n)) (mul_comm n k)))
(coprime.gcd_mul_left_cancel_right m)
@[simp] theorem one_gcd {n : ℕ+} : gcd 1 n = 1 :=
eq.mpr (id (Eq._oldrec (Eq.refl (gcd 1 n = 1)) (Eq.symm (propext gcd_eq_left_iff_dvd))))
(one_dvd n)
@[simp] theorem gcd_one {n : ℕ+} : gcd n 1 = 1 :=
eq.mpr (id (Eq._oldrec (Eq.refl (gcd n 1 = 1)) gcd_comm)) one_gcd
theorem coprime.symm {m : ℕ+} {n : ℕ+} : coprime m n → coprime n m :=
eq.mpr (id (imp_congr_eq (coprime.equations._eqn_1 m n) (coprime.equations._eqn_1 n m)))
(eq.mpr (id (Eq._oldrec (Eq.refl (gcd m n = 1 → gcd n m = 1)) gcd_comm))
(eq.mpr (id (propext imp_self)) trivial))
@[simp] theorem one_coprime {n : ℕ+} : coprime 1 n := one_gcd
@[simp] theorem coprime_one {n : ℕ+} : coprime n 1 := coprime.symm one_coprime
theorem coprime.coprime_dvd_left {m : ℕ+} {k : ℕ+} {n : ℕ+} : m ∣ k → coprime k n → coprime m n :=
sorry
theorem coprime.factor_eq_gcd_left {a : ℕ+} {b : ℕ+} {m : ℕ+} {n : ℕ+} (cop : coprime m n)
(am : a ∣ m) (bn : b ∣ n) : a = gcd (a * b) m :=
sorry
theorem coprime.factor_eq_gcd_right {a : ℕ+} {b : ℕ+} {m : ℕ+} {n : ℕ+} (cop : coprime m n)
(am : a ∣ m) (bn : b ∣ n) : a = gcd (b * a) m :=
eq.mpr (id (Eq._oldrec (Eq.refl (a = gcd (b * a) m)) (mul_comm b a)))
(coprime.factor_eq_gcd_left cop am bn)
theorem coprime.factor_eq_gcd_left_right {a : ℕ+} {b : ℕ+} {m : ℕ+} {n : ℕ+} (cop : coprime m n)
(am : a ∣ m) (bn : b ∣ n) : a = gcd m (a * b) :=
eq.mpr (id (Eq._oldrec (Eq.refl (a = gcd m (a * b))) gcd_comm))
(coprime.factor_eq_gcd_left cop am bn)
theorem coprime.factor_eq_gcd_right_right {a : ℕ+} {b : ℕ+} {m : ℕ+} {n : ℕ+} (cop : coprime m n)
(am : a ∣ m) (bn : b ∣ n) : a = gcd m (b * a) :=
eq.mpr (id (Eq._oldrec (Eq.refl (a = gcd m (b * a))) gcd_comm))
(coprime.factor_eq_gcd_right cop am bn)
theorem coprime.gcd_mul (k : ℕ+) {m : ℕ+} {n : ℕ+} (h : coprime m n) :
gcd k (m * n) = gcd k m * gcd k n :=
sorry
theorem gcd_eq_left {m : ℕ+} {n : ℕ+} : m ∣ n → gcd m n = m := sorry
theorem coprime.pow {m : ℕ+} {n : ℕ+} (k : ℕ) (l : ℕ) (h : coprime m n) : coprime (m ^ k) (n ^ l) :=
sorry
end Mathlib
|
b29ce3fce437bda0cec5e5a74f6aec85f54c58a8
|
8be24982c807641260370bd09243eac768750811
|
/src/presentation.lean
|
98591b0adbe43d98886519ba5c8f815f1df1c201
|
[] |
no_license
|
jjaassoonn/transcendental
|
8008813253af3aa80b5a5c56551317e7ab4246ab
|
99bc6ea6089f04ed90a0f55f0533ebb7f47b22ff
|
refs/heads/master
| 1,607,869,957,944
| 1,599,659,687,000
| 1,599,659,687,000
| 234,444,826
| 9
| 1
| null | 1,598,218,307,000
| 1,579,224,232,000
|
HTML
|
UTF-8
|
Lean
| false
| false
| 6,734
|
lean
|
import algebraic_countable_over_Z
import liouville_theorem
import e_transcendental
noncomputable theory
open_locale classical big_operators
variables (p q : Prop)
------------------------------------------------------------------------------------
/-# Formalisations of basic theorems of transcendental number theory
The main result in this project is the following:
- Countability of algebraic numbers;
- Liouville's theorem and Liouville's number;
- Transcendence of eⁿ for n ≥ 1; -/
------------------------------------------------------------------------------------
/- # Motivation
- what is the long-term goal?
- what is the value of such an approach in terms of pedagogy?
- what is the value of such an approach in terms of research?
* the example of the Kepler conjecture;
* automation;
* the univalence axiom : (A = B) ≃ (A ≃ B). -/
------------------------------------------------------------------------------------
/-## The set of algebraic number is countable -/
-- #print algebraic_set
-- #print algebraic_set_countable
-- #print transcendental_number_exists
------------------------------------------------------------------------------------
/- ## Liouville's theorem and Liouville's number -/
-- #print liouville_number
-- #print liouville_numbers_transcendental
/- α is a Liouville number, hence it is transcendental -/
-- #print ten_pow_n_fact_inverse
-- #print α
-- #print liouville_α
-- #print transcendental_α
------------------------------------------------------------------------------------
/- ## eⁿ is transcendental for all n ≥ 1 -/
-- #print e
-- #print e_pow_transcendental
------------------------------------------------------------------------------------
/-# Basic dependent type theory
0 : ℕ
ℕ : Type
Type : Type 1
n ↦ n! : ℕ -> ℕ
⟨0,0⟩ : ℕ × ℕ -/
------------------------------------------------------------------------------------
/- Let `𝓤` be a collection of types, `A` a type, and `B : A -> 𝓤` a family of types. Then:
- we can form a new Π-type:
Π (x : A), B x.
The terms of this type are functions `f : Π (x : A), B x` such that for any term `a` of type `A`, `f a` is of type `B a`.
- we can form a new Σ-type:
Σ (x : A), B x.
The terms of this type have the form `⟨x, h⟩` where `x` has type `A` and `h` has type `B x`.
Concrete terms of both kinds are spelled out in the examples below. -/
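-- Hedged illustrations (not in the original slides): a term of a Π-type and a term of a Σ-type.
example : Π (n : ℕ), fin (n + 1) := λ n, 0
example : Σ (n : ℕ), fin n := ⟨1, 0⟩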
------------------------------------------------------------------------------------
/-## Curry-Howard isomorphism :
propositions are types whose terms are proofs. -/
------------------------------------------------------------------------------------
/-# p -> q
- Implication is function application.
-/
example : p -> p := id
example (proof_p_imp_q : p -> q) (proof_p : p) : q :=
proof_p_imp_q proof_p
/-# ⊥ ; ¬p
- the false proposition (⊥) is a type without any terms (like `∅`).
- the negation of p is `p -> ⊥`.
-/
example : ¬ ⊥ := id
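-- A hedged extra example (not in the original slides): since `¬ p` is the function type `p -> ⊥`,
-- applying a proof of `¬ p` to a proof of `p` produces a term of `⊥`.
example (proof_of_p : p) (proof_of_not_p : ¬ p) : ⊥ :=
proof_of_not_p proof_of_p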
------------------------------------------------------------------------------------
/-# p ∧ q
- Conjunction is cartesian product.
-/
example (proof_p : p) (proof_q : q) : p ∧ q :=
⟨proof_p, proof_q⟩
example (proof_pq : p ∧ q) : p := proof_pq.1
------------------------------------------------------------------------------------
/-# p ∨ q
- Disjunction is the coproduct, as in category theory, with the following universal property:
f₁ f₂
┌--------> X <--------┐
│ ↑ │
│ | f │
│ | │
p -----> p ∨ q <----- q
left right
-/
example (proof_of_p : p) : p ∨ q := or.intro_left q proof_of_p
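-- A hedged extra example (not in the original slides): the universal property drawn above,
-- realised by `or.elim`; the proposition `X` is a made-up name for illustration.
example (X : Prop) (f₁ : p → X) (f₂ : q → X) : p ∨ q → X :=
λ proof_p_or_q, or.elim proof_p_or_q f₁ f₂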
------------------------------------------------------------------------------------
--# Quantifiers (∀, ∃)
-- Let B : A -> Prop
------------------------------------------------------------------------------------
/-
# Universal Quantifier as Π-type
`Π (a : A), B a` and `∀ a : A, B a`
# Existential Quantifier as Σ-type
`Σ (a : A), B a` and `∃ a : A, B a` -/
example : ∀ x : ℕ, x ≥ 0 := λ x, bot_le
example : ∃ x : ℕ, x > 0 := ⟨2, two_pos⟩
------------------------------------------------------------------------------------
theorem e_pow_transcendental' (n : ℕ) (hn : n ≥ 1) : transcendental (e^n) :=
begin
intro alg,
rcases alg with ⟨p, p_nonzero, hp⟩,
have e_algebraic : is_algebraic ℤ e,
{
set q := ∑ i in finset.range (p.nat_degree + 1), polynomial.C (p.coeff i) * (polynomial.X ^ (i * n)),
use q,
split,
-- q is non-zero
{
intro rid,
rw polynomial.ext_iff at rid,
replace p_nonzero := (not_iff_not.2 (@polynomial.ext_iff ℤ _ p 0)).1 p_nonzero,
simp only [not_forall, polynomial.coeff_zero] at p_nonzero,
choose k hk using p_nonzero,
replace rid := rid (k * n),
simp only [polynomial.mul_coeff_zero, polynomial.finset_sum_coeff, polynomial.coeff_zero] at rid,
simp_rw [polynomial.coeff_C_mul_X] at rid,
rw finset.sum_eq_single k at rid,
simp only [mul_one, if_true, true_or, eq_self_iff_true, nat.zero_eq_mul] at rid,
exact hk rid,
intros j hj1 hj2, split_ifs,
replace h := (nat.mul_left_inj _).1 h,
exfalso,
exact hj2 (eq.symm h), exact hn, refl,
intros hk,
simp only [not_lt, finset.mem_range] at hk,
simp only [if_true, eq_self_iff_true],
apply polynomial.coeff_eq_zero_of_nat_degree_lt,
linarith,
},
-- q(e) = 0
{
have H := polynomial.as_sum p,
rw H at hp, rw aeval_sum' at hp ⊢, rw <-hp,
apply finset.sum_congr rfl,
intros i hi,
simp only [polynomial.aeval_X, polynomial.aeval_C, alg_hom.map_pow, alg_hom.map_mul],
refine congr rfl _,
exact pow_mul' e i n,
}
},
exact e_transcendental e_algebraic,
end
-----------------------------------------------------------------------------------
|
1923bb7a62514512c8e64d7a974eda07db5749da
|
a0e23cfdd129a671bf3154ee1a8a3a72bf4c7940
|
/stage0/src/Lean/Compiler/ImplementedByAttr.lean
|
3c607625e9d1190c0a7b4cb3158908f79ae793c8
|
[
"Apache-2.0"
] |
permissive
|
WojciechKarpiel/lean4
|
7f89706b8e3c1f942b83a2c91a3a00b05da0e65b
|
f6e1314fa08293dea66a329e05b6c196a0189163
|
refs/heads/master
| 1,686,633,402,214
| 1,625,821,189,000
| 1,625,821,258,000
| 384,640,886
| 0
| 0
|
Apache-2.0
| 1,625,903,617,000
| 1,625,903,026,000
| null |
UTF-8
|
Lean
| false
| false
| 1,526
|
lean
|
/-
Copyright (c) 2019 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
import Lean.Attributes
import Lean.MonadEnv
namespace Lean.Compiler
builtin_initialize implementedByAttr : ParametricAttribute Name ← registerParametricAttribute {
name := `implementedBy
descr := "name of the Lean (probably unsafe) function that implements opaque constant"
getParam := fun declName stx => do
let decl ← getConstInfo declName
let fnName ← Attribute.Builtin.getId stx
let fnName ← resolveGlobalConstNoOverload fnName
let fnDecl ← getConstInfo fnName
unless decl.type == fnDecl.type do
throwError "invalid function '{fnName}' type mismatch"
if decl.name == fnDecl.name then
throwError "invalid 'implementedBy' argument '{fnName}', function cannot be implemented by itself"
return fnName
}
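/-
A hedged usage sketch (not part of the original file; `mySpec` and `myImpl` are made-up names):
the attribute lets an opaque constant be backed by an executable definition of the same type,
subject to the checks performed in `getParam` above, e.g.
  constant mySpec : Nat → Nat
  def myImpl (n : Nat) : Nat := n + 1
  attribute [implementedBy myImpl] mySpec
-/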
@[export lean_get_implemented_by]
def getImplementedBy (env : Environment) (declName : Name) : Option Name :=
implementedByAttr.getParam env declName
def setImplementedBy (env : Environment) (declName : Name) (impName : Name) : Except String Environment :=
implementedByAttr.setParam env declName impName
end Compiler
def setImplementedBy {m} [Monad m] [MonadEnv m] [MonadError m] (declName : Name) (impName : Name) : m Unit := do
let env ← getEnv
match Compiler.setImplementedBy env declName impName with
| Except.ok env => setEnv env
| Except.error ex => throwError ex
end Lean
|
179d8dfbb8a04a9c8bd4aed496d2b2601e927fb1
|
d1a52c3f208fa42c41df8278c3d280f075eb020c
|
/tests/lean/run/whileRepeat.lean
|
e46b3013ad934322864474bbeb6ac29cdcd9c8a0
|
[
"Apache-2.0",
"LLVM-exception",
"NCSA",
"LGPL-3.0-only",
"LicenseRef-scancode-inner-net-2.0",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"Spencer-94",
"LGPL-2.1-or-later",
"HPND",
"LicenseRef-scancode-pcre",
"ISC",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"SunPro",
"CMU-Mach"
] |
permissive
|
cipher1024/lean4
|
6e1f98bb58e7a92b28f5364eb38a14c8d0aae393
|
69114d3b50806264ef35b57394391c3e738a9822
|
refs/heads/master
| 1,642,227,983,603
| 1,642,011,696,000
| 1,642,011,696,000
| 228,607,691
| 0
| 0
|
Apache-2.0
| 1,576,584,269,000
| 1,576,584,268,000
| null |
UTF-8
|
Lean
| false
| false
| 1,266
|
lean
|
structure Loop where
@[inline]
partial def Loop.forIn {β : Type u} {m : Type u → Type v} [Monad m] (loop : Loop) (init : β) (f : Unit → β → m (ForInStep β)) : m β :=
let rec @[specialize] loop (b : β) : m β := do
match ← f () b with
| ForInStep.done b => pure b
| ForInStep.yield b => loop b
loop init
instance : ForIn m Loop Unit where
forIn := Loop.forIn
syntax "repeat " doSeq : doElem
macro_rules
| `(doElem| repeat $seq) => `(doElem| for _ in Loop.mk do $seq)
syntax "while " termBeforeDo " do " doSeq : doElem
macro_rules
| `(doElem| while $cond do $seq) =>
`(doElem| repeat if $cond then $seq else break)
def test1 : IO Unit := do
let mut i := 0
while i < 10 do
println! "{i}"
i := i + 1
println! "test1 done {i}"
#eval test1
syntax "repeat " doSeq " until " term : doElem
macro_rules
| `(doElem| repeat $seq until $cond) =>
`(doElem| repeat do $seq; if $cond then break)
def test2 : IO Unit := do
let mut i := 0
repeat
println! "{i}"
i := i + 1
until i >= 10
println! "test2 done {i}"
#eval test2
def test3 : IO Unit := do
let mut i := 0
repeat
println! "{i}"
if i > 10 && i % 3 == 0 then break
i := i + 1
println! "test3 done {i}"
#eval test3
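-- A hedged extra check (not part of the original test file): the `repeat ... until` and `break`
-- pieces above can be combined in one loop; `test4` is a made-up addition mirroring `test2`.
def test4 : IO Unit := do
  let mut i := 0
  repeat
    i := i + 1
    if i == 5 then break
  until i >= 10
  println! "test4 done {i}"
#eval test4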
|
00266b9d64f9e287432a24b24b6b0db7f8762b72
|
675b8263050a5d74b89ceab381ac81ce70535688
|
/src/analysis/analytic/composition.lean
|
44c66ff7b82d96c7f117b13b2fdb76a90f344ec6
|
[
"Apache-2.0"
] |
permissive
|
vozor/mathlib
|
5921f55235ff60c05f4a48a90d616ea167068adf
|
f7e728ad8a6ebf90291df2a4d2f9255a6576b529
|
refs/heads/master
| 1,675,607,702,231
| 1,609,023,279,000
| 1,609,023,279,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 56,078
|
lean
|
/-
Copyright (c) 2020 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel, Johan Commelin
-/
import analysis.analytic.basic
import combinatorics.composition
/-!
# Composition of analytic functions
in this file we prove that the composition of analytic functions is analytic.
The argument is the following. Assume `g z = ∑' qₙ (z, ..., z)` and `f y = ∑' pₖ (y, ..., y)`. Then
`g (f y) = ∑' qₙ (∑' pₖ (y, ..., y), ..., ∑' pₖ (y, ..., y))
= ∑' qₙ (p_{i₁} (y, ..., y), ..., p_{iₙ} (y, ..., y))`.
For each `n` and `i₁, ..., iₙ`, define a `i₁ + ... + iₙ` multilinear function mapping
`(y₀, ..., y_{i₁ + ... + iₙ - 1})` to
`qₙ (p_{i₁} (y₀, ..., y_{i₁-1}), p_{i₂} (y_{i₁}, ..., y_{i₁ + i₂ - 1}), ..., p_{iₙ} (....)))`.
Then `g ∘ f` is obtained by summing all these multilinear functions.
To formalize this, we use compositions of an integer `N`, i.e., its decompositions into
a sum `i₁ + ... + iₙ` of positive integers. Given such a composition `c` and two formal
multilinear series `q` and `p`, let `q.comp_along_composition p c` be the above multilinear
function. Then the `N`-th coefficient in the power series expansion of `g ∘ f` is the sum of these
terms over all `c : composition N`.
To complete the proof, we need to show that this power series has a positive radius of convergence.
This follows from the fact that `composition N` has cardinality `2^(N-1)` and estimates on
the norm of `qₙ` and `pₖ`, which give summability. We also need to show that it indeed converges to
`g ∘ f`. For this, we note that the composition of partial sums converges to `g ∘ f`, and that it
corresponds to a part of the whole sum, on a subset that increases to the whole space. By
summability of the norms, this implies the overall convergence.
## Main results
* `q.comp p` is the formal composition of the formal multilinear series `q` and `p`.
* `has_fpower_series_at.comp` states that if two functions `g` and `f` admit power series expansions
`q` and `p`, then `g ∘ f` admits a power series expansion given by `q.comp p`.
* `analytic_at.comp` states that the composition of analytic functions is analytic.
* `formal_multilinear_series.comp_assoc` states that composition is associative on formal
multilinear series.
## Implementation details
The main technical difficulty is to write down things. In particular, we need to define precisely
`q.comp_along_composition p c` and to show that it is indeed a continuous multilinear
function. This requires a whole interface built on the class `composition`. Once this is set,
the main difficulty is to reorder the sums, writing the composition of the partial sums as a sum
over some subset of `Σ n, composition n`. We need to check that the reordering is a bijection,
running over difficulties due to the dependent nature of the types under consideration, that are
controlled thanks to the interface for `composition`.
The associativity of composition on formal multilinear series is a nontrivial result: it does not
follow from the associativity of composition of analytic functions, as there is no uniqueness for
the formal multilinear series representing a function (and also, it holds even when the radius of
convergence of the series is `0`). Instead, we give a direct proof, which amounts to reordering
double sums in a careful way. The change of variables is a canonical (combinatorial) bijection
`composition.sigma_equiv_sigma_pi` between `(Σ (a : composition n), composition a.length)` and
`(Σ (c : composition n), Π (i : fin c.length), composition (c.blocks_fun i))`, and is described
in more details below in the paragraph on associativity.
-/
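/- A hedged illustration (not part of the original file) of the counting fact quoted above: the
compositions of `3` are 1+1+1, 1+2, 2+1 and 3, matching the cardinality `2^(3-1) = 4`. -/
example : fintype.card (composition 3) = 4 :=
by { rw composition_card, norm_num }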
noncomputable theory
variables {𝕜 : Type*} [nondiscrete_normed_field 𝕜]
{E : Type*} [normed_group E] [normed_space 𝕜 E]
{F : Type*} [normed_group F] [normed_space 𝕜 F]
{G : Type*} [normed_group G] [normed_space 𝕜 G]
{H : Type*} [normed_group H] [normed_space 𝕜 H]
open filter list
open_locale topological_space big_operators classical nnreal
/-! ### Composing formal multilinear series -/
namespace formal_multilinear_series
/-!
In this paragraph, we define the composition of formal multilinear series, by summing over all
possible compositions of `n`.
-/
/-- Given a formal multilinear series `p`, a composition `c` of `n` and the index `i` of a
block of `c`, we may define a function on `fin n → E` by picking the variables in the `i`-th block
of `n`, and applying the corresponding coefficient of `p` to these variables. This function is
called `p.apply_composition c v i` for `v : fin n → E` and `i : fin c.length`. -/
def apply_composition
(p : formal_multilinear_series 𝕜 E F) {n : ℕ} (c : composition n) :
(fin n → E) → (fin (c.length) → F) :=
λ v i, p (c.blocks_fun i) (v ∘ (c.embedding i))
lemma apply_composition_ones (p : formal_multilinear_series 𝕜 E F) (n : ℕ) :
apply_composition p (composition.ones n) =
λ v i, p 1 (λ _, v (fin.cast_le (composition.length_le _) i)) :=
begin
funext v i,
apply p.congr (composition.ones_blocks_fun _ _),
intros j hjn hj1,
obtain rfl : j = 0, { linarith },
refine congr_arg v _,
rw [fin.ext_iff, fin.coe_cast_le, composition.ones_embedding, fin.coe_mk],
end
/-- Technical lemma stating how `p.apply_composition` commutes with updating variables. This
will be the key point to show that functions constructed from `apply_composition` retain
multilinearity. -/
lemma apply_composition_update
(p : formal_multilinear_series 𝕜 E F) {n : ℕ} (c : composition n)
(j : fin n) (v : fin n → E) (z : E) :
p.apply_composition c (function.update v j z) =
function.update (p.apply_composition c v) (c.index j)
(p (c.blocks_fun (c.index j))
(function.update (v ∘ (c.embedding (c.index j))) (c.inv_embedding j) z)) :=
begin
ext k,
by_cases h : k = c.index j,
{ rw h,
let r : fin (c.blocks_fun (c.index j)) → fin n := c.embedding (c.index j),
simp only [function.update_same],
change p (c.blocks_fun (c.index j)) ((function.update v j z) ∘ r) = _,
let j' := c.inv_embedding j,
suffices B : (function.update v j z) ∘ r = function.update (v ∘ r) j' z,
by rw B,
suffices C : (function.update v (r j') z) ∘ r = function.update (v ∘ r) j' z,
by { convert C, exact (c.embedding_comp_inv j).symm },
exact function.update_comp_eq_of_injective _ (c.embedding _).injective _ _ },
{ simp only [h, function.update_eq_self, function.update_noteq, ne.def, not_false_iff],
let r : fin (c.blocks_fun k) → fin n := c.embedding k,
change p (c.blocks_fun k) ((function.update v j z) ∘ r) = p (c.blocks_fun k) (v ∘ r),
suffices B : (function.update v j z) ∘ r = v ∘ r, by rw B,
apply function.update_comp_eq_of_not_mem_range,
rwa c.mem_range_embedding_iff' }
end
/-- Given two formal multilinear series `q` and `p` and a composition `c` of `n`, one may
form a multilinear map in `n` variables by applying the right coefficient of `p` to each block of
the composition, and then applying `q c.length` to the resulting vector. It is called
`q.comp_along_composition_multilinear p c`. This function admits a version as a continuous
multilinear map, called `q.comp_along_composition p c` below. -/
def comp_along_composition_multilinear {n : ℕ}
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F)
(c : composition n) : multilinear_map 𝕜 (λ i : fin n, E) G :=
{ to_fun := λ v, q c.length (p.apply_composition c v),
map_add' := λ v i x y, by simp only [apply_composition_update,
continuous_multilinear_map.map_add],
map_smul' := λ v i c x, by simp only [apply_composition_update,
continuous_multilinear_map.map_smul] }
/-- The norm of `q.comp_along_composition_multilinear p c` is controlled by the product of
the norms of the relevant bits of `q` and `p`. -/
lemma comp_along_composition_multilinear_bound {n : ℕ}
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F)
(c : composition n) (v : fin n → E) :
∥q.comp_along_composition_multilinear p c v∥ ≤
∥q c.length∥ * (∏ i, ∥p (c.blocks_fun i)∥) * (∏ i : fin n, ∥v i∥) :=
calc ∥q.comp_along_composition_multilinear p c v∥ = ∥q c.length (p.apply_composition c v)∥ : rfl
... ≤ ∥q c.length∥ * ∏ i, ∥p.apply_composition c v i∥ : continuous_multilinear_map.le_op_norm _ _
... ≤ ∥q c.length∥ * ∏ i, ∥p (c.blocks_fun i)∥ * ∏ j : fin (c.blocks_fun i), ∥(v ∘ (c.embedding i)) j∥ :
begin
apply mul_le_mul_of_nonneg_left _ (norm_nonneg _),
refine finset.prod_le_prod (λ i hi, norm_nonneg _) (λ i hi, _),
apply continuous_multilinear_map.le_op_norm,
end
... = ∥q c.length∥ * (∏ i, ∥p (c.blocks_fun i)∥) *
∏ i (j : fin (c.blocks_fun i)), ∥(v ∘ (c.embedding i)) j∥ :
by rw [finset.prod_mul_distrib, mul_assoc]
... = ∥q c.length∥ * (∏ i, ∥p (c.blocks_fun i)∥) * (∏ i : fin n, ∥v i∥) :
by { rw [← c.blocks_fin_equiv.prod_comp, ← finset.univ_sigma_univ, finset.prod_sigma],
congr }
/-- Given two formal multilinear series `q` and `p` and a composition `c` of `n`, one may
form a continuous multilinear map in `n` variables by applying the right coefficient of `p` to each
block of the composition, and then applying `q c.length` to the resulting vector. It is
called `q.comp_along_composition p c`. It is constructed from the analogous multilinear
function `q.comp_along_composition_multilinear p c`, together with a norm control to get
the continuity. -/
def comp_along_composition {n : ℕ}
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F)
(c : composition n) : continuous_multilinear_map 𝕜 (λ i : fin n, E) G :=
(q.comp_along_composition_multilinear p c).mk_continuous _
(q.comp_along_composition_multilinear_bound p c)
@[simp] lemma comp_along_composition_apply {n : ℕ}
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F)
(c : composition n) (v : fin n → E) :
(q.comp_along_composition p c) v = q c.length (p.apply_composition c v) := rfl
/-- The norm of `q.comp_along_composition p c` is controlled by the product of
the norms of the relevant bits of `q` and `p`. -/
lemma comp_along_composition_norm {n : ℕ}
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F)
(c : composition n) :
∥q.comp_along_composition p c∥ ≤ ∥q c.length∥ * ∏ i, ∥p (c.blocks_fun i)∥ :=
multilinear_map.mk_continuous_norm_le _
(mul_nonneg (norm_nonneg _) (finset.prod_nonneg (λ i hi, norm_nonneg _))) _
lemma comp_along_composition_nnnorm {n : ℕ}
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F)
(c : composition n) :
nnnorm (q.comp_along_composition p c) ≤ nnnorm (q c.length) * ∏ i, nnnorm (p (c.blocks_fun i)) :=
by simpa only [← nnreal.coe_le_coe, coe_nnnorm, nnreal.coe_mul, coe_nnnorm, nnreal.coe_prod, coe_nnnorm]
using q.comp_along_composition_norm p c
/-- Formal composition of two formal multilinear series. The `n`-th coefficient in the composition
is defined to be the sum of `q.comp_along_composition p c` over all compositions of
`n`. In other words, this term (as a multilinear function applied to `v_0, ..., v_{n-1}`) is
`∑'_{k} ∑'_{i₁ + ... + iₖ = n} qₖ (p_{i_1} (...), ..., p_{i_k} (...))`, where one puts all variables
`v_0, ..., v_{n-1}` in increasing order in the dots.-/
protected def comp (q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F) :
formal_multilinear_series 𝕜 E G :=
λ n, ∑ c : composition n, q.comp_along_composition p c
/-- The `0`-th coefficient of `q.comp p` is `q 0`. Since these maps are multilinear maps in zero
variables, but on different spaces, we can not state this directly, so we state it when applied to
arbitrary vectors (which have to be the zero vector). -/
lemma comp_coeff_zero (q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F)
(v : fin 0 → E) (v' : fin 0 → F) :
(q.comp p) 0 v = q 0 v' :=
begin
let c : composition 0 := composition.ones 0,
dsimp [formal_multilinear_series.comp],
have : {c} = (finset.univ : finset (composition 0)),
{ apply finset.eq_of_subset_of_card_le; simp [finset.card_univ, composition_card 0] },
rw ← this,
simp only [finset.sum_singleton, continuous_multilinear_map.sum_apply],
change q c.length (p.apply_composition c v) = q 0 v',
congr' with i,
simp only [composition.ones_length] at i,
exact fin_zero_elim i
end
@[simp] lemma comp_coeff_zero'
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F) (v : fin 0 → E) :
(q.comp p) 0 v = q 0 (λ i, 0) :=
q.comp_coeff_zero p v _
/-- The `0`-th coefficient of `q.comp p` is `q 0`. When `p` goes from `E` to `E`, this can be
expressed as a direct equality -/
lemma comp_coeff_zero'' (q : formal_multilinear_series 𝕜 E F) (p : formal_multilinear_series 𝕜 E E) :
(q.comp p) 0 = q 0 :=
by { ext v, exact q.comp_coeff_zero p _ _ }
/-!
### The identity formal power series
We will now define the identity power series, and show that it is a neutral element for left and
right composition.
-/
section
variables (𝕜 E)
/-- The identity formal multilinear series, with all coefficients equal to `0` except for `n = 1`
where it is (the continuous multilinear version of) the identity. -/
def id : formal_multilinear_series 𝕜 E E
| 0 := 0
| 1 := (continuous_multilinear_curry_fin1 𝕜 E E).symm (continuous_linear_map.id 𝕜 E)
| _ := 0
/-- The first coefficient of `id 𝕜 E` is the identity. -/
@[simp] lemma id_apply_one (v : fin 1 → E) : (formal_multilinear_series.id 𝕜 E) 1 v = v 0 := rfl
/-- The `n`th coefficient of `id 𝕜 E` is the identity when `n = 1`. We state this in a dependent
way, as it will often appear in this form. -/
lemma id_apply_one' {n : ℕ} (h : n = 1) (v : fin n → E) :
(id 𝕜 E) n v = v ⟨0, h.symm ▸ zero_lt_one⟩ :=
begin
let w : fin 1 → E := λ i, v ⟨i.1, h.symm ▸ i.2⟩,
have : v ⟨0, h.symm ▸ zero_lt_one⟩ = w 0 := rfl,
rw [this, ← id_apply_one 𝕜 E w],
apply congr _ h,
intros,
obtain rfl : i = 0, { linarith },
exact this,
end
/-- For `n ≠ 1`, the `n`-th coefficient of `id 𝕜 E` is zero, by definition. -/
@[simp] lemma id_apply_ne_one {n : ℕ} (h : n ≠ 1) : (formal_multilinear_series.id 𝕜 E) n = 0 :=
by { cases n, { refl }, cases n, { contradiction }, refl }
end
@[simp] theorem comp_id (p : formal_multilinear_series 𝕜 E F) : p.comp (id 𝕜 E) = p :=
begin
ext1 n,
dsimp [formal_multilinear_series.comp],
rw finset.sum_eq_single (composition.ones n),
show comp_along_composition p (id 𝕜 E) (composition.ones n) = p n,
{ ext v,
rw comp_along_composition_apply,
apply p.congr (composition.ones_length n),
intros,
rw apply_composition_ones,
refine congr_arg v _,
rw [fin.ext_iff, fin.coe_cast_le, fin.coe_mk, fin.coe_mk], },
show ∀ (b : composition n),
b ∈ finset.univ → b ≠ composition.ones n → comp_along_composition p (id 𝕜 E) b = 0,
{ assume b _ hb,
obtain ⟨k, hk, lt_k⟩ : ∃ (k : ℕ) (H : k ∈ composition.blocks b), 1 < k :=
composition.ne_ones_iff.1 hb,
obtain ⟨i, i_lt, hi⟩ : ∃ (i : ℕ) (h : i < b.blocks.length), b.blocks.nth_le i h = k :=
nth_le_of_mem hk,
let j : fin b.length := ⟨i, b.blocks_length ▸ i_lt⟩,
have A : 1 < b.blocks_fun j := by convert lt_k,
ext v,
rw [comp_along_composition_apply, continuous_multilinear_map.zero_apply],
apply continuous_multilinear_map.map_coord_zero _ j,
dsimp [apply_composition],
rw id_apply_ne_one _ _ (ne_of_gt A),
refl },
{ simp }
end
theorem id_comp (p : formal_multilinear_series 𝕜 E F) (h : p 0 = 0) : (id 𝕜 F).comp p = p :=
begin
ext1 n,
by_cases hn : n = 0,
{ rw [hn, h],
ext v,
rw [comp_coeff_zero', id_apply_ne_one _ _ zero_ne_one],
refl },
{ dsimp [formal_multilinear_series.comp],
have n_pos : 0 < n := bot_lt_iff_ne_bot.mpr hn,
rw finset.sum_eq_single (composition.single n n_pos),
show comp_along_composition (id 𝕜 F) p (composition.single n n_pos) = p n,
{ ext v,
rw [comp_along_composition_apply, id_apply_one' _ _ (composition.single_length n_pos)],
dsimp [apply_composition],
refine p.congr rfl (λ i him hin, congr_arg v $ _),
ext, simp },
show ∀ (b : composition n),
b ∈ finset.univ → b ≠ composition.single n n_pos → comp_along_composition (id 𝕜 F) p b = 0,
{ assume b _ hb,
have A : b.length ≠ 1, by simpa [composition.eq_single_iff] using hb,
ext v,
rw [comp_along_composition_apply, id_apply_ne_one _ _ A],
refl },
{ simp } }
end
/-! ### Summability properties of the composition of formal power series-/
/-- If two formal multilinear series have positive radius of convergence, then the terms appearing
in the definition of their composition are also summable (when multiplied by a suitable positive
geometric term). -/
theorem comp_summable_nnreal
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F)
(hq : 0 < q.radius) (hp : 0 < p.radius) :
∃ (r : ℝ≥0), 0 < r ∧ summable (λ i, nnnorm (q.comp_along_composition p i.2) * r ^ i.1 :
(Σ n, composition n) → ℝ≥0) :=
begin
/- This follows from the fact that the growth rate of `∥qₙ∥` and `∥pₙ∥` is at most geometric,
giving a geometric bound on each `∥q.comp_along_composition p op∥`, together with the
fact that there are `2^(n-1)` compositions of `n`, giving at most a geometric loss. -/
rcases ennreal.lt_iff_exists_nnreal_btwn.1 hq with ⟨rq, rq_pos, hrq⟩,
rcases ennreal.lt_iff_exists_nnreal_btwn.1 hp with ⟨rp, rp_pos, hrp⟩,
obtain ⟨Cq, hCq⟩ : ∃ (Cq : ℝ≥0), ∀ n, nnnorm (q n) * rq^n ≤ Cq := q.bound_of_lt_radius hrq,
obtain ⟨Cp, hCp⟩ : ∃ (Cp : ℝ≥0), ∀ n, nnnorm (p n) * rp^n ≤ Cp := p.bound_of_lt_radius hrp,
let r0 : ℝ≥0 := (4 * max Cp 1)⁻¹,
set r := min rp 1 * min rq 1 * r0,
have r_pos : 0 < r,
{ apply mul_pos (mul_pos _ _),
{ rw [nnreal.inv_pos],
apply mul_pos,
{ norm_num },
{ exact lt_of_lt_of_le zero_lt_one (le_max_right _ _) } },
{ rw ennreal.coe_pos at rp_pos, simp [rp_pos, zero_lt_one] },
{ rw ennreal.coe_pos at rq_pos, simp [rq_pos, zero_lt_one] } },
let a : ennreal := ((4 : ℝ≥0) ⁻¹ : ℝ≥0),
have two_a : 2 * a < 1,
{ change ((2 : ℝ≥0) : ennreal) * ((4 : ℝ≥0) ⁻¹ : ℝ≥0) < (1 : ℝ≥0),
rw [← ennreal.coe_mul, ennreal.coe_lt_coe, ← nnreal.coe_lt_coe, nnreal.coe_mul],
change (2 : ℝ) * (4 : ℝ)⁻¹ < 1,
norm_num },
have I : ∀ (i : Σ (n : ℕ), composition n),
↑(nnnorm (q.comp_along_composition p i.2) * r ^ i.1) ≤ (Cq : ennreal) * a ^ i.1,
{ rintros ⟨n, c⟩,
rw [← ennreal.coe_pow, ← ennreal.coe_mul, ennreal.coe_le_coe],
calc nnnorm (q.comp_along_composition p c) * r ^ n
≤ (nnnorm (q c.length) * ∏ i, nnnorm (p (c.blocks_fun i))) * r ^ n :
mul_le_mul_of_nonneg_right (q.comp_along_composition_nnnorm p c) (bot_le)
... = (nnnorm (q c.length) * (min rq 1)^n) *
((∏ i, nnnorm (p (c.blocks_fun i))) * (min rp 1) ^ n) *
r0 ^ n : by { dsimp [r], ring_exp }
... ≤ (nnnorm (q c.length) * (min rq 1) ^ c.length) *
(∏ i, nnnorm (p (c.blocks_fun i)) * (min rp 1) ^ (c.blocks_fun i)) * r0 ^ n :
begin
apply_rules [mul_le_mul, bot_le, le_refl, pow_le_pow_of_le_one, min_le_right, c.length_le],
apply le_of_eq,
rw finset.prod_mul_distrib,
congr' 1,
conv_lhs { rw [← c.sum_blocks_fun, ← finset.prod_pow_eq_pow_sum] },
end
... ≤ Cq * (∏ i : fin c.length, Cp) * r0 ^ n :
begin
apply_rules [mul_le_mul, bot_le, le_trans _ (hCq c.length), le_refl, finset.prod_le_prod',
pow_le_pow_of_le_left, min_le_left],
assume i hi,
refine le_trans (mul_le_mul (le_refl _) _ bot_le bot_le) (hCp (c.blocks_fun i)),
exact pow_le_pow_of_le_left bot_le (min_le_left _ _) _
end
... ≤ Cq * (max Cp 1) ^ n * r0 ^ n :
begin
apply_rules [mul_le_mul, bot_le, le_refl],
simp only [finset.card_fin, finset.prod_const],
refine le_trans (pow_le_pow_of_le_left bot_le (le_max_left Cp 1) c.length) _,
apply pow_le_pow (le_max_right Cp 1) c.length_le,
end
... = Cq * 4⁻¹ ^ n :
begin
dsimp [r0],
have A : (4 : ℝ≥0) ≠ 0, by norm_num,
have B : max Cp 1 ≠ 0 :=
ne_of_gt (lt_of_lt_of_le zero_lt_one (le_max_right Cp 1)),
field_simp [A, B],
ring_exp
end },
refine ⟨r, r_pos, _⟩,
rw [← ennreal.tsum_coe_ne_top_iff_summable],
apply ne_of_lt,
calc (∑' (i : Σ (n : ℕ), composition n), ↑(nnnorm (q.comp_along_composition p i.2) * r ^ i.1))
≤ (∑' (i : Σ (n : ℕ), composition n), (Cq : ennreal) * a ^ i.1) : ennreal.tsum_le_tsum I
... = (∑' (n : ℕ), (∑' (c : composition n), (Cq : ennreal) * a ^ n)) : ennreal.tsum_sigma' _
... = (∑' (n : ℕ), ↑(fintype.card (composition n)) * (Cq : ennreal) * a ^ n) :
begin
congr' 1 with n : 1,
rw [tsum_fintype, finset.sum_const, nsmul_eq_mul, finset.card_univ, mul_assoc]
end
... ≤ (∑' (n : ℕ), (2 : ennreal) ^ n * (Cq : ennreal) * a ^ n) :
begin
apply ennreal.tsum_le_tsum (λ n, _),
apply ennreal.mul_le_mul (ennreal.mul_le_mul _ (le_refl _)) (le_refl _),
rw composition_card,
simp only [nat.cast_bit0, nat.cast_one, nat.cast_pow],
apply ennreal.pow_le_pow _ (nat.sub_le n 1),
have : (1 : ℝ≥0) ≤ (2 : ℝ≥0), by norm_num,
rw ← ennreal.coe_le_coe at this,
exact this
end
... = (∑' (n : ℕ), (Cq : ennreal) * (2 * a) ^ n) : by { congr' 1 with n : 1, rw mul_pow, ring }
... = (Cq : ennreal) * (1 - 2 * a) ⁻¹ : by rw [ennreal.tsum_mul_left, ennreal.tsum_geometric]
... < ⊤ : by simp [lt_top_iff_ne_top, ennreal.mul_eq_top, two_a]
end
/-- Bounding below the radius of the composition of two formal multilinear series assuming
summability over all compositions. -/
theorem le_comp_radius_of_summable
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F) (r : ℝ≥0)
(hr : summable (λ i, nnnorm (q.comp_along_composition p i.2) * r ^ i.1 :
(Σ n, composition n) → ℝ≥0)) :
(r : ennreal) ≤ (q.comp p).radius :=
begin
apply le_radius_of_bound _ (tsum (λ (i : Σ (n : ℕ), composition n),
(nnnorm (comp_along_composition q p i.snd) * r ^ i.fst))),
assume n,
calc nnnorm (formal_multilinear_series.comp q p n) * r ^ n ≤
∑' (c : composition n), nnnorm (comp_along_composition q p c) * r ^ n :
begin
rw [tsum_fintype, ← finset.sum_mul],
exact mul_le_mul_of_nonneg_right (nnnorm_sum_le _ _) bot_le
end
... ≤ ∑' (i : Σ (n : ℕ), composition n),
nnnorm (comp_along_composition q p i.snd) * r ^ i.fst :
begin
let f : composition n → (Σ (n : ℕ), composition n) := λ c, ⟨n, c⟩,
have : function.injective f, by tidy,
convert nnreal.tsum_comp_le_tsum_of_inj hr this
end
end
/-!
### Composing analytic functions
Now, we will prove that the composition of the partial sums of `q` and `p` up to order `N` is
given by a sum over some large subset of `Σ n, composition n` of `q.comp_along_composition p`, to
deduce that the series for `q.comp p` indeed converges to `g ∘ f` when `q` is a power series for
`g` and `p` is a power series for `f`.
This proof is a big reindexing argument of a sum. Since it is a bit involved, we define first
the source of the change of variables (`comp_partial_sum_source`), its target
(`comp_partial_sum_target`) and the change of variables itself (`comp_change_of_variables`) before
giving the main statement in `comp_partial_sum`. -/
/-- Source set in the change of variables to compute the composition of partial sums of formal
power series.
See also `comp_partial_sum`. -/
def comp_partial_sum_source (N : ℕ) : finset (Σ n, (fin n) → ℕ) :=
finset.sigma (finset.range N) (λ (n : ℕ), fintype.pi_finset (λ (i : fin n), finset.Ico 1 N) : _)
@[simp] lemma mem_comp_partial_sum_source_iff (N : ℕ) (i : Σ n, (fin n) → ℕ) :
i ∈ comp_partial_sum_source N ↔ i.1 < N ∧ ∀ (a : fin i.1), 1 ≤ i.2 a ∧ i.2 a < N :=
by simp only [comp_partial_sum_source, finset.Ico.mem,
fintype.mem_pi_finset, finset.mem_sigma, finset.mem_range]
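/- For instance, for `N = 3` the source contains the element of `Σ n, (fin n) → ℕ` given by
`n = 2` and the function with values `1` and `2` (both at least `1` and less than `3`), but not
the one whose function takes the value `0` somewhere, as blocks of size `0` are excluded. -/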
/-- Change of variables appearing to compute the composition of partial sums of formal
power series -/
def comp_change_of_variables (N : ℕ) (i : Σ n, (fin n) → ℕ) (hi : i ∈ comp_partial_sum_source N) :
(Σ n, composition n) :=
begin
rcases i with ⟨n, f⟩,
rw mem_comp_partial_sum_source_iff at hi,
refine ⟨∑ j, f j, of_fn (λ a, f a), λ i hi', _, by simp [sum_of_fn]⟩,
obtain ⟨j, rfl⟩ : ∃ (j : fin n), f j = i, by rwa [mem_of_fn, set.mem_range] at hi',
exact (hi.2 j).1
end
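/- For instance, with `N = 3` the element given by `n = 2` and the function with values `1` and
`2` is mapped to the composition of `3` with blocks `[1, 2]`: the target integer is the sum of
the values of the function, and the blocks are these values listed in order. -/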
@[simp] lemma comp_change_of_variables_length
(N : ℕ) {i : Σ n, (fin n) → ℕ} (hi : i ∈ comp_partial_sum_source N) :
composition.length (comp_change_of_variables N i hi).2 = i.1 :=
begin
rcases i with ⟨k, blocks_fun⟩,
dsimp [comp_change_of_variables],
simp only [composition.length, map_of_fn, length_of_fn]
end
lemma comp_change_of_variables_blocks_fun
(N : ℕ) {i : Σ n, (fin n) → ℕ} (hi : i ∈ comp_partial_sum_source N) (j : fin i.1) :
(comp_change_of_variables N i hi).2.blocks_fun
⟨j, (comp_change_of_variables_length N hi).symm ▸ j.2⟩ = i.2 j :=
begin
rcases i with ⟨n, f⟩,
dsimp [composition.blocks_fun, composition.blocks, comp_change_of_variables],
simp only [map_of_fn, nth_le_of_fn', function.comp_app],
apply congr_arg,
rw [fin.ext_iff, fin.mk_coe]
end
/-- Target set in the change of variables to compute the composition of partial sums of formal
power series, here given as a set. -/
def comp_partial_sum_target_set (N : ℕ) : set (Σ n, composition n) :=
{i | (i.2.length < N) ∧ (∀ (j : fin i.2.length), i.2.blocks_fun j < N)}
lemma comp_partial_sum_target_subset_image_comp_partial_sum_source
(N : ℕ) (i : Σ n, composition n) (hi : i ∈ comp_partial_sum_target_set N) :
∃ j (hj : j ∈ comp_partial_sum_source N), i = comp_change_of_variables N j hj :=
begin
rcases i with ⟨n, c⟩,
refine ⟨⟨c.length, c.blocks_fun⟩, _, _⟩,
{ simp only [comp_partial_sum_target_set, set.mem_set_of_eq] at hi,
simp only [mem_comp_partial_sum_source_iff, hi.left, hi.right, true_and, and_true],
exact λ a, c.one_le_blocks' _ },
{ dsimp [comp_change_of_variables],
rw composition.sigma_eq_iff_blocks_eq,
simp only [composition.blocks_fun, composition.blocks, subtype.coe_eta, nth_le_map'],
conv_lhs { rw ← of_fn_nth_le c.blocks },
simp only [fin.val_eq_coe], refl, /- where does this fin.val come from? -/ }
end
/-- Target set in the change of variables to compute the composition of partial sums of formal
power series, here given as a finset.
See also `comp_partial_sum`. -/
def comp_partial_sum_target (N : ℕ) : finset (Σ n, composition n) :=
set.finite.to_finset $ (finset.finite_to_set _).dependent_image
(comp_partial_sum_target_subset_image_comp_partial_sum_source N)
@[simp] lemma mem_comp_partial_sum_target_iff {N : ℕ} {a : Σ n, composition n} :
a ∈ comp_partial_sum_target N ↔ a.2.length < N ∧ (∀ (j : fin a.2.length), a.2.blocks_fun j < N) :=
by simp [comp_partial_sum_target, comp_partial_sum_target_set]
/-- The auxiliary set corresponding to the composition of partial sums asymptotically contains
all possible compositions. -/
lemma comp_partial_sum_target_tendsto_at_top :
tendsto comp_partial_sum_target at_top at_top :=
begin
apply monotone.tendsto_at_top_finset,
{ assume m n hmn a ha,
have : ∀ i, i < m → i < n := λ i hi, lt_of_lt_of_le hi hmn,
tidy },
{ rintros ⟨n, c⟩,
simp only [mem_comp_partial_sum_target_iff],
obtain ⟨n, hn⟩ : bdd_above ↑(finset.univ.image (λ (i : fin c.length), c.blocks_fun i)) :=
finset.bdd_above _,
refine ⟨max n c.length + 1, lt_of_le_of_lt (le_max_right n c.length) (lt_add_one _),
λ j, lt_of_le_of_lt (le_trans _ (le_max_left _ _)) (lt_add_one _)⟩,
apply hn,
simp only [finset.mem_image_of_mem, finset.mem_coe, finset.mem_univ] }
end
/-- Composing the partial sums of two multilinear series coincides with the sum over all
compositions in `comp_partial_sum_target N`. This is precisely the motivation for the definition of
`comp_partial_sum_target N`. -/
lemma comp_partial_sum
(q : formal_multilinear_series 𝕜 F G) (p : formal_multilinear_series 𝕜 E F) (N : ℕ) (z : E) :
q.partial_sum N (∑ i in finset.Ico 1 N, p i (λ j, z)) =
∑ i in comp_partial_sum_target N, q.comp_along_composition_multilinear p i.2 (λ j, z) :=
begin
-- we expand the composition, using the multilinearity of `q` to expand along each coordinate.
suffices H : ∑ n in finset.range N, ∑ r in fintype.pi_finset (λ (i : fin n), finset.Ico 1 N),
q n (λ (i : fin n), p (r i) (λ j, z)) =
∑ i in comp_partial_sum_target N, q.comp_along_composition_multilinear p i.2 (λ j, z),
by simpa only [formal_multilinear_series.partial_sum,
continuous_multilinear_map.map_sum_finset] using H,
-- rewrite the first sum as a big sum over a sigma type
rw [finset.sum_sigma'],
-- show that the two sums correspond to each other by reindexing the variables.
apply finset.sum_bij (comp_change_of_variables N),
-- To conclude, we should show that the correspondence we have set up is indeed a bijection
-- between the index sets of the two sums.
-- 1 - show that the image belongs to `comp_partial_sum_target N`
{ rintros ⟨k, blocks_fun⟩ H,
rw mem_comp_partial_sum_source_iff at H,
simp only [mem_comp_partial_sum_target_iff, composition.length, composition.blocks, H.left,
map_of_fn, length_of_fn, true_and, comp_change_of_variables],
assume j,
simp only [composition.blocks_fun, (H.right _).right, nth_le_of_fn'] },
-- 2 - show that the composition gives the `comp_along_composition` application
{ rintros ⟨k, blocks_fun⟩ H,
apply congr _ (comp_change_of_variables_length N H).symm,
intros,
rw ← comp_change_of_variables_blocks_fun N H,
refl },
-- 3 - show that the map is injective
{ rintros ⟨k, blocks_fun⟩ ⟨k', blocks_fun'⟩ H H' heq,
obtain rfl : k = k',
{ have := (comp_change_of_variables_length N H).symm,
rwa [heq, comp_change_of_variables_length] at this, },
congr,
funext i,
calc blocks_fun i = (comp_change_of_variables N _ H).2.blocks_fun _ :
(comp_change_of_variables_blocks_fun N H i).symm
... = (comp_change_of_variables N _ H').2.blocks_fun _ :
begin
apply composition.blocks_fun_congr; try { rw heq },
refl
end
... = blocks_fun' i : comp_change_of_variables_blocks_fun N H' i },
-- 4 - show that the map is surjective
{ assume i hi,
apply comp_partial_sum_target_subset_image_comp_partial_sum_source N i,
simpa [comp_partial_sum_target] using hi }
end
end formal_multilinear_series
open formal_multilinear_series
/-- If two functions `g` and `f` have power series `q` and `p` respectively at `f x` and `x`, then
`g ∘ f` admits the power series `q.comp p` at `x`. -/
theorem has_fpower_series_at.comp {g : F → G} {f : E → F}
{q : formal_multilinear_series 𝕜 F G} {p : formal_multilinear_series 𝕜 E F} {x : E}
(hg : has_fpower_series_at g q (f x)) (hf : has_fpower_series_at f p x) :
has_fpower_series_at (g ∘ f) (q.comp p) x :=
begin
/- Consider `rf` and `rg` such that `f` and `g` have power series expansion on the disks
of radius `rf` and `rg`. -/
rcases hg with ⟨rg, Hg⟩,
rcases hf with ⟨rf, Hf⟩,
/- The terms defining `q.comp p` are geometrically summable in a disk of some radius `r`. -/
rcases q.comp_summable_nnreal p Hg.radius_pos Hf.radius_pos with ⟨r, r_pos, hr⟩,
/- We will consider `y` which is smaller than `r` and `rf`, and also small enough that
`f (x + y)` is close enough to `f x` to be in the disk where `g` is well behaved. Let
`min (r, rf, δ)` be this new radius.-/
have : continuous_at f x := Hf.analytic_at.continuous_at,
obtain ⟨δ, δpos, hδ⟩ : ∃ (δ : ennreal) (H : 0 < δ),
∀ {z : E}, z ∈ emetric.ball x δ → f z ∈ emetric.ball (f x) rg,
{ have : emetric.ball (f x) rg ∈ 𝓝 (f x) := emetric.ball_mem_nhds _ Hg.r_pos,
rcases emetric.mem_nhds_iff.1 (Hf.analytic_at.continuous_at this) with ⟨δ, δpos, Hδ⟩,
exact ⟨δ, δpos, λ z hz, Hδ hz⟩ },
let rf' := min rf δ,
have min_pos : 0 < min rf' r,
by simp only [r_pos, Hf.r_pos, δpos, lt_min_iff, ennreal.coe_pos, and_self],
/- We will show that `g ∘ f` admits the power series `q.comp p` in the disk of
radius `min (r, rf', δ)`. -/
refine ⟨min rf' r, _⟩,
refine ⟨le_trans (min_le_right rf' r)
(formal_multilinear_series.le_comp_radius_of_summable q p r hr), min_pos, λ y hy, _⟩,
/- Let `y` satisfy `∥y∥ < min (r, rf', δ)`. We want to show that `g (f (x + y))` is the sum of
`q.comp p` applied to `y`. -/
-- First, check that `y` is small enough so that estimates for `f` and `g` apply.
have y_mem : y ∈ emetric.ball (0 : E) rf :=
(emetric.ball_subset_ball (le_trans (min_le_left _ _) (min_le_left _ _))) hy,
have fy_mem : f (x + y) ∈ emetric.ball (f x) rg,
{ apply hδ,
have : y ∈ emetric.ball (0 : E) δ :=
(emetric.ball_subset_ball (le_trans (min_le_left _ _) (min_le_right _ _))) hy,
simpa [edist_eq_coe_nnnorm_sub, edist_eq_coe_nnnorm] },
/- Now the proof starts. To show that the sum of `q.comp p` at `y` is `g (f (x + y))`, we will
write `q.comp p` applied to `y` as a big sum over all compositions. Since the sum is
summable, to get its convergence it suffices to get the convergence along some increasing sequence
of sets. We will use the sequence of sets `comp_partial_sum_target n`, along which the sum is
exactly the composition of the partial sums of `q` and `p`, by design. To show that it converges
to `g (f (x + y))`, pointwise convergence would not be enough, but we have uniform convergence
to save the day. -/
-- First step: the partial sum of `p` converges to `f (x + y)`.
have A : tendsto (λ n, ∑ a in finset.Ico 1 n, p a (λ b, y)) at_top (𝓝 (f (x + y) - f x)),
{ have L : ∀ᶠ n in at_top, ∑ a in finset.range n, p a (λ b, y) - f x =
∑ a in finset.Ico 1 n, p a (λ b, y),
{ rw eventually_at_top,
refine ⟨1, λ n hn, _⟩,
symmetry,
rw [eq_sub_iff_add_eq', finset.range_eq_Ico, ← Hf.coeff_zero (λi, y),
finset.sum_eq_sum_Ico_succ_bot hn] },
have : tendsto (λ n, ∑ a in finset.range n, p a (λ b, y) - f x) at_top (𝓝 (f (x + y) - f x)) :=
(Hf.has_sum y_mem).tendsto_sum_nat.sub tendsto_const_nhds,
exact tendsto.congr' L this },
-- Second step: the composition of the partial sums of `q` and `p` converges to `g (f (x + y))`.
have B : tendsto (λ n, q.partial_sum n (∑ a in finset.Ico 1 n, p a (λ b, y)))
at_top (𝓝 (g (f (x + y)))),
{ -- we use the fact that the partial sums of `q` converge locally uniformly to `g`, and that
-- composition passes to the limit under locally uniform convergence.
have B₁ : continuous_at (λ (z : F), g (f x + z)) (f (x + y) - f x),
{ refine continuous_at.comp _ (continuous_const.add continuous_id).continuous_at,
simp only [add_sub_cancel'_right, id.def],
exact Hg.continuous_on.continuous_at (mem_nhds_sets (emetric.is_open_ball) fy_mem) },
have B₂ : f (x + y) - f x ∈ emetric.ball (0 : F) rg,
by simpa [edist_eq_coe_nnnorm, edist_eq_coe_nnnorm_sub] using fy_mem,
rw [← nhds_within_eq_of_open B₂ emetric.is_open_ball] at A,
convert Hg.tendsto_locally_uniformly_on.tendsto_comp B₁.continuous_within_at B₂ A,
simp only [add_sub_cancel'_right] },
-- Third step: the sum over all compositions in `comp_partial_sum_target n` converges to
-- `g (f (x + y))`. As this sum is exactly the composition of the partial sums, this is a direct
-- consequence of the second step
have C : tendsto (λ n,
∑ i in comp_partial_sum_target n, q.comp_along_composition_multilinear p i.2 (λ j, y))
at_top (𝓝 (g (f (x + y)))),
by simpa [comp_partial_sum] using B,
-- Fourth step: the sum over all compositions is `g (f (x + y))`. This follows from the
-- convergence along a subsequence proved in the third step, and the fact that the sum is Cauchy
-- thanks to the summability properties.
have D : has_sum (λ i : (Σ n, composition n),
q.comp_along_composition_multilinear p i.2 (λ j, y)) (g (f (x + y))),
{ have cau : cauchy_seq (λ (s : finset (Σ n, composition n)),
∑ i in s, q.comp_along_composition_multilinear p i.2 (λ j, y)),
{ apply cauchy_seq_finset_of_norm_bounded _ (nnreal.summable_coe.2 hr) _,
simp only [coe_nnnorm, nnreal.coe_mul, nnreal.coe_pow],
rintros ⟨n, c⟩,
calc ∥(comp_along_composition q p c) (λ (j : fin n), y)∥
≤ ∥comp_along_composition q p c∥ * ∏ j : fin n, ∥y∥ :
by apply continuous_multilinear_map.le_op_norm
... ≤ ∥comp_along_composition q p c∥ * (r : ℝ) ^ n :
begin
apply mul_le_mul_of_nonneg_left _ (norm_nonneg _),
rw [finset.prod_const, finset.card_fin],
apply pow_le_pow_of_le_left (norm_nonneg _),
rw [emetric.mem_ball, edist_eq_coe_nnnorm] at hy,
have := (le_trans (le_of_lt hy) (min_le_right _ _)),
rwa [ennreal.coe_le_coe, ← nnreal.coe_le_coe, coe_nnnorm] at this
end },
exact tendsto_nhds_of_cauchy_seq_of_subseq cau
comp_partial_sum_target_tendsto_at_top C },
-- Fifth step: the sum over `n` of `q.comp p n` can be expressed as a particular resummation of
-- the sum over all compositions, by grouping together the compositions of the same
-- integer `n`. The convergence of the whole sum therefore implies the convergence of the sum
-- of `q.comp p n`
have E : has_sum (λ n, (q.comp p) n (λ j, y)) (g (f (x + y))),
{ apply D.sigma,
assume n,
dsimp [formal_multilinear_series.comp],
convert has_sum_fintype _,
simp only [continuous_multilinear_map.sum_apply],
refl },
exact E
end
/-- If two functions `g` and `f` are analytic respectively at `f x` and `x`, then `g ∘ f` is
analytic at `x`. -/
theorem analytic_at.comp {g : F → G} {f : E → F} {x : E}
(hg : analytic_at 𝕜 g (f x)) (hf : analytic_at 𝕜 f x) : analytic_at 𝕜 (g ∘ f) x :=
let ⟨q, hq⟩ := hg, ⟨p, hp⟩ := hf in (hq.comp hp).analytic_at
/-!
### Associativity of the composition of formal multilinear series
In this paragraph, we prove the associativity of the composition of formal power series.
By definition,
```
(r.comp q).comp p n v
= ∑_{i₁ + ... + iₖ = n} (r.comp q)ₖ (p_{i₁} (v₀, ..., v_{i₁ -1}), p_{i₂} (...), ..., p_{iₖ}(...))
= ∑_{a : composition n} (r.comp q) a.length (apply_composition p a v)
```
decomposing `r.comp q` in the same way, we get
```
(r.comp q).comp p n v
= ∑_{a : composition n} ∑_{b : composition a.length}
r b.length (apply_composition q b (apply_composition p a v))
```
On the other hand,
```
r.comp (q.comp p) n v = ∑_{c : composition n} r c.length (apply_composition (q.comp p) c v)
```
Here, `apply_composition (q.comp p) c v` is a vector of length `c.length`, whose `i`-th term is
given by `(q.comp p) (c.blocks_fun i) (v_l, v_{l+1}, ..., v_{m-1})` where `{l, ..., m-1}` is the
`i`-th block in the composition `c`, of length `c.blocks_fun i` by definition. To compute this term,
we expand it as `∑_{dᵢ : composition (c.blocks_fun i)} q dᵢ.length (apply_composition p dᵢ v')`,
where `v' = (v_l, v_{l+1}, ..., v_{m-1})`. Therefore, we get
```
r.comp (q.comp p) n v =
∑_{c : composition n} ∑_{d₀ : composition (c.blocks_fun 0),
..., d_{c.length - 1} : composition (c.blocks_fun (c.length - 1))}
r c.length (λ i, q dᵢ.length (apply_composition p dᵢ v'ᵢ))
```
To show that these terms coincide, we need to explain how to reindex the sums to put them in
bijection (and then the terms we are summing will correspond to each other). Suppose we have a
composition `a` of `n`, and a composition `b` of `a.length`. Then `b` indicates how to group
together some blocks of `a`, giving altogether `b.length` blocks of blocks. These blocks of blocks
can be called `d₀, ..., d_{b.length - 1}`, and one obtains a composition `c` of `n` by saying that
each `dᵢ` is one single block. Conversely, if one starts from `c` and the `dᵢ`s, one can concatenate
the `dᵢ`s to obtain a composition `a` of `n`, and register the lengths of the `dᵢ`s in a composition
`b` of `a.length`.
An example might be enlightening. Suppose `a = [2, 2, 3, 4, 2]`. It is a composition of
length 5 of 13. The content of the blocks may be represented as `0011222333344`.
Now take `b = [2, 3]` as a composition of `a.length = 5`. It says that the first 2 blocks of `a`
should be merged, and the last 3 blocks of `a` should be merged, giving a new composition of `13`
made of two blocks of length `4` and `9`, i.e., `c = [4, 9]`. But one can also remember that
the new first block was initially made of two blocks of size `2`, so `d₀ = [2, 2]`, and the new
second block was initially made of three blocks of size `3`, `4` and `2`, so `d₁ = [3, 4, 2]`.
This equivalence is called `composition.sigma_equiv_sigma_pi n` below.
We start with preliminary results on compositions, of a very specialized nature, then define the
equivalence `composition.sigma_equiv_sigma_pi n`, and we deduce finally the associativity of
composition of formal multilinear series in `formal_multilinear_series.comp_assoc`.
-/
namespace composition
variable {n : ℕ}
/-- Rewriting equality in the dependent type `(Σ (a : composition n), composition a.length)` in
non-dependent terms with lists, requiring that the blocks coincide. -/
lemma sigma_composition_eq_iff (i j : Σ (a : composition n), composition a.length) :
i = j ↔ i.1.blocks = j.1.blocks ∧ i.2.blocks = j.2.blocks :=
begin
refine ⟨by rintro rfl; exact ⟨rfl, rfl⟩, _⟩,
rcases i with ⟨a, b⟩,
rcases j with ⟨a', b'⟩,
rintros ⟨h, h'⟩,
have H : a = a', by { ext1, exact h },
induction H, congr, ext1, exact h'
end
/-- Rewriting equality in the dependent type
`Σ (c : composition n), Π (i : fin c.length), composition (c.blocks_fun i)` in
non-dependent terms with lists, requiring that the lists of blocks coincide. -/
lemma sigma_pi_composition_eq_iff
(u v : Σ (c : composition n), Π (i : fin c.length), composition (c.blocks_fun i)) :
u = v ↔ of_fn (λ i, (u.2 i).blocks) = of_fn (λ i, (v.2 i).blocks) :=
begin
refine ⟨λ H, by rw H, λ H, _⟩,
rcases u with ⟨a, b⟩,
rcases v with ⟨a', b'⟩,
dsimp at H,
have h : a = a',
{ ext1,
have : map list.sum (of_fn (λ (i : fin (composition.length a)), (b i).blocks)) =
map list.sum (of_fn (λ (i : fin (composition.length a')), (b' i).blocks)), by rw H,
simp only [map_of_fn] at this,
change of_fn (λ (i : fin (composition.length a)), (b i).blocks.sum) =
of_fn (λ (i : fin (composition.length a')), (b' i).blocks.sum) at this,
simpa [composition.blocks_sum, composition.of_fn_blocks_fun] using this },
induction h,
simp only [true_and, eq_self_iff_true, heq_iff_eq],
ext i : 2,
have : nth_le (of_fn (λ (i : fin (composition.length a)), (b i).blocks)) i (by simp [i.is_lt]) =
nth_le (of_fn (λ (i : fin (composition.length a)), (b' i).blocks)) i (by simp [i.is_lt]) :=
nth_le_of_eq H _,
rwa [nth_le_of_fn, nth_le_of_fn] at this
end
/-- When `a` is a composition of `n` and `b` is a composition of `a.length`, `a.gather b` is the
composition of `n` obtained by gathering all the blocks of `a` corresponding to a block of `b`.
For instance, if `a = [6, 5, 3, 5, 2]` and `b = [2, 3]`, one should gather together
the first two blocks of `a` and its last three blocks, giving `a.gather b = [11, 10]`. -/
def gather (a : composition n) (b : composition a.length) : composition n :=
{ blocks := (a.blocks.split_wrt_composition b).map sum,
blocks_pos :=
begin
rw forall_mem_map_iff,
intros j hj,
suffices H : ∀ i ∈ j, 1 ≤ i, from
calc 0 < j.length : length_pos_of_mem_split_wrt_composition hj
... ≤ j.sum : length_le_sum_of_one_le _ H,
intros i hi,
apply a.one_le_blocks,
rw ← a.blocks.join_split_wrt_composition b,
exact mem_join_of_mem hj hi,
end,
blocks_sum := by { rw [← sum_join, join_split_wrt_composition, a.blocks_sum] } }
lemma length_gather (a : composition n) (b : composition a.length) :
length (a.gather b) = b.length :=
show (map list.sum (a.blocks.split_wrt_composition b)).length = b.blocks.length,
by rw [length_map, length_split_wrt_composition]
/-- An auxiliary function used in the definition of `sigma_equiv_sigma_pi` below, associating to
two compositions `a` of `n` and `b` of `a.length`, and an index `i` bounded by the length of
`a.gather b`, the subcomposition of `a` made of those blocks belonging to the `i`-th block of
`a.gather b`. -/
def sigma_composition_aux (a : composition n) (b : composition a.length)
(i : fin (a.gather b).length) :
composition ((a.gather b).blocks_fun i) :=
{ blocks := nth_le (a.blocks.split_wrt_composition b) i
(by { rw [length_split_wrt_composition, ← length_gather], exact i.2 }),
blocks_pos := assume i hi, a.blocks_pos
(by { rw ← a.blocks.join_split_wrt_composition b, exact mem_join_of_mem (nth_le_mem _ _ _) hi }),
blocks_sum := by simp only [composition.blocks_fun, nth_le_map', composition.gather, fin.val_eq_coe] }
/- Where did the fin.val come from in the proof on the preceding line? -/
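/- For instance, with `a = [6, 5, 3, 5, 2]` and `b = [2, 3]` as in the docstring of `gather`, one
has `a.gather b = [11, 10]`, and `sigma_composition_aux a b` associates to the two blocks of
`a.gather b` the compositions `[6, 5]` (of `11`) and `[3, 5, 2]` (of `10`). -/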
lemma length_sigma_composition_aux (a : composition n) (b : composition a.length) (i : fin b.length) :
composition.length (composition.sigma_composition_aux a b ⟨i, (length_gather a b).symm ▸ i.2⟩) =
composition.blocks_fun b i :=
show list.length (nth_le (split_wrt_composition a.blocks b) i _) = blocks_fun b i,
by { rw [nth_le_map_rev list.length, nth_le_of_eq (map_length_split_wrt_composition _ _)], refl }
lemma blocks_fun_sigma_composition_aux (a : composition n) (b : composition a.length)
(i : fin b.length) (j : fin (blocks_fun b i)) :
blocks_fun (sigma_composition_aux a b ⟨i, (length_gather a b).symm ▸ i.2⟩)
⟨j, (length_sigma_composition_aux a b i).symm ▸ j.2⟩ = blocks_fun a (embedding b i j) :=
show nth_le (nth_le _ _ _) _ _ = nth_le a.blocks _ _,
by { rw [nth_le_of_eq (nth_le_split_wrt_composition _ _ _), nth_le_drop', nth_le_take'], refl }
/-- Auxiliary lemma to prove that the composition of formal multilinear series is associative.
Consider a composition `a` of `n` and a composition `b` of `a.length`. Grouping together some
blocks of `a` according to `b` as in `a.gather b`, one can compute the total size of the blocks
of `a` up to an index `size_up_to b i + j` (where the `j` corresponds to a set of blocks of `a`
that do not fill a whole block of `a.gather b`). The first part corresponds to a sum of blocks
in `a.gather b`, and the second one to a sum of blocks in the next block of
`sigma_composition_aux a b`. This is the content of this lemma. -/
lemma size_up_to_size_up_to_add (a : composition n) (b : composition a.length)
{i j : ℕ} (hi : i < b.length) (hj : j < blocks_fun b ⟨i, hi⟩) :
size_up_to a (size_up_to b i + j) = size_up_to (a.gather b) i +
(size_up_to (sigma_composition_aux a b ⟨i, (length_gather a b).symm ▸ hi⟩) j) :=
begin
induction j with j IHj,
{ show sum (take ((b.blocks.take i).sum) a.blocks) =
sum (take i (map sum (split_wrt_composition a.blocks b))),
induction i with i IH,
{ refl },
{ have A : i < b.length := nat.lt_of_succ_lt hi,
have B : i < list.length (map list.sum (split_wrt_composition a.blocks b)), by simp [A],
have C : 0 < blocks_fun b ⟨i, A⟩ := composition.blocks_pos' _ _ _,
rw [sum_take_succ _ _ B, ← IH A C],
have : take (sum (take i b.blocks)) a.blocks =
take (sum (take i b.blocks)) (take (sum (take (i+1) b.blocks)) a.blocks),
{ rw [take_take, min_eq_left],
apply monotone_sum_take _ (nat.le_succ _) },
rw [this, nth_le_map', nth_le_split_wrt_composition,
← take_append_drop (sum (take i b.blocks)) ((take (sum (take (nat.succ i) b.blocks)) a.blocks)),
sum_append],
congr,
rw [take_append_drop] } },
{ have A : j < blocks_fun b ⟨i, hi⟩ := lt_trans (lt_add_one j) hj,
have B : j < length (sigma_composition_aux a b ⟨i, (length_gather a b).symm ▸ hi⟩),
by { convert A, rw [← length_sigma_composition_aux], refl },
have C : size_up_to b i + j < size_up_to b (i + 1),
{ simp only [size_up_to_succ b hi, add_lt_add_iff_left],
exact A },
have D : size_up_to b i + j < length a := lt_of_lt_of_le C (b.size_up_to_le _),
have : size_up_to b i + nat.succ j = (size_up_to b i + j).succ := rfl,
rw [this, size_up_to_succ _ D, IHj A, size_up_to_succ _ B],
simp only [sigma_composition_aux, add_assoc, add_left_inj, fin.coe_mk],
rw [nth_le_of_eq (nth_le_split_wrt_composition _ _ _), nth_le_drop', nth_le_take _ _ C] }
end
/--
Natural equivalence between `(Σ (a : composition n), composition a.length)` and
`(Σ (c : composition n), Π (i : fin c.length), composition (c.blocks_fun i))`, that shows up as a
change of variables in the proof that composition of formal multilinear series is associative.
Consider a composition `a` of `n` and a composition `b` of `a.length`. Then `b` indicates how to
group together some blocks of `a`, giving altogether `b.length` blocks of blocks. These blocks of
blocks can be called `d₀, ..., d_{b.length - 1}`, and one obtains a composition `c` of `n` by
saying that each `dᵢ` is one single block. The map `⟨a, b⟩ → ⟨c, (d₀, ..., d_{b.length - 1})⟩` is
the direct map in the equiv.
Conversely, if one starts from `c` and the `dᵢ`s, one can join the `dᵢ`s to obtain a composition
`a` of `n`, and register the lengths of the `dᵢ`s in a composition `b` of `a.length`. This is the
inverse map of the equiv.
-/
def sigma_equiv_sigma_pi (n : ℕ) :
(Σ (a : composition n), composition a.length) ≃
(Σ (c : composition n), Π (i : fin c.length), composition (c.blocks_fun i)) :=
{ to_fun := λ i, ⟨i.1.gather i.2, i.1.sigma_composition_aux i.2⟩,
inv_fun := λ i, ⟨
{ blocks := (of_fn (λ j, (i.2 j).blocks)).join,
blocks_pos :=
begin
simp only [and_imp, mem_join, exists_imp_distrib, forall_mem_of_fn_iff],
exact λ i j hj, composition.blocks_pos _ hj
end,
blocks_sum := by simp [sum_of_fn, composition.blocks_sum, composition.sum_blocks_fun] },
{ blocks := of_fn (λ j, (i.2 j).length),
blocks_pos := forall_mem_of_fn_iff.2
(λ j, composition.length_pos_of_pos _ (composition.blocks_pos' _ _ _)),
blocks_sum := by { dsimp only [composition.length], simp [sum_of_fn] } }⟩,
left_inv :=
begin
-- the fact that we have a left inverse is essentially `join_split_wrt_composition`,
-- but we need to massage it to take care of the dependent setting.
rintros ⟨a, b⟩,
rw sigma_composition_eq_iff,
dsimp,
split,
{ have A := length_map list.sum (split_wrt_composition a.blocks b),
conv_rhs { rw [← join_split_wrt_composition a.blocks b,
← of_fn_nth_le (split_wrt_composition a.blocks b)] },
congr,
{ exact A },
{ exact (fin.heq_fun_iff A).2 (λ i, rfl) } },
{ have B : composition.length (composition.gather a b) = list.length b.blocks :=
composition.length_gather _ _,
conv_rhs { rw [← of_fn_nth_le b.blocks] },
congr' 1,
{ exact B },
{ apply (fin.heq_fun_iff B).2 (λ i, _),
rw [sigma_composition_aux, composition.length, nth_le_map_rev list.length,
nth_le_of_eq (map_length_split_wrt_composition _ _)], refl } }
end,
right_inv :=
begin
-- the fact that we have a right inverse is essentially `split_wrt_composition_join`,
-- but we need to massage it to take care of the dependent setting.
rintros ⟨c, d⟩,
have : map list.sum (of_fn (λ (i : fin (composition.length c)), (d i).blocks)) = c.blocks,
by simp [map_of_fn, (∘), composition.blocks_sum, composition.of_fn_blocks_fun],
rw sigma_pi_composition_eq_iff,
dsimp,
congr,
{ ext1,
dsimp [composition.gather],
rwa split_wrt_composition_join,
simp only [map_of_fn] },
{ rw fin.heq_fun_iff,
{ assume i,
dsimp [composition.sigma_composition_aux],
rw [nth_le_of_eq (split_wrt_composition_join _ _ _)],
{ simp only [nth_le_of_fn'] },
{ simp only [map_of_fn] } },
{ congr,
ext1,
dsimp [composition.gather],
rwa split_wrt_composition_join,
simp only [map_of_fn] } }
end }
end composition
namespace formal_multilinear_series
open composition
theorem comp_assoc (r : formal_multilinear_series 𝕜 G H) (q : formal_multilinear_series 𝕜 F G)
(p : formal_multilinear_series 𝕜 E F) :
(r.comp q).comp p = r.comp (q.comp p) :=
begin
ext n v,
/- First, rewrite the two compositions appearing in the theorem as two sums over complicated
sigma types, as in the description of the proof above. -/
let f : (Σ (a : composition n), composition a.length) → H :=
λ c, r c.2.length (apply_composition q c.2 (apply_composition p c.1 v)),
let g : (Σ (c : composition n), Π (i : fin c.length), composition (c.blocks_fun i)) → H :=
λ c, r c.1.length (λ (i : fin c.1.length),
q (c.2 i).length (apply_composition p (c.2 i) (v ∘ c.1.embedding i))),
suffices : ∑ c, f c = ∑ c, g c,
by simpa only [formal_multilinear_series.comp, continuous_multilinear_map.sum_apply,
comp_along_composition_apply, continuous_multilinear_map.map_sum, finset.sum_sigma',
apply_composition],
/- Now, we use `composition.sigma_equiv_sigma_pi n` to change
variables in the second sum, and check that we get exactly the same sums. -/
rw ← (sigma_equiv_sigma_pi n).sum_comp,
/- To check that we have the same terms, we should check that we apply the same component of
`r`, and the same component of `q`, and the same component of `p`, to the same coordinate of
`v`. This is true by definition, but at each step one needs to convince Lean that the types
one considers are the same, using a suitable congruence lemma to avoid dependent type issues.
This dance has to be done three times, one for `r`, one for `q` and one for `p`.-/
apply finset.sum_congr rfl,
rintros ⟨a, b⟩ _,
dsimp [f, g, sigma_equiv_sigma_pi],
-- check that the `r` components are the same. Based on `composition.length_gather`
apply r.congr (composition.length_gather a b).symm,
intros i hi1 hi2,
-- check that the `q` components are the same. Based on `length_sigma_composition_aux`
apply q.congr (length_sigma_composition_aux a b _).symm,
intros j hj1 hj2,
-- check that the `p` components are the same. Based on `blocks_fun_sigma_composition_aux`
apply p.congr (blocks_fun_sigma_composition_aux a b _ _).symm,
intros k hk1 hk2,
-- finally, check that the coordinates of `v` one is using are the same. Based on
-- `size_up_to_size_up_to_add`.
refine congr_arg v (fin.eq_of_veq _),
dsimp [composition.embedding],
rw [size_up_to_size_up_to_add _ _ hi1 hj1, add_assoc],
end
end formal_multilinear_series
|
2846a89eb6a2c6339aed39844537ed52c1be119c
|
9dc8cecdf3c4634764a18254e94d43da07142918
|
/src/linear_algebra/clifford_algebra/fold.lean
|
86d1432eb9908b4b20da52968696690f2d25a60a
|
[
"Apache-2.0"
] |
permissive
|
jcommelin/mathlib
|
d8456447c36c176e14d96d9e76f39841f69d2d9b
|
ee8279351a2e434c2852345c51b728d22af5a156
|
refs/heads/master
| 1,664,782,136,488
| 1,663,638,983,000
| 1,663,638,983,000
| 132,563,656
| 0
| 0
|
Apache-2.0
| 1,663,599,929,000
| 1,525,760,539,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 6,202
|
lean
|
/-
Copyright (c) 2022 Eric Wieser. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Eric Wieser
-/
import linear_algebra.clifford_algebra.conjugation
/-!
# Recursive computation rules for the Clifford algebra
This file provides API for a special case `clifford_algebra.foldr` of the universal property
`clifford_algebra.lift` with `A = module.End R N` for some arbitrary module `N`. This specialization
resembles the `list.foldr` operation, allowing a bilinear map to be "folded" along the generators.
For convenience, this file also provides `clifford_algebra.foldl`, implemented via
`clifford_algebra.reverse`
## Main definitions
* `clifford_algebra.foldr`: a computation rule for building linear maps out of the clifford
algebra starting on the right, analogous to using `list.foldr` on the generators.
* `clifford_algebra.foldl`: a computation rule for building linear maps out of the clifford
algebra starting on the left, analogous to using `list.foldl` on the generators.
## Main statements
* `clifford_algebra.right_induction`: an induction rule that adds generators from the right.
* `clifford_algebra.left_induction`: an induction rule that adds generators from the left.
-/
universes u1 u2 u3
variables {R M N : Type*}
variables [comm_ring R] [add_comm_group M] [add_comm_group N]
variables [module R M] [module R N]
variables (Q : quadratic_form R M)
namespace clifford_algebra
section foldr
/-- Fold a bilinear map along the generators of a term of the clifford algebra, with the rule
given by `foldr Q f hf n (ι Q m * x) = f m (foldr Q f hf n x)`.
For example, `foldr f hf n (r • ι R u + ι R v * ι R w) = r • f u n + f v (f w n)`. -/
def foldr (f : M →ₗ[R] N →ₗ[R] N) (hf : ∀ m x, f m (f m x) = Q m • x) :
N →ₗ[R] clifford_algebra Q →ₗ[R] N :=
(clifford_algebra.lift Q ⟨f, λ v, linear_map.ext $ hf v⟩).to_linear_map.flip
@[simp] lemma foldr_ι (f : M →ₗ[R] N →ₗ[R] N) (hf) (n : N) (m : M) :
foldr Q f hf n (ι Q m) = f m n :=
linear_map.congr_fun (lift_ι_apply _ _ _) n
@[simp] lemma foldr_algebra_map (f : M →ₗ[R] N →ₗ[R] N) (hf) (n : N) (r : R) :
foldr Q f hf n (algebra_map R _ r) = r • n :=
linear_map.congr_fun (alg_hom.commutes _ r) n
@[simp] lemma foldr_one (f : M →ₗ[R] N →ₗ[R] N) (hf) (n : N) :
foldr Q f hf n 1 = n :=
linear_map.congr_fun (alg_hom.map_one _) n
@[simp] lemma foldr_mul (f : M →ₗ[R] N →ₗ[R] N) (hf) (n : N) (a b : clifford_algebra Q) :
foldr Q f hf n (a * b) = foldr Q f hf (foldr Q f hf n b) a :=
linear_map.congr_fun (alg_hom.map_mul _ _ _) n
/-- This lemma demonstrates the origin of the `foldr` name. -/
lemma foldr_prod_map_ι (l : list M) (f : M →ₗ[R] N →ₗ[R] N) (hf) (n : N):
foldr Q f hf n (l.map $ ι Q).prod = list.foldr (λ m n, f m n) n l :=
begin
induction l with hd tl ih,
{ rw [list.map_nil, list.prod_nil, list.foldr_nil, foldr_one] },
{ rw [list.map_cons, list.prod_cons, list.foldr_cons, foldr_mul, foldr_ι, ih] },
end
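/- Illustrative sanity check, not part of the original API: on a product of two generators the
rightmost generator acts first under `foldr`, using only `foldr_mul` and `foldr_ι` from above. -/
example (f : M →ₗ[R] N →ₗ[R] N) (hf : ∀ m x, f m (f m x) = Q m • x) (n : N) (u v : M) :
  foldr Q f hf n (ι Q u * ι Q v) = f u (f v n) :=
by simp only [foldr_mul, foldr_ι]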
end foldr
section foldl
/-- Fold a bilinear map along the generators of a term of the clifford algebra, with the rule
given by `foldl Q f hf n (ι Q m * x) = foldl Q f hf (f m n) x`.
For example, `foldl f hf n (r • ι R u + ι R v * ι R w) = r • f u n + f w (f v n)`. -/
def foldl (f : M →ₗ[R] N →ₗ[R] N) (hf : ∀ m x, f m (f m x) = Q m • x) :
N →ₗ[R] clifford_algebra Q →ₗ[R] N :=
linear_map.compl₂ (foldr Q f hf) reverse
@[simp] lemma foldl_reverse (f : M →ₗ[R] N →ₗ[R] N) (hf) (n : N) (x : clifford_algebra Q) :
foldl Q f hf n (reverse x) = foldr Q f hf n x :=
fun_like.congr_arg (foldr Q f hf n) $ reverse_reverse _
@[simp] lemma foldr_reverse (f : M →ₗ[R] N →ₗ[R] N) (hf) (n : N) (x : clifford_algebra Q) :
foldr Q f hf n (reverse x) = foldl Q f hf n x := rfl
@[simp] lemma foldl_ι (f : M →ₗ[R] N →ₗ[R] N) (hf) (n : N) (m : M) :
foldl Q f hf n (ι Q m) = f m n :=
by rw [←foldr_reverse, reverse_ι, foldr_ι]
@[simp] lemma foldl_algebra_map (f : M →ₗ[R] N →ₗ[R] N) (hf) (n : N) (r : R) :
foldl Q f hf n (algebra_map R _ r) = r • n :=
by rw [←foldr_reverse, reverse.commutes, foldr_algebra_map]
@[simp] lemma foldl_one (f : M →ₗ[R] N →ₗ[R] N) (hf) (n : N) :
foldl Q f hf n 1 = n :=
by rw [←foldr_reverse, reverse.map_one, foldr_one]
@[simp] lemma foldl_mul (f : M →ₗ[R] N →ₗ[R] N) (hf) (n : N) (a b : clifford_algebra Q) :
foldl Q f hf n (a * b) = foldl Q f hf (foldl Q f hf n a) b :=
by rw [←foldr_reverse, ←foldr_reverse, ←foldr_reverse, reverse.map_mul, foldr_mul]
/-- This lemma demonstrates the origin of the `foldl` name. -/
lemma foldl_prod_map_ι (l : list M) (f : M →ₗ[R] N →ₗ[R] N) (hf) (n : N):
foldl Q f hf n (l.map $ ι Q).prod = list.foldl (λ m n, f n m) n l :=
by rw [←foldr_reverse, reverse_prod_map_ι, ←list.map_reverse, foldr_prod_map_ι, list.foldr_reverse]
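/- Illustrative sanity check, not part of the original API: in contrast with `foldr`, here the
leftmost generator acts first, using only `foldl_mul` and `foldl_ι` from above. -/
example (f : M →ₗ[R] N →ₗ[R] N) (hf : ∀ m x, f m (f m x) = Q m • x) (n : N) (u v : M) :
  foldl Q f hf n (ι Q u * ι Q v) = f v (f u n) :=
by simp only [foldl_mul, foldl_ι]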
end foldl
lemma right_induction {P : clifford_algebra Q → Prop}
(hr : ∀ r : R, P (algebra_map _ _ r))
(h_add : ∀ x y, P x → P y → P (x + y))
(h_ι_mul : ∀ m x, P x → P (x * ι Q m)) : ∀ x, P x :=
begin
/- It would be neat if we could prove this via `foldr` like how we prove
`clifford_algebra.induction`, but going via the grading seems easier. -/
intro x,
have : x ∈ ⊤ := submodule.mem_top,
rw ←supr_ι_range_eq_top at this,
apply submodule.supr_induction _ this (λ i x hx, _) _ h_add,
{ refine submodule.pow_induction_on_right _ hr h_add (λ x px m, _) hx,
rintro ⟨m, rfl⟩,
exact h_ι_mul _ _ px },
{ simpa only [map_zero] using hr 0}
end
lemma left_induction {P : clifford_algebra Q → Prop}
(hr : ∀ r : R, P (algebra_map _ _ r))
(h_add : ∀ x y, P x → P y → P (x + y))
(h_mul_ι : ∀ x m, P x → P (ι Q m * x)) : ∀ x, P x :=
begin
refine reverse_involutive.surjective.forall.2 _,
intro x,
induction x using clifford_algebra.right_induction with r x y hx hy m x hx,
{ simpa only [reverse.commutes] using hr r },
{ simpa only [map_add] using h_add _ _ hx hy },
{ simpa only [reverse.map_mul, reverse_ι] using h_mul_ι _ _ hx },
end
end clifford_algebra
|
ec3ce72a3378acd6d97918d003654835e07a2ffd
|
4d2583807a5ac6caaffd3d7a5f646d61ca85d532
|
/src/group_theory/free_abelian_group.lean
|
48589854c55a13d7306280ed0f3c3e46cd9770db
|
[
"Apache-2.0"
] |
permissive
|
AntoineChambert-Loir/mathlib
|
64aabb896129885f12296a799818061bc90da1ff
|
07be904260ab6e36a5769680b6012f03a4727134
|
refs/heads/master
| 1,693,187,631,771
| 1,636,719,886,000
| 1,636,719,886,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 19,470
|
lean
|
/-
Copyright (c) 2018 Kenny Lau. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kenny Lau
-/
import algebra.group.pi
import group_theory.free_group
import group_theory.abelianization
import algebra.module.basic -- we use the ℤ-module structure on an add_comm_group in punit_equiv
import deprecated.group -- someone who understands `seq` can remove this
/-!
# Free abelian groups
The free abelian group on a type `α`, defined as the abelianisation of
the free group on `α`.
The free abelian group on `α` can be abstractly defined as the left adjoint of the
forgetful functor from abelian groups to types. Alternatively, one could define
it as the functions `α → ℤ` which send all but finitely many `(a : α)` to `0`,
under pointwise addition. In this file, it is defined as the abelianisation
of the free group on `α`. All the constructions and theorems required to show
the adjointness of the construction and the forgetful functor are proved in this
file, but the category-theoretic adjunction statement is in
`algebra.category.Group.adjunctions` .
## Main definitions
Here we use the following variables: `(α β : Type*) (A : Type*) [add_comm_group A]`
* `free_abelian_group α` : the free abelian group on a type `α`. As an abelian
group it is `α →₀ ℤ`, the functions from `α` to `ℤ` such that all but finitely
many elements get mapped to zero, however this is not how it is implemented.
* `lift f : free_abelian_group α →+ A` : the group homomorphism induced
by the map `f : α → A`.
* `map (f : α → β) : free_abelian_group α →+ free_abelian_group β` : functoriality
of `free_abelian_group`
* `instance [monoid α] : semigroup (free_abelian_group α)`
* `instance [comm_monoid α] : comm_ring (free_abelian_group α)`
It has been suggested that we would be better off refactoring this file
and using `finsupp` instead.
## Implementation issues
The definition is `def free_abelian_group : Type u :=
additive $ abelianization $ free_group α`
Chris Hughes has suggested that this all be rewritten in terms of `finsupp`.
Johan Commelin has written all the API relating the definition to `finsupp`
in the lean-liquid repo.
The lemmas `map_pure`, `map_of`, `map_zero`, `map_add`, `map_neg` and `map_sub`
are proved about the `functor.map` `<$>` construction, and need `α` and `β` to
be in the same universe. But
`free_abelian_group.map (f : α → β)` is defined to be the `add_group`
homomorphism `free_abelian_group α →+ free_abelian_group β` (with `α` and `β` now
allowed to be in different universes), so `(map f).map_add`
etc can be used to prove that `free_abelian_group.map` preserves addition. The
functions `map_id`, `map_id_apply`, `map_comp`, `map_comp_apply` and `map_of_apply`
are about `free_abelian_group.map`.
-/
universes u v
variables (α : Type u)
/-- The free abelian group on a type. -/
def free_abelian_group : Type u :=
additive $ abelianization $ free_group α
instance : add_comm_group (free_abelian_group α) :=
@additive.add_comm_group _ $ abelianization.comm_group _
instance : inhabited (free_abelian_group α) := ⟨0⟩
variable {α}
namespace free_abelian_group
/-- The canonical map from α to `free_abelian_group α` -/
def of (x : α) : free_abelian_group α :=
abelianization.of $ free_group.of x
/-- The map `free_abelian_group α →+ A` induced by a map of types `α → A`. -/
def lift {β : Type v} [add_comm_group β] : (α → β) ≃ (free_abelian_group α →+ β) :=
(@free_group.lift _ (multiplicative β) _).trans $
(@abelianization.lift _ _ (multiplicative β) _).trans monoid_hom.to_additive
namespace lift
variables {β : Type v} [add_comm_group β] (f : α → β)
open free_abelian_group
@[simp] protected lemma of (x : α) : lift f (of x) = f x :=
begin
convert @abelianization.lift.of (free_group α) _ (multiplicative β) _ _ _,
convert free_group.lift.of.symm
end
protected theorem unique (g : free_abelian_group α →+ β)
(hg : ∀ x, g (of x) = f x) {x} :
g x = lift f x :=
add_monoid_hom.congr_fun ((lift.symm_apply_eq).mp (funext hg : g ∘ of = f)) _
/-- See note [partially-applied ext lemmas]. -/
@[ext]
protected theorem ext (g h : free_abelian_group α →+ β)
(H : ∀ x, g (of x) = h (of x)) :
g = h :=
lift.symm.injective $ funext H
lemma map_hom {α β γ} [add_comm_group β] [add_comm_group γ]
(a : free_abelian_group α) (f : α → β) (g : β →+ γ) :
g (lift f a) = lift (g ∘ f) a :=
begin
suffices : (g.comp (lift f)) a = lift (g ∘ f) a,
exact this,
apply @lift.unique,
assume a,
show g ((lift f) (of a)) = g (f a),
simp only [(∘), lift.of],
end
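/- Illustrative sanity check, not part of the original API: `lift f` is an additive group
homomorphism agreeing with `f` on generators, so its value on a sum of two generators is
determined by `lift.of` and additivity. -/
example (x y : α) : lift f (of x + of y) = f x + f y :=
by rw [(lift f).map_add, lift.of, lift.of]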
end lift
section
open_locale classical
lemma of_injective : function.injective (of : α → free_abelian_group α) :=
λ x y hoxy, classical.by_contradiction $ assume hxy : x ≠ y,
let f : free_abelian_group α →+ ℤ := lift (λ z, if x = z then (1 : ℤ) else 0) in
have hfx1 : f (of x) = 1, from (lift.of _ _).trans $ if_pos rfl,
have hfy1 : f (of y) = 1, from hoxy ▸ hfx1,
have hfy0 : f (of y) = 0, from (lift.of _ _).trans $ if_neg hxy,
one_ne_zero $ hfy1.symm.trans hfy0
end
local attribute [instance] quotient_group.left_rel
@[elab_as_eliminator]
protected theorem induction_on
{C : free_abelian_group α → Prop}
(z : free_abelian_group α)
(C0 : C 0)
(C1 : ∀ x, C $ of x)
(Cn : ∀ x, C (of x) → C (-of x))
(Cp : ∀ x y, C x → C y → C (x + y)) : C z :=
quotient.induction_on' z $ λ x, quot.induction_on x $ λ L,
list.rec_on L C0 $ λ ⟨x, b⟩ tl ih,
bool.rec_on b (Cp _ _ (Cn _ (C1 x)) ih) (Cp _ _ (C1 x) ih)
theorem lift.add' {α β} [add_comm_group β] (a : free_abelian_group α) (f g : α → β) :
lift (f + g) a = lift f a + lift g a :=
begin
refine free_abelian_group.induction_on a _ _ _ _,
{ simp only [(lift _).map_zero, zero_add] },
{ assume x,
simp only [lift.of, pi.add_apply] },
{ assume x h,
simp only [(lift _).map_neg, lift.of, pi.add_apply, neg_add] },
{ assume x y hx hy,
simp only [(lift _).map_add, hx, hy],
ac_refl }
end
/-- If `g : free_abelian_group X` and `A` is an abelian group then `lift_add_group_hom g`
is the additive group homomorphism sending a function `X → A` to the term of type `A`
corresponding to the evaluation of the induced map `free_abelian_group X → A` at `g`. -/
@[simps]
def lift_add_group_hom {α} (β) [add_comm_group β] (a : free_abelian_group α) : (α → β) →+ β :=
add_monoid_hom.mk' (λ f, lift f a) (lift.add' a)
lemma is_add_group_hom_lift' {α} (β) [add_comm_group β] (a : free_abelian_group α) :
is_add_group_hom (λf, (lift f a : β)) :=
{ map_add := λ f g, lift.add' a f g }
section monad
variables {β : Type u}
instance : monad free_abelian_group.{u} :=
{ pure := λ α, of,
bind := λ α β x f, lift f x }
@[elab_as_eliminator]
protected theorem induction_on'
{C : free_abelian_group α → Prop}
(z : free_abelian_group α)
(C0 : C 0)
(C1 : ∀ x, C $ pure x)
(Cn : ∀ x, C (pure x) → C (-pure x))
(Cp : ∀ x y, C x → C y → C (x + y)) : C z :=
free_abelian_group.induction_on z C0 C1 Cn Cp
@[simp] lemma map_pure (f : α → β) (x : α) : f <$> (pure x : free_abelian_group α) = pure (f x) :=
rfl
@[simp] lemma map_zero (f : α → β) : f <$> (0 : free_abelian_group α) = 0 :=
(lift (of ∘ f)).map_zero
@[simp] lemma map_add (f : α → β) (x y : free_abelian_group α) :
f <$> (x + y) = f <$> x + f <$> y :=
(lift _).map_add _ _
@[simp] lemma map_neg (f : α → β) (x : free_abelian_group α) : f <$> (-x) = -(f <$> x) :=
(lift _).map_neg _
@[simp] lemma map_sub (f : α → β) (x y : free_abelian_group α) :
f <$> (x - y) = f <$> x - f <$> y :=
(lift _).map_sub _ _
@[simp] lemma map_of (f : α → β) (y : α) : f <$> of y = of (f y) := rfl
@[simp] lemma pure_bind (f : α → free_abelian_group β) (x) : pure x >>= f = f x :=
lift.of _ _
@[simp] lemma zero_bind (f : α → free_abelian_group β) : 0 >>= f = 0 :=
(lift f).map_zero
@[simp] lemma add_bind (f : α → free_abelian_group β) (x y : free_abelian_group α) :
x + y >>= f = (x >>= f) + (y >>= f) :=
(lift _).map_add _ _
@[simp] lemma neg_bind (f : α → free_abelian_group β) (x : free_abelian_group α) :
-x >>= f = -(x >>= f) :=
(lift _).map_neg _
@[simp] lemma sub_bind (f : α → free_abelian_group β) (x y : free_abelian_group α) :
x - y >>= f = (x >>= f) - (y >>= f) :=
(lift _).map_sub _ _
@[simp] lemma pure_seq (f : α → β) (x : free_abelian_group α) : pure f <*> x = f <$> x :=
pure_bind _ _
@[simp] lemma zero_seq (x : free_abelian_group α) : (0 : free_abelian_group (α → β)) <*> x = 0 :=
zero_bind _
@[simp] lemma add_seq (f g : free_abelian_group (α → β)) (x : free_abelian_group α) :
f + g <*> x = (f <*> x) + (g <*> x) :=
add_bind _ _ _
@[simp] lemma neg_seq (f : free_abelian_group (α → β)) (x : free_abelian_group α) :
-f <*> x = -(f <*> x) :=
neg_bind _ _
@[simp] lemma sub_seq (f g : free_abelian_group (α → β)) (x : free_abelian_group α) :
f - g <*> x = (f <*> x) - (g <*> x) :=
sub_bind _ _ _
lemma is_add_group_hom_seq (f : free_abelian_group (α → β)) : is_add_group_hom ((<*>) f) :=
{ map_add := λ x y, show lift (<$> (x+y)) _ = _, by simp only [map_add]; exact
@@is_add_hom.map_add _ _ _
(@@free_abelian_group.is_add_group_hom_lift' (free_abelian_group β) _ _).to_is_add_hom _ _ }
@[simp] lemma seq_zero (f : free_abelian_group (α → β)) : f <*> 0 = 0 :=
is_add_group_hom.map_zero (is_add_group_hom_seq f)
@[simp] lemma seq_add (f : free_abelian_group (α → β)) (x y : free_abelian_group α) :
f <*> (x + y) = (f <*> x) + (f <*> y) :=
is_add_hom.map_add (is_add_group_hom_seq f).to_is_add_hom _ _
@[simp] lemma seq_neg (f : free_abelian_group (α → β)) (x : free_abelian_group α) :
f <*> (-x) = -(f <*> x) :=
is_add_group_hom.map_neg (is_add_group_hom_seq f) _
@[simp] lemma seq_sub (f : free_abelian_group (α → β)) (x y : free_abelian_group α) :
f <*> (x - y) = (f <*> x) - (f <*> y) :=
is_add_group_hom.map_sub (is_add_group_hom_seq f) _ _
instance : is_lawful_monad free_abelian_group.{u} :=
{ id_map := λ α x, free_abelian_group.induction_on' x (map_zero id) (λ x, map_pure id x)
(λ x ih, by rw [map_neg, ih]) (λ x y ihx ihy, by rw [map_add, ihx, ihy]),
pure_bind := λ α β x f, pure_bind f x,
bind_assoc := λ α β γ x f g, free_abelian_group.induction_on' x
(by iterate 3 { rw zero_bind }) (λ x, by iterate 2 { rw pure_bind })
(λ x ih, by iterate 3 { rw neg_bind }; rw ih)
(λ x y ihx ihy, by iterate 3 { rw add_bind }; rw [ihx, ihy]) }
instance : is_comm_applicative free_abelian_group.{u} :=
{ commutative_prod := λ α β x y, free_abelian_group.induction_on' x
(by rw [map_zero, zero_seq, seq_zero])
(λ p, by rw [map_pure, pure_seq]; exact free_abelian_group.induction_on' y
(by rw [map_zero, map_zero, zero_seq])
(λ q, by rw [map_pure, map_pure, pure_seq, map_pure])
(λ q ih, by rw [map_neg, map_neg, neg_seq, ih])
(λ y₁ y₂ ih1 ih2, by rw [map_add, map_add, add_seq, ih1, ih2]))
(λ p ih, by rw [map_neg, neg_seq, seq_neg, ih])
(λ x₁ x₂ ih1 ih2, by rw [map_add, add_seq, seq_add, ih1, ih2]) }
end monad
universe w
variables {β : Type v} {γ : Type w}
/-- The additive group homomorphism `free_abelian_group α →+ free_abelian_group β` induced from a
map `α → β` -/
def map (f : α → β) : free_abelian_group α →+ free_abelian_group β :=
lift (of ∘ f)
lemma lift_comp {α} {β} {γ} [add_comm_group γ]
(f : α → β) (g : β → γ) (x : free_abelian_group α) :
lift (g ∘ f) x = lift g (map f x) :=
begin
apply free_abelian_group.induction_on x,
{ exact add_monoid_hom.map_zero _ },
{ intro y, refl },
{ intros x h, simp only [h, add_monoid_hom.map_neg] },
{ intros x y h₁ h₂, simp only [h₁, h₂, add_monoid_hom.map_add] }
end
lemma map_id : map id = add_monoid_hom.id (free_abelian_group α) :=
eq.symm $ lift.ext _ _ $ λ x, lift.unique of (add_monoid_hom.id _) $
λ y, add_monoid_hom.id_apply _ _
lemma map_id_apply (x : free_abelian_group α) : map id x = x := by {rw map_id, refl }
lemma map_comp {f : α → β} {g : β → γ} : map (g ∘ f) = (map g).comp (map f) :=
eq.symm $ lift.ext _ _ $ λ x, eq.symm $ lift_comp _ _ _
lemma map_comp_apply {f : α → β} {g : β → γ} (x : free_abelian_group α) :
map (g ∘ f) x = (map g) ((map f) x) := by { rw map_comp, refl }
-- version of map_of which uses `map`
@[simp] lemma map_of_apply {f : α → β} (a : α) : map f (of a) = of (f a) := rfl
variable (α)
section monoid
variables {R : Type*} [monoid α] [ring R]
instance : semigroup (free_abelian_group α) :=
{ mul := λ x, lift $ λ x₂, lift (λ x₁, of $ x₁ * x₂) x,
mul_assoc := λ x y z, begin
unfold has_mul.mul,
refine free_abelian_group.induction_on z (by simp) _ _ _,
{ intros L3, rw [lift.of, lift.of],
refine free_abelian_group.induction_on y (by simp) _ _ _,
{ intros L2, iterate 3 { rw lift.of },
refine free_abelian_group.induction_on x (by simp) _ _ _,
{ intros L1, iterate 3 { rw lift.of }, congr' 1, exact mul_assoc _ _ _ },
{ intros L1 ih, iterate 3 { rw (lift _).map_neg }, rw ih },
{ intros x1 x2 ih1 ih2, iterate 3 { rw (lift _).map_add }, rw [ih1, ih2] } },
{ intros L2 ih, iterate 4 { rw (lift _).map_neg }, rw ih },
{ intros y1 y2 ih1 ih2, iterate 4 { rw (lift _).map_add }, rw [ih1, ih2] } },
{ intros L3 ih, iterate 3 { rw (lift _).map_neg }, rw ih },
{ intros z1 z2 ih1 ih2, iterate 2 { rw (lift _).map_add }, rw [ih1, ih2],
exact ((lift _).map_add _ _).symm }
end }
variable {α}
lemma mul_def (x y : free_abelian_group α) :
x * y = lift (λ x₂, lift (λ x₁, of (x₁ * x₂)) x) y := rfl
lemma of_mul_of (x y : α) : of x * of y = of (x * y) := rfl
lemma of_mul (x y : α) : of (x * y) = of x * of y := rfl
variable (α)
instance : ring (free_abelian_group α) :=
{ one := free_abelian_group.of 1,
mul_one := λ x, begin
unfold has_mul.mul semigroup.mul has_one.one,
rw lift.of,
refine free_abelian_group.induction_on x rfl _ _ _,
{ intros L, erw [lift.of], congr' 1, exact mul_one L },
{ intros L ih, rw [(lift _).map_neg, ih] },
{ intros x1 x2 ih1 ih2, rw [(lift _).map_add, ih1, ih2] }
end,
one_mul := λ x, begin
unfold has_mul.mul semigroup.mul has_one.one,
refine free_abelian_group.induction_on x rfl _ _ _,
{ intros L, rw [lift.of, lift.of], congr' 1, exact one_mul L },
{ intros L ih, rw [(lift _).map_neg, ih] },
{ intros x1 x2 ih1 ih2, rw [(lift _).map_add, ih1, ih2] }
end,
left_distrib := λ x y z, (lift _).map_add _ _,
right_distrib := λ x y z, begin
unfold has_mul.mul semigroup.mul,
refine free_abelian_group.induction_on z rfl _ _ _,
{ intros L, iterate 3 { rw lift.of }, rw (lift _).map_add, refl },
{ intros L ih, iterate 3 { rw (lift _).map_neg }, rw [ih, neg_add], refl },
{ intros z1 z2 ih1 ih2, iterate 3 { rw (lift _).map_add }, rw [ih1, ih2],
rw [add_assoc, add_assoc], congr' 1, apply add_left_comm }
end,
.. free_abelian_group.add_comm_group α,
.. free_abelian_group.semigroup α }
variable {α}
/-- `free_abelian_group.of` is a `monoid_hom` when `α` is a `monoid`. -/
def of_mul_hom : α →* free_abelian_group α :=
{ to_fun := of,
map_one' := rfl,
map_mul' := of_mul }
@[simp] lemma of_mul_hom_coe : (of_mul_hom : α → free_abelian_group α) = of := rfl
/-- If `f` preserves multiplication, then so does `lift f`. -/
def lift_monoid : (α →* R) ≃ (free_abelian_group α →+* R) :=
{ to_fun := λ f,
{ map_one' := (lift.of f _).trans f.map_one,
map_mul' := λ x y,
begin
simp only [add_monoid_hom.to_fun_eq_coe],
refine free_abelian_group.induction_on y (mul_zero _).symm _ _ _,
{ intros L2,
rw mul_def x,
simp only [lift.of],
refine free_abelian_group.induction_on x (zero_mul _).symm _ _ _,
{ intros L1, iterate 3 { rw lift.of },
exact f.map_mul _ _ },
{ intros L1 ih,
iterate 3 { rw (lift _).map_neg },
rw [ih, neg_mul_eq_neg_mul] },
{ intros x1 x2 ih1 ih2,
iterate 3 { rw (lift _).map_add },
rw [ih1, ih2, add_mul] } },
{ intros L2 ih,
rw [mul_neg_eq_neg_mul_symm, add_monoid_hom.map_neg, add_monoid_hom.map_neg,
mul_neg_eq_neg_mul_symm, ih] },
{ intros y1 y2 ih1 ih2,
rw [mul_add, add_monoid_hom.map_add, add_monoid_hom.map_add, mul_add, ih1, ih2] },
end,
.. lift f },
inv_fun := λ F, monoid_hom.comp ↑F of_mul_hom,
left_inv := λ f, monoid_hom.ext $ lift.of _,
right_inv := λ F, ring_hom.coe_add_monoid_hom_injective $
lift.apply_symm_apply (↑F : free_abelian_group α →+ R) }
@[simp] lemma lift_monoid_coe_add_monoid_hom (f : α →* R) : ↑(lift_monoid f) = lift f := rfl
@[simp] lemma lift_monoid_coe (f : α →* R) : ⇑(lift_monoid f) = lift f := rfl
@[simp] lemma lift_monoid_symm_coe (f : free_abelian_group α →+* R) :
⇑(lift_monoid.symm f) = lift.symm ↑f := rfl
lemma one_def : (1 : free_abelian_group α) = of 1 := rfl
lemma of_one : (of 1 : free_abelian_group α) = 1 := rfl
end monoid
instance [comm_monoid α] : comm_ring (free_abelian_group α) :=
{ mul_comm := λ x y, begin
refine free_abelian_group.induction_on x (zero_mul y) _ _ _,
{ intros s, refine free_abelian_group.induction_on y (zero_mul _).symm _ _ _,
{ intros t, unfold has_mul.mul semigroup.mul ring.mul,
iterate 4 { rw lift.of }, congr' 1, exact mul_comm _ _ },
{ intros t ih, rw [mul_neg_eq_neg_mul_symm, ih, neg_mul_eq_neg_mul] },
{ intros y1 y2 ih1 ih2, rw [mul_add, add_mul, ih1, ih2] } },
{ intros s ih, rw [neg_mul_eq_neg_mul_symm, ih, neg_mul_eq_mul_neg] },
{ intros x1 x2 ih1 ih2, rw [add_mul, mul_add, ih1, ih2] }
end,
.. free_abelian_group.ring α }
instance pempty_unique : unique (free_abelian_group pempty) :=
{ default := 0,
uniq := λ x, free_abelian_group.induction_on x rfl
(λ x, pempty.elim x)
(λ x, pempty.elim x)
(by { rintros - - rfl rfl, simp }) }
/-- The free abelian group on a type with one term is isomorphic to `ℤ`. -/
def punit_equiv (T : Type*) [unique T] : free_abelian_group T ≃+ ℤ :=
{ to_fun := free_abelian_group.lift (λ _, (1 : ℤ)),
inv_fun := λ n, n • of (inhabited.default T),
left_inv := λ z, free_abelian_group.induction_on z
(by simp only [zero_smul, add_monoid_hom.map_zero])
(unique.forall_iff.2 $ by simp only [one_smul, lift.of])
(unique.forall_iff.2 $ by simp)
(λ x y hx hy, by { simp only [add_monoid_hom.map_add, add_smul] at *, rw [hx, hy]}),
right_inv := λ n,
begin
rw [add_monoid_hom.map_int_module_smul, lift.of],
exact zsmul_int_one n
end,
map_add' := add_monoid_hom.map_add _ }
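-- In concrete terms, this equivalence sends `of (inhabited.default T)` to `1 : ℤ`
-- (since `to_fun` is `lift (λ _, 1)`), and its inverse sends `n : ℤ` to
-- `n • of (inhabited.default T)`.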
/-- Isomorphic types have isomorphic free abelian groups. -/
def equiv_of_equiv {α β : Type*} (f : α ≃ β) : free_abelian_group α ≃+ free_abelian_group β :=
{ to_fun := map f,
inv_fun := map f.symm,
left_inv := begin
intros x,
rw [← map_comp_apply, equiv.symm_comp_self, map_id],
refl,
end,
right_inv := begin
intros x,
rw [← map_comp_apply, equiv.self_comp_symm, map_id],
refl,
end,
map_add' := add_monoid_hom.map_add _ }
end free_abelian_group
blob_id: 72bb1385867c7d22b616658c96c978f1fecbdff4
directory_id: 74addaa0e41490cbaf2abd313a764c96df57b05d
path: /Mathlib/data/multiset/lattice.lean
content_id: 025300fbbc801810a9c430a34c297bb57cfd0a42
detected_licenses: []
license_type: no_license
repo_name: AurelienSaue/Mathlib4_auto
snapshot_id: f538cfd0980f65a6361eadea39e6fc639e9dae14
revision_id: 590df64109b08190abe22358fabc3eae000943f2
branch_name: refs/heads/master
visit_date: 1,683,906,849,776
revision_date: 1,622,564,669,000
committer_date: 1,622,564,669,000
github_id: 371,723,747
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Lean
is_vendor: false
is_generated: false
length_bytes: 4,307
extension: lean
content:
/-
Copyright (c) 2018 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Mario Carneiro
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.data.multiset.finset_ops
import Mathlib.data.multiset.fold
import Mathlib.PostPort
universes u_1
namespace Mathlib
/-!
# Lattice operations on multisets
-/
namespace multiset
/-! ### sup -/
/-- Supremum of a multiset: `sup {a, b, c} = a ⊔ b ⊔ c` -/
def sup {α : Type u_1} [semilattice_sup_bot α] (s : multiset α) : α :=
fold has_sup.sup ⊥ s
@[simp] theorem sup_zero {α : Type u_1} [semilattice_sup_bot α] : sup 0 = ⊥ :=
fold_zero has_sup.sup ⊥
@[simp] theorem sup_cons {α : Type u_1} [semilattice_sup_bot α] (a : α) (s : multiset α) : sup (a ::ₘ s) = a ⊔ sup s :=
fold_cons_left has_sup.sup ⊥ a s
@[simp] theorem sup_singleton {α : Type u_1} [semilattice_sup_bot α] {a : α} : sup (a ::ₘ 0) = a := sorry
@[simp] theorem sup_add {α : Type u_1} [semilattice_sup_bot α] (s₁ : multiset α) (s₂ : multiset α) : sup (s₁ + s₂) = sup s₁ ⊔ sup s₂ := sorry
theorem sup_le {α : Type u_1} [semilattice_sup_bot α] {s : multiset α} {a : α} : sup s ≤ a ↔ ∀ (b : α), b ∈ s → b ≤ a := sorry
theorem le_sup {α : Type u_1} [semilattice_sup_bot α] {s : multiset α} {a : α} (h : a ∈ s) : a ≤ sup s :=
iff.mp sup_le (le_refl (sup s)) a h
theorem sup_mono {α : Type u_1} [semilattice_sup_bot α] {s₁ : multiset α} {s₂ : multiset α} (h : s₁ ⊆ s₂) : sup s₁ ≤ sup s₂ :=
iff.mpr sup_le fun (b : α) (hb : b ∈ s₁) => le_sup (h hb)
@[simp] theorem sup_erase_dup {α : Type u_1} [semilattice_sup_bot α] [DecidableEq α] (s : multiset α) : sup (erase_dup s) = sup s :=
fold_erase_dup_idem has_sup.sup s ⊥
@[simp] theorem sup_ndunion {α : Type u_1} [semilattice_sup_bot α] [DecidableEq α] (s₁ : multiset α) (s₂ : multiset α) : sup (ndunion s₁ s₂) = sup s₁ ⊔ sup s₂ := sorry
@[simp] theorem sup_union {α : Type u_1} [semilattice_sup_bot α] [DecidableEq α] (s₁ : multiset α) (s₂ : multiset α) : sup (s₁ ∪ s₂) = sup s₁ ⊔ sup s₂ := sorry
@[simp] theorem sup_ndinsert {α : Type u_1} [semilattice_sup_bot α] [DecidableEq α] (a : α) (s : multiset α) : sup (ndinsert a s) = a ⊔ sup s := sorry
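/- Illustrative sketch (names as in this auto-ported file): since `sup` is just
   `fold has_sup.sup ⊥`, over `ℕ` (where `⊥ = 0`) one expects, for example,
   `sup (2 ::ₘ 5 ::ₘ 3 ::ₘ 0) = 5`; the lemmas `sup_zero` and `sup_cons` above
   unfold such a computation one element at a time. -/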
/-! ### inf -/
/-- Infimum of a multiset: `inf {a, b, c} = a ⊓ b ⊓ c` -/
def inf {α : Type u_1} [semilattice_inf_top α] (s : multiset α) : α :=
fold has_inf.inf ⊤ s
@[simp] theorem inf_zero {α : Type u_1} [semilattice_inf_top α] : inf 0 = ⊤ :=
fold_zero has_inf.inf ⊤
@[simp] theorem inf_cons {α : Type u_1} [semilattice_inf_top α] (a : α) (s : multiset α) : inf (a ::ₘ s) = a ⊓ inf s :=
fold_cons_left has_inf.inf ⊤ a s
@[simp] theorem inf_singleton {α : Type u_1} [semilattice_inf_top α] {a : α} : inf (a ::ₘ 0) = a := sorry
@[simp] theorem inf_add {α : Type u_1} [semilattice_inf_top α] (s₁ : multiset α) (s₂ : multiset α) : inf (s₁ + s₂) = inf s₁ ⊓ inf s₂ := sorry
theorem le_inf {α : Type u_1} [semilattice_inf_top α] {s : multiset α} {a : α} : a ≤ inf s ↔ ∀ (b : α), b ∈ s → a ≤ b := sorry
theorem inf_le {α : Type u_1} [semilattice_inf_top α] {s : multiset α} {a : α} (h : a ∈ s) : inf s ≤ a :=
iff.mp le_inf (le_refl (inf s)) a h
theorem inf_mono {α : Type u_1} [semilattice_inf_top α] {s₁ : multiset α} {s₂ : multiset α} (h : s₁ ⊆ s₂) : inf s₂ ≤ inf s₁ :=
iff.mpr le_inf fun (b : α) (hb : b ∈ s₁) => inf_le (h hb)
@[simp] theorem inf_erase_dup {α : Type u_1} [semilattice_inf_top α] [DecidableEq α] (s : multiset α) : inf (erase_dup s) = inf s :=
fold_erase_dup_idem has_inf.inf s ⊤
@[simp] theorem inf_ndunion {α : Type u_1} [semilattice_inf_top α] [DecidableEq α] (s₁ : multiset α) (s₂ : multiset α) : inf (ndunion s₁ s₂) = inf s₁ ⊓ inf s₂ := sorry
@[simp] theorem inf_union {α : Type u_1} [semilattice_inf_top α] [DecidableEq α] (s₁ : multiset α) (s₂ : multiset α) : inf (s₁ ∪ s₂) = inf s₁ ⊓ inf s₂ := sorry
@[simp] theorem inf_ndinsert {α : Type u_1} [semilattice_inf_top α] [DecidableEq α] (a : α) (s : multiset α) : inf (ndinsert a s) = a ⊓ inf s := sorry
blob_id: 80e27febcab90ac33fcf7deeb5563cda0f884fd7
directory_id: 302c785c90d40ad3d6be43d33bc6a558354cc2cf
path: /src/topology/sheaves/stalks.lean
content_id: dbedd5b7bf19199fcc5c031d8a152a3ebc8be90b
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: ilitzroth/mathlib
snapshot_id: ea647e67f1fdfd19a0f7bdc5504e8acec6180011
revision_id: 5254ef14e3465f6504306132fe3ba9cec9ffff16
branch_name: refs/heads/master
visit_date: 1,680,086,661,182
revision_date: 1,617,715,647,000
committer_date: 1,617,715,647,000
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Lean
is_vendor: false
is_generated: false
length_bytes: 6,759
extension: lean
content:
/-
Copyright (c) 2019 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import topology.category.Top.open_nhds
import topology.sheaves.presheaf
import category_theory.limits.types
noncomputable theory
universes v u v' u'
open category_theory
open Top
open category_theory.limits
open topological_space
open opposite
variables {C : Type u} [category.{v} C]
variables [has_colimits.{v} C]
variables {X Y Z : Top.{v}}
namespace Top.presheaf
variables (C)
/-- Stalks are functorial with respect to morphisms of presheaves over a fixed `X`. -/
def stalk_functor (x : X) : X.presheaf C ⥤ C :=
((whiskering_left _ _ C).obj (open_nhds.inclusion x).op) ⋙ colim
variables {C}
/--
The stalk of a presheaf `F` at a point `x` is calculated as the colimit of the functor
nbhds x ⥤ opens F.X ⥤ C
-/
def stalk (ℱ : X.presheaf C) (x : X) : C :=
(stalk_functor C x).obj ℱ -- colimit ((open_nhds.inclusion x).op ⋙ ℱ)
@[simp] lemma stalk_functor_obj (ℱ : X.presheaf C) (x : X) :
(stalk_functor C x).obj ℱ = ℱ.stalk x := rfl
/--
The germ of a section of a presheaf over an open at a point of that open.
-/
def germ (F : X.presheaf C) {U : opens X} (x : U) : F.obj (op U) ⟶ stalk F x :=
colimit.ι ((open_nhds.inclusion x.1).op ⋙ F) (op ⟨U, x.2⟩)
/-- For a `Type` valued presheaf, every point in a stalk is a germ. -/
lemma germ_exist (F : X.presheaf (Type v)) (x : X) (t : stalk F x) :
∃ (U : opens X) (m : x ∈ U) (s : F.obj (op U)), F.germ ⟨x, m⟩ s = t :=
begin
obtain ⟨U, s, e⟩ := types.jointly_surjective _ (colimit.is_colimit _) t,
revert s e,
rw [(show U = op (unop U), from rfl)],
generalize : unop U = V, clear U,
cases V with V m,
intros s e,
exact ⟨V, m, s, e⟩,
end
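-- In other words, for a `Type`-valued presheaf every element of the stalk at `x` is the
-- germ of an honest section on some open neighbourhood of `x`; the next lemma, `germ_eq`,
-- says that two sections with the same germ agree after restriction to a common smaller
-- neighbourhood.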
lemma germ_eq (F : X.presheaf (Type v)) {U V : opens X} (x : X) (mU : x ∈ U) (mV : x ∈ V)
(s : F.obj (op U)) (t : F.obj (op V))
(h : germ F ⟨x, mU⟩ s = germ F ⟨x, mV⟩ t) :
∃ (W : opens X) (m : x ∈ W) (iU : W ⟶ U) (iV : W ⟶ V), F.map iU.op s = F.map iV.op t :=
begin
erw types.filtered_colimit.colimit_eq_iff at h,
rcases h with ⟨W, iU, iV, e⟩,
exact ⟨(unop W).1, (unop W).2, iU.unop, iV.unop, e⟩,
end
@[simp] lemma germ_res (F : X.presheaf C) {U V : opens X} (i : U ⟶ V) (x : U) :
F.map i.op ≫ germ F x = germ F (i x : V) :=
let i' : (⟨U, x.2⟩ : open_nhds x.1) ⟶ ⟨V, (i x : V).2⟩ := i in
colimit.w ((open_nhds.inclusion x.1).op ⋙ F) i'.op
@[simp] lemma germ_res_apply (F : X.presheaf (Type v)) {U V : opens X} (i : U ⟶ V)
(x : U) (f : F.obj (op V)) :
germ F x (F.map i.op f) = germ F (i x : V) f :=
let i' : (⟨U, x.2⟩ : open_nhds x.1) ⟶ ⟨V, (i x : V).2⟩ := i in
congr_fun (colimit.w ((open_nhds.inclusion x.1).op ⋙ F) i'.op) f
/-- A variant when the open sets are written in `(opens X)ᵒᵖ`. -/
@[simp] lemma germ_res_apply' (F : X.presheaf (Type v)) {U V : (opens X)ᵒᵖ} (i : V ⟶ U)
(x : unop U) (f : F.obj V) :
germ F x (F.map i f) = germ F (i.unop x : unop V) f :=
let i' : (⟨unop U, x.2⟩ : open_nhds x.1) ⟶ ⟨unop V, (i.unop x : unop V).2⟩ := i.unop in
congr_fun (colimit.w ((open_nhds.inclusion x.1).op ⋙ F) i'.op) f
section
local attribute [instance] concrete_category.has_coe_to_sort concrete_category.has_coe_to_fun
@[ext]
lemma germ_ext {D : Type u} [category.{v} D] [concrete_category D] [has_colimits D]
(F : X.presheaf D)
{U V : opens X} {x : X} {hxU : x ∈ U} {hxV : x ∈ V}
(W : opens X) (hxW : x ∈ W) (iWU : W ⟶ U) (iWV : W ⟶ V)
{sU : F.obj (op U)} {sV : F.obj (op V)}
(ih : F.map iWU.op sU = F.map iWV.op sV) :
F.germ ⟨x, hxU⟩ sU = F.germ ⟨x, hxV⟩ sV :=
by erw [← F.germ_res iWU ⟨x, hxW⟩,
← F.germ_res iWV ⟨x, hxW⟩, coe_comp, coe_comp, ih]
end
lemma stalk_hom_ext (F : X.presheaf C) {x} {Y : C} {f₁ f₂ : F.stalk x ⟶ Y}
(ih : ∀ (U : opens X) (hxU : x ∈ U), F.germ ⟨x, hxU⟩ ≫ f₁ = F.germ ⟨x, hxU⟩ ≫ f₂) : f₁ = f₂ :=
colimit.hom_ext $ λ U, by { op_induction U, cases U with U hxU, exact ih U hxU }
variables (C)
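/-- The canonical morphism from the stalk of the pushforward presheaf `f _* ℱ` at `f x`
to the stalk of `ℱ` at `x`. -/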
def stalk_pushforward (f : X ⟶ Y) (ℱ : X.presheaf C) (x : X) : (f _* ℱ).stalk (f x) ⟶ ℱ.stalk x :=
begin
-- This is a hack; Lean doesn't like to elaborate the term written directly.
transitivity,
swap,
exact colimit.pre _ (open_nhds.map f x).op,
exact colim.map (whisker_right (nat_trans.op (open_nhds.inclusion_map_iso f x).inv) ℱ),
end
-- Here are two other potential solutions, suggested by @fpvandoorn at
-- <https://github.com/leanprover-community/mathlib/pull/1018#discussion_r283978240>
-- However, I can't get the subsequent two proofs to work with either one.
-- def stalk_pushforward (f : X ⟶ Y) (ℱ : X.presheaf C) (x : X) :
-- (f _* ℱ).stalk (f x) ⟶ ℱ.stalk x :=
-- colim.map ((functor.associator _ _ _).inv ≫
-- whisker_right (nat_trans.op (open_nhds.inclusion_map_iso f x).inv) ℱ) ≫
-- colimit.pre ((open_nhds.inclusion x).op ⋙ ℱ) (open_nhds.map f x).op
-- def stalk_pushforward (f : X ⟶ Y) (ℱ : X.presheaf C) (x : X) :
-- (f _* ℱ).stalk (f x) ⟶ ℱ.stalk x :=
-- (colim.map (whisker_right (nat_trans.op (open_nhds.inclusion_map_iso f x).inv) ℱ) :
-- colim.obj ((open_nhds.inclusion (f x) ⋙ opens.map f).op ⋙ ℱ) ⟶ _) ≫
-- colimit.pre ((open_nhds.inclusion x).op ⋙ ℱ) (open_nhds.map f x).op
namespace stalk_pushforward
local attribute [tidy] tactic.op_induction'
@[simp] lemma id (ℱ : X.presheaf C) (x : X) :
ℱ.stalk_pushforward C (𝟙 X) x = (stalk_functor C x).map ((pushforward.id ℱ).hom) :=
begin
dsimp [stalk_pushforward, stalk_functor],
ext1,
tactic.op_induction',
cases j, cases j_val,
rw [colimit.ι_map_assoc, colimit.ι_map, colimit.ι_pre, whisker_left_app, whisker_right_app,
pushforward.id_hom_app, eq_to_hom_map, eq_to_hom_refl],
dsimp,
-- FIXME A simp lemma which unfortunately doesn't fire:
erw [category_theory.functor.map_id],
end
-- This proof is sadly not at all robust:
-- having to use `erw` at all is a bad sign.
@[simp] lemma comp (ℱ : X.presheaf C) (f : X ⟶ Y) (g : Y ⟶ Z) (x : X) :
ℱ.stalk_pushforward C (f ≫ g) x =
((f _* ℱ).stalk_pushforward C g (f x)) ≫ (ℱ.stalk_pushforward C f x) :=
begin
dsimp [stalk_pushforward, stalk_functor],
ext U,
op_induction U,
cases U,
cases U_val,
simp only [colimit.ι_map_assoc, colimit.ι_pre_assoc,
whisker_right_app, category.assoc],
dsimp,
-- FIXME: Some of these are simp lemmas, but don't fire successfully:
erw [category_theory.functor.map_id, category.id_comp, category.id_comp, category.id_comp,
colimit.ι_pre, colimit.ι_pre],
refl,
end
end stalk_pushforward
end Top.presheaf
blob_id: 023c1786a3d675770aa0ad9c24c57e89d1bd5ef7
directory_id: 8cae430f0a71442d02dbb1cbb14073b31048e4b0
path: /src/category_theory/groupoid/subgroupoid.lean
content_id: e6055869660814b1fd47ad3a3114d307a7611370
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: leanprover-community/mathlib
snapshot_id: 56a2cadd17ac88caf4ece0a775932fa26327ba0e
revision_id: 442a83d738cb208d3600056c489be16900ba701d
branch_name: refs/heads/master
visit_date: 1,693,584,102,358
revision_date: 1,693,471,902,000
committer_date: 1,693,471,902,000
github_id: 97,922,418
star_events_count: 1,595
fork_events_count: 352
gha_license_id: Apache-2.0
gha_event_created_at: 1,694,693,445,000
gha_created_at: 1,500,624,130,000
gha_language: Lean
src_encoding: UTF-8
language: Lean
is_vendor: false
is_generated: false
length_bytes: 24,788
extension: lean
content:
/-
Copyright (c) 2022 Rémi Bottinelli. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Rémi Bottinelli, Junyan Xu
-/
import category_theory.groupoid.vertex_group
import category_theory.groupoid.basic
import category_theory.groupoid
import algebra.group.defs
import data.set.lattice
import group_theory.subgroup.basic
import order.galois_connection
/-!
# Subgroupoid
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This file defines subgroupoids as `structure`s containing the subsets of arrows and their
stability under composition and inversion.
Also defined are:
* the complete lattice structure on subgroupoids given by containment;
* images and preimages of subgroupoids under a functor;
* the notion of normality of subgroupoids and its stability under intersection and preimage;
* compatibility of the above with `groupoid.vertex_group`.
## Main definitions
Given a type `C` with associated `groupoid C` instance.
* `subgroupoid C` is the type of subgroupoids of `C`
* `subgroupoid.is_normal` is the property that the subgroupoid is stable under conjugation
by arbitrary arrows, _and_ that all identity arrows are contained in the subgroupoid.
* `subgroupoid.comap` is the "preimage" map of subgroupoids along a functor.
* `subgroupoid.map` is the "image" map of subgroupoids along a functor _injective on objects_.
* `subgroupoid.vertex_subgroup` is the subgroup of the `vertex group` at a given vertex `v`,
assuming `v` is contained in the `subgroupoid` (meaning, by definition, that the arrow `𝟙 v`
is contained in the subgroupoid).
## Implementation details
The structure of this file is copied from/inspired by `group_theory.subgroup.basic`
and `combinatorics.simple_graph.subgraph`.
## TODO
* Equivalent inductive characterization of generated (normal) subgroupoids.
* Characterization of normal subgroupoids as kernels.
* Prove that `full` and `disconnect` preserve intersections (and `disconnect` also unions)
## Tags
subgroupoid
-/
namespace category_theory
open set groupoid
local attribute [protected] category_theory.inv
universes u v
variables {C : Type u} [groupoid C]
/--
A subgroupoid of `C` consists of a choice of arrows for each pair of vertices, closed
under composition and inverses.
-/
@[ext] structure subgroupoid (C : Type u) [groupoid C] :=
(arrows : ∀ (c d : C), set (c ⟶ d))
(inv : ∀ {c d} {p : c ⟶ d} (hp : p ∈ arrows c d),
inv p ∈ arrows d c)
(mul : ∀ {c d e} {p} (hp : p ∈ arrows c d) {q} (hq : q ∈ arrows d e),
p ≫ q ∈ arrows c e)
attribute [protected] subgroupoid.inv subgroupoid.mul
namespace subgroupoid
variable (S : subgroupoid C)
lemma inv_mem_iff {c d : C} (f : c ⟶ d) : inv f ∈ S.arrows d c ↔ f ∈ S.arrows c d :=
begin
split,
{ rintro h,
suffices : inv (inv f) ∈ S.arrows c d,
{ simpa only [inv_eq_inv, is_iso.inv_inv] using this, },
{ apply S.inv h, }, },
{ apply S.inv, },
end
lemma mul_mem_cancel_left {c d e : C} {f : c ⟶ d} {g : d ⟶ e} (hf : f ∈ S.arrows c d) :
f ≫ g ∈ S.arrows c e ↔ g ∈ S.arrows d e :=
begin
split,
{ rintro h,
suffices : (inv f) ≫ f ≫ g ∈ S.arrows d e,
{ simpa only [inv_eq_inv, is_iso.inv_hom_id_assoc] using this, },
{ apply S.mul (S.inv hf) h, }, },
{ apply S.mul hf, },
end
lemma mul_mem_cancel_right {c d e : C} {f : c ⟶ d} {g : d ⟶ e} (hg : g ∈ S.arrows d e) :
f ≫ g ∈ S.arrows c e ↔ f ∈ S.arrows c d :=
begin
split,
{ rintro h,
suffices : (f ≫ g) ≫ (inv g) ∈ S.arrows c d,
{ simpa only [inv_eq_inv, is_iso.hom_inv_id, category.comp_id, category.assoc] using this, },
{ apply S.mul h (S.inv hg), }, },
{ exact λ hf, S.mul hf hg, },
end
/-- The vertices of `C` at which `S` has nonempty isotropy -/
def objs : set C := {c : C | (S.arrows c c).nonempty}
lemma mem_objs_of_src {c d : C} {f : c ⟶ d} (h : f ∈ S.arrows c d) : c ∈ S.objs :=
⟨f ≫ inv f, S.mul h (S.inv h)⟩
lemma mem_objs_of_tgt {c d : C} {f : c ⟶ d} (h : f ∈ S.arrows c d) : d ∈ S.objs :=
⟨(inv f) ≫ f, S.mul (S.inv h) h⟩
lemma id_mem_of_nonempty_isotropy (c : C) :
c ∈ objs S → 𝟙 c ∈ S.arrows c c :=
begin
rintro ⟨γ,hγ⟩,
convert S.mul hγ (S.inv hγ),
simp only [inv_eq_inv, is_iso.hom_inv_id],
end
lemma id_mem_of_src {c d : C} {f : c ⟶ d} (h : f ∈ S.arrows c d) : (𝟙 c) ∈ S.arrows c c :=
id_mem_of_nonempty_isotropy S c (mem_objs_of_src S h)
lemma id_mem_of_tgt {c d : C} {f : c ⟶ d} (h : f ∈ S.arrows c d) : (𝟙 d) ∈ S.arrows d d :=
id_mem_of_nonempty_isotropy S d (mem_objs_of_tgt S h)
/-- A subgroupoid seen as a quiver on vertex set `C` -/
def as_wide_quiver : quiver C := ⟨λ c d, subtype $ S.arrows c d⟩
/-- The coercion of a subgroupoid as a groupoid -/
@[simps to_category_comp_coe, simps inv_coe (lemmas_only)]
instance coe : groupoid S.objs :=
{ hom := λ a b, S.arrows a.val b.val,
id := λ a, ⟨𝟙 a.val, id_mem_of_nonempty_isotropy S a.val a.prop⟩,
comp := λ a b c p q, ⟨p.val ≫ q.val, S.mul p.prop q.prop⟩,
id_comp' := λ a b ⟨p,hp⟩, by simp only [category.id_comp],
comp_id' := λ a b ⟨p,hp⟩, by simp only [category.comp_id],
assoc' := λ a b c d ⟨p,hp⟩ ⟨q,hq⟩ ⟨r,hr⟩, by simp only [category.assoc],
inv := λ a b p, ⟨inv p.val, S.inv p.prop⟩,
inv_comp' := λ a b ⟨p,hp⟩, by simp only [inv_comp],
comp_inv' := λ a b ⟨p,hp⟩, by simp only [comp_inv] }
@[simp] lemma coe_inv_coe' {c d : S.objs} (p : c ⟶ d) :
(category_theory.inv p).val = category_theory.inv p.val :=
by { simp only [subtype.val_eq_coe, ←inv_eq_inv, coe_inv_coe], }
/-- The embedding of the coerced subgroupoid into its parent. -/
def hom : S.objs ⥤ C :=
{ obj := λ c, c.val,
map := λ c d f, f.val,
map_id' := λ c, rfl,
map_comp' := λ c d e f g, rfl }
lemma hom.inj_on_objects : function.injective (hom S).obj :=
by { rintros ⟨c,hc⟩ ⟨d,hd⟩ hcd, simp only [subtype.mk_eq_mk], exact hcd }
lemma hom.faithful :
∀ c d, function.injective (λ (f : c ⟶ d), (hom S).map f) :=
by { rintros ⟨c,hc⟩ ⟨d,hd⟩ ⟨f,hf⟩ ⟨g,hg⟩ hfg, simp only [subtype.mk_eq_mk], exact hfg, }
/-- The subgroup of the vertex group at `c` given by the subgroupoid -/
def vertex_subgroup {c : C} (hc : c ∈ S.objs) : subgroup (c ⟶ c) :=
{ carrier := S.arrows c c,
mul_mem' := λ f g hf hg, S.mul hf hg,
one_mem' := id_mem_of_nonempty_isotropy _ _ hc,
inv_mem' := λ f hf, S.inv hf }
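-- Illustrative remark: when `C` has a single object `c`, a subgroupoid amounts to a set
-- of arrows `S.arrows c c` closed under composition and inverses, and `S.vertex_subgroup`
-- packages it as a subgroup of the group `c ⟶ c`.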
instance : set_like (subgroupoid C) (Σ (c d : C), c ⟶ d) :=
{ coe := λ S, {F | F.2.2 ∈ S.arrows F.1 F.2.1},
coe_injective' := λ ⟨S, _, _⟩ ⟨T, _, _⟩ h, by { ext c d f, apply set.ext_iff.1 h ⟨c, d, f⟩ } }
lemma mem_iff (S : subgroupoid C) (F : Σ c d, c ⟶ d) :
F ∈ S ↔ F.2.2 ∈ S.arrows F.1 F.2.1 := iff.rfl
lemma le_iff (S T : subgroupoid C) : (S ≤ T) ↔ (∀ {c d}, (S.arrows c d) ⊆ (T.arrows c d)) :=
by { rw [set_like.le_def, sigma.forall], exact forall_congr (λ c, sigma.forall) }
instance : has_top (subgroupoid C) :=
⟨ { arrows := (λ _ _, set.univ),
mul := by { rintros, trivial, },
inv := by { rintros, trivial, } } ⟩
lemma mem_top {c d : C} (f : c ⟶ d) : f ∈ (⊤ : subgroupoid C).arrows c d := trivial
lemma mem_top_objs (c : C) : c ∈ (⊤ : subgroupoid C).objs :=
by { dsimp [has_top.top,objs], simp only [univ_nonempty], }
instance : has_bot (subgroupoid C) :=
⟨ { arrows := (λ _ _, ∅),
mul := λ _ _ _ _, false.elim,
inv := λ _ _ _, false.elim } ⟩
instance : inhabited (subgroupoid C) := ⟨⊤⟩
instance : has_inf (subgroupoid C) :=
⟨ λ S T,
{ arrows := (λ c d, (S.arrows c d) ∩ (T.arrows c d)),
inv := by { rintros, exact ⟨S.inv hp.1, T.inv hp.2⟩, },
mul := by { rintros, exact ⟨S.mul hp.1 hq.1, T.mul hp.2 hq.2⟩, } } ⟩
instance : has_Inf (subgroupoid C) :=
⟨ λ s,
{ arrows := λ c d, ⋂ S ∈ s, (subgroupoid.arrows S c d),
inv := by { intros, rw mem_Inter₂ at hp ⊢, exact λ S hS, S.inv (hp S hS) },
mul := by { intros, rw mem_Inter₂ at hp hq ⊢,exact λ S hS, S.mul (hp S hS) (hq S hS) } } ⟩
instance : complete_lattice (subgroupoid C) :=
{ bot := (⊥),
bot_le := λ S, empty_subset _,
top := (⊤),
le_top := λ S, subset_univ _,
inf := (⊓),
le_inf := λ R S T RS RT _ pR, ⟨RS pR, RT pR⟩,
inf_le_left := λ R S _, and.left,
inf_le_right := λ R S _, and.right,
.. complete_lattice_of_Inf (subgroupoid C)
begin
refine (λ s, ⟨λ S Ss F, _, λ T Tl F fT, _⟩);
simp only [Inf, mem_iff, mem_Inter],
exacts [λ hp, hp S Ss, λ S Ss, Tl Ss fT],
end }
lemma le_objs {S T : subgroupoid C} (h : S ≤ T) : S.objs ⊆ T.objs :=
λ s ⟨γ, hγ⟩, ⟨γ, @h ⟨s, s, γ⟩ hγ⟩
/-- The functor associated to the embedding of subgroupoids -/
def inclusion {S T : subgroupoid C} (h : S ≤ T) : S.objs ⥤ T.objs :=
{ obj := λ s, ⟨s.val, le_objs h s.prop⟩,
map := λ s t f, ⟨f.val, @h ⟨s, t, f.val⟩ f.prop⟩,
map_id' := λ _, rfl,
map_comp' := λ _ _ _ _ _, rfl }
lemma inclusion_inj_on_objects {S T : subgroupoid C} (h : S ≤ T) :
function.injective (inclusion h).obj :=
λ ⟨s,hs⟩ ⟨t,ht⟩, by simpa only [inclusion, subtype.mk_eq_mk] using id
lemma inclusion_faithful {S T : subgroupoid C} (h : S ≤ T) (s t : S.objs) :
function.injective (λ (f : s ⟶ t), (inclusion h).map f) :=
λ ⟨f,hf⟩ ⟨g,hg⟩, by { dsimp only [inclusion], simpa only [subtype.mk_eq_mk] using id }
lemma inclusion_refl {S : subgroupoid C} : inclusion (le_refl S) = 𝟭 S.objs :=
functor.hext (λ ⟨s,hs⟩, rfl) (λ ⟨s,hs⟩ ⟨t,ht⟩ ⟨f,hf⟩, heq_of_eq rfl)
lemma inclusion_trans {R S T : subgroupoid C} (k : R ≤ S) (h : S ≤ T) :
inclusion (k.trans h) = (inclusion k) ⋙ (inclusion h) := rfl
lemma inclusion_comp_embedding {S T : subgroupoid C} (h : S ≤ T) :
(inclusion h) ⋙ T.hom = S.hom := rfl
/-- The family of arrows of the discrete subgroupoid -/
inductive discrete.arrows : Π (c d : C), (c ⟶ d) → Prop
| id (c : C) : discrete.arrows c c (𝟙 c)
/-- The only arrows of the discrete subgroupoid are the identity arrows. -/
def discrete : subgroupoid C :=
{ arrows := discrete.arrows,
inv := by { rintros _ _ _ ⟨⟩, simp only [inv_eq_inv, is_iso.inv_id], split, },
mul := by { rintros _ _ _ _ ⟨⟩ _ ⟨⟩, rw category.comp_id, split, } }
lemma mem_discrete_iff {c d : C} (f : c ⟶ d) :
(f ∈ (discrete).arrows c d) ↔ (∃ (h : c = d), f = eq_to_hom h) :=
⟨by { rintro ⟨⟩, exact ⟨rfl, rfl⟩ }, by { rintro ⟨rfl, rfl⟩, split }⟩
/-- A subgroupoid is wide if its carrier set is all of `C`. -/
structure is_wide : Prop :=
(wide : ∀ c, (𝟙 c) ∈ (S.arrows c c))
lemma is_wide_iff_objs_eq_univ : S.is_wide ↔ S.objs = set.univ :=
begin
split,
{ rintro h,
ext, split; simp only [top_eq_univ, mem_univ, implies_true_iff, forall_true_left],
apply mem_objs_of_src S (h.wide x), },
{ rintro h,
refine ⟨λ c, _⟩,
obtain ⟨γ,γS⟩ := (le_of_eq h.symm : ⊤ ⊆ S.objs) (set.mem_univ c),
exact id_mem_of_src S γS, },
end
lemma is_wide.id_mem {S : subgroupoid C} (Sw : S.is_wide) (c : C) :
(𝟙 c) ∈ S.arrows c c := Sw.wide c
lemma is_wide.eq_to_hom_mem {S : subgroupoid C} (Sw : S.is_wide) {c d : C} (h : c = d) :
(eq_to_hom h) ∈ S.arrows c d := by
{ cases h, simp only [eq_to_hom_refl], apply Sw.id_mem c, }
/-- A subgroupoid is normal if it is wide and satisfies the expected stability under conjugacy. -/
structure is_normal extends (is_wide S) : Prop :=
(conj : ∀ {c d} (p : c ⟶ d) {γ : c ⟶ c} (hs : γ ∈ S.arrows c c),
((inv p) ≫ γ ≫ p) ∈ (S.arrows d d))
lemma is_normal.conj' {S : subgroupoid C} (Sn : is_normal S) :
∀ {c d} (p : d ⟶ c) {γ : c ⟶ c} (hs : γ ∈ S.arrows c c), (p ≫ γ ≫ (inv p)) ∈ (S.arrows d d) :=
λ c d p γ hs, by { convert Sn.conj (inv p) hs, simp, }
lemma is_normal.conjugation_bij (Sn : is_normal S) {c d} (p : c ⟶ d) :
set.bij_on (λ γ : c ⟶ c, (inv p) ≫ γ ≫ p) (S.arrows c c) (S.arrows d d) :=
begin
refine ⟨λ γ γS, Sn.conj p γS, λ γ₁ γ₁S γ₂ γ₂S h, _, λ δ δS, ⟨p ≫ δ ≫ (inv p), Sn.conj' p δS, _⟩⟩,
{ simpa only [inv_eq_inv, category.assoc, is_iso.hom_inv_id,
category.comp_id, is_iso.hom_inv_id_assoc] using p ≫= h =≫ inv p },
{ simp only [inv_eq_inv, category.assoc, is_iso.inv_hom_id,
category.comp_id, is_iso.inv_hom_id_assoc] },
end
lemma top_is_normal : is_normal (⊤ : subgroupoid C) :=
{ wide := (λ c, trivial),
conj := (λ a b c d e, trivial) }
lemma Inf_is_normal (s : set $ subgroupoid C) (sn : ∀ S ∈ s, is_normal S) : is_normal (Inf s) :=
{ wide := by { simp_rw [Inf, mem_Inter₂], exact λ c S Ss, (sn S Ss).wide c },
conj := by { simp_rw [Inf, mem_Inter₂], exact λ c d p γ hγ S Ss, (sn S Ss).conj p (hγ S Ss) } }
lemma discrete_is_normal : (@discrete C _).is_normal :=
{ wide := λ c, by { constructor, },
conj := λ c d f γ hγ, by
{ cases hγ, simp only [inv_eq_inv, category.id_comp, is_iso.inv_hom_id], constructor, } }
lemma is_normal.vertex_subgroup (Sn : is_normal S) (c : C) (cS : c ∈ S.objs) :
(S.vertex_subgroup cS).normal :=
{ conj_mem := λ x hx y, by { rw mul_assoc, exact Sn.conj' y hx } }
section generated_subgroupoid
-- TODO: proof that generated is just "words in X" and generated_normal is similarly
variable (X : ∀ c d : C, set (c ⟶ d))
/-- The subgroupoid generated by the set of arrows `X` -/
def generated : subgroupoid C :=
Inf {S : subgroupoid C | ∀ c d, X c d ⊆ S.arrows c d}
lemma subset_generated (c d : C) : X c d ⊆ (generated X).arrows c d :=
begin
dsimp only [generated, Inf],
simp only [subset_Inter₂_iff],
exact λ S hS f fS, hS _ _ fS,
end
/-- The normal subgroupoid generated by the set of arrows `X` -/
def generated_normal : subgroupoid C :=
Inf {S : subgroupoid C | (∀ c d, X c d ⊆ S.arrows c d) ∧ S.is_normal}
lemma generated_le_generated_normal : generated X ≤ generated_normal X :=
begin
apply @Inf_le_Inf (subgroupoid C) _,
exact λ S ⟨h,_⟩, h,
end
lemma generated_normal_is_normal : (generated_normal X).is_normal :=
Inf_is_normal _ (λ S h, h.right)
lemma is_normal.generated_normal_le {S : subgroupoid C} (Sn : S.is_normal) :
generated_normal X ≤ S ↔ ∀ c d, X c d ⊆ S.arrows c d :=
begin
split,
{ rintro h c d,
let h' := generated_le_generated_normal X,
rw le_iff at h h',
exact ((subset_generated X c d).trans (@h' c d)).trans (@h c d), },
{ rintro h,
apply @Inf_le (subgroupoid C) _,
exact ⟨h,Sn⟩, },
end
end generated_subgroupoid
section hom
variables {D : Type*} [groupoid D] (φ : C ⥤ D)
/--
A functor between groupoids defines a map of subgroupoids in the reverse direction
by taking preimages.
-/
def comap (S : subgroupoid D) : subgroupoid C :=
{ arrows := λ c d, {f : c ⟶ d | φ.map f ∈ S.arrows (φ.obj c) (φ.obj d)},
inv := λ c d p hp, by { rw [mem_set_of, inv_eq_inv, φ.map_inv p, ← inv_eq_inv], exact S.inv hp },
mul := begin
rintros,
simp only [mem_set_of, functor.map_comp],
apply S.mul; assumption,
end }
lemma comap_mono (S T : subgroupoid D) :
S ≤ T → comap φ S ≤ comap φ T := λ ST ⟨c,d,p⟩, @ST ⟨_,_,_⟩
lemma is_normal_comap {S : subgroupoid D} (Sn : is_normal S) : is_normal (comap φ S) :=
{ wide := λ c, by { rw [comap, mem_set_of, functor.map_id], apply Sn.wide, },
conj := λ c d f γ hγ, by
{ simp_rw [inv_eq_inv f, comap, mem_set_of, functor.map_comp, functor.map_inv, ←inv_eq_inv],
exact Sn.conj _ hγ, } }
@[simp] lemma comap_comp {E : Type*} [groupoid E] (ψ : D ⥤ E) :
comap (φ ⋙ ψ) = (comap φ) ∘ (comap ψ) := rfl
/-- The kernel of a functor between groupoids is the preimage of the discrete subgroupoid. -/
def ker : subgroupoid C := comap φ discrete
lemma mem_ker_iff {c d : C} (f : c ⟶ d) :
f ∈ (ker φ).arrows c d ↔ ∃ (h : φ.obj c = φ.obj d), φ.map f = eq_to_hom h :=
mem_discrete_iff (φ.map f)
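-- In words: an arrow lies in the kernel of `φ` exactly when `φ` sends it to an identity
-- arrow, up to the `eq_to_hom` identification coming from `φ.obj c = φ.obj d`.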
lemma ker_is_normal : (ker φ).is_normal := is_normal_comap φ (discrete_is_normal)
@[simp]
lemma ker_comp {E : Type*} [groupoid E] (ψ : D ⥤ E) : ker (φ ⋙ ψ) = comap φ (ker ψ) := rfl
/-- The family of arrows of the image of a subgroupoid under a functor injective on objects -/
inductive map.arrows (hφ : function.injective φ.obj) (S : subgroupoid C) :
Π (c d : D), (c ⟶ d) → Prop
| im {c d : C} (f : c ⟶ d) (hf : f ∈ S.arrows c d) : map.arrows (φ.obj c) (φ.obj d) (φ.map f)
lemma map.arrows_iff (hφ : function.injective φ.obj) (S : subgroupoid C) {c d : D} (f : c ⟶ d) :
map.arrows φ hφ S c d f ↔
∃ (a b : C) (g : a ⟶ b) (ha : φ.obj a = c) (hb : φ.obj b = d) (hg : g ∈ S.arrows a b),
f = (eq_to_hom ha.symm) ≫ φ.map g ≫ (eq_to_hom hb) :=
begin
split,
{ rintro ⟨g,hg⟩, exact ⟨_,_,g,rfl,rfl,hg, eq_conj_eq_to_hom _⟩ },
{ rintro ⟨a,b,g,rfl,rfl,hg,rfl⟩, rw ← eq_conj_eq_to_hom, split, exact hg },
end
/-- The "forward" image of a subgroupoid under a functor injective on objects -/
def map (hφ : function.injective φ.obj) (S : subgroupoid C) : subgroupoid D :=
{ arrows := map.arrows φ hφ S,
inv := begin
rintro _ _ _ ⟨⟩,
rw [inv_eq_inv, ←functor.map_inv, ←inv_eq_inv],
split, apply S.inv, assumption,
end,
mul := begin
rintro _ _ _ _ ⟨f,hf⟩ q hq,
obtain ⟨c₃,c₄,g,he,rfl,hg,gq⟩ := (map.arrows_iff φ hφ S q).mp hq,
cases hφ he, rw [gq, ← eq_conj_eq_to_hom, ← φ.map_comp],
split, exact S.mul hf hg,
end }
lemma mem_map_iff (hφ : function.injective φ.obj) (S : subgroupoid C) {c d : D} (f : c ⟶ d) :
f ∈ (map φ hφ S).arrows c d ↔
∃ (a b : C) (g : a ⟶ b) (ha : φ.obj a = c) (hb : φ.obj b = d) (hg : g ∈ S.arrows a b),
f = (eq_to_hom ha.symm) ≫ φ.map g ≫ (eq_to_hom hb) := map.arrows_iff φ hφ S f
lemma galois_connection_map_comap (hφ : function.injective φ.obj) :
galois_connection (map φ hφ) (comap φ) :=
begin
rintro S T, simp_rw [le_iff], split,
{ exact λ h c d f fS, h (map.arrows.im f fS), },
{ rintros h _ _ g ⟨a,gφS⟩,
exact h gφS, },
end
lemma map_mono (hφ : function.injective φ.obj) (S T : subgroupoid C) :
S ≤ T → map φ hφ S ≤ map φ hφ T :=
λ h, (galois_connection_map_comap φ hφ).monotone_l h
lemma le_comap_map (hφ : function.injective φ.obj) (S : subgroupoid C) :
S ≤ comap φ (map φ hφ S) := (galois_connection_map_comap φ hφ).le_u_l S
lemma map_comap_le (hφ : function.injective φ.obj) (T : subgroupoid D) :
map φ hφ (comap φ T) ≤ T := (galois_connection_map_comap φ hφ).l_u_le T
lemma map_le_iff_le_comap (hφ : function.injective φ.obj)
(S : subgroupoid C) (T : subgroupoid D) :
map φ hφ S ≤ T ↔ S ≤ comap φ T := (galois_connection_map_comap φ hφ).le_iff_le
lemma mem_map_objs_iff (hφ : function.injective φ.obj) (d : D) :
d ∈ (map φ hφ S).objs ↔ ∃ c ∈ S.objs, φ.obj c = d :=
begin
dsimp [objs, map],
split,
{ rintro ⟨f,hf⟩,
change map.arrows φ hφ S d d f at hf, rw map.arrows_iff at hf,
obtain ⟨c,d,g,ec,ed,eg,gS,eg⟩ := hf,
exact ⟨c, ⟨mem_objs_of_src S eg, ec⟩⟩, },
{ rintros ⟨c,⟨γ,γS⟩,rfl⟩,
exact ⟨φ.map γ,⟨γ,γS⟩⟩, }
end
@[simp]
lemma map_objs_eq (hφ : function.injective φ.obj) : (map φ hφ S).objs = φ.obj '' S.objs :=
by { ext, convert mem_map_objs_iff S φ hφ x, simp only [mem_image, exists_prop], }
/-- The image of a functor injective on objects -/
def im (hφ : function.injective φ.obj) := map φ hφ (⊤)
lemma mem_im_iff (hφ : function.injective φ.obj) {c d : D} (f : c ⟶ d) :
f ∈ (im φ hφ).arrows c d ↔
∃ (a b : C) (g : a ⟶ b) (ha : φ.obj a = c) (hb : φ.obj b = d),
f = (eq_to_hom ha.symm) ≫ φ.map g ≫ (eq_to_hom hb) :=
by { convert map.arrows_iff φ hφ ⊤ f, simp only [has_top.top, mem_univ, exists_true_left] }
lemma mem_im_objs_iff (hφ : function.injective φ.obj) (d : D) :
d ∈ (im φ hφ).objs ↔ ∃ c : C, φ.obj c = d := by
{ simp only [im, mem_map_objs_iff, mem_top_objs, exists_true_left], }
lemma obj_surjective_of_im_eq_top (hφ : function.injective φ.obj) (hφ' : im φ hφ = ⊤) :
function.surjective φ.obj :=
begin
rintro d,
rw [←mem_im_objs_iff, hφ'],
apply mem_top_objs,
end
lemma is_normal_map (hφ : function.injective φ.obj) (hφ' : im φ hφ = ⊤) (Sn : S.is_normal) :
(map φ hφ S).is_normal :=
{ wide := λ d, by
{ obtain ⟨c,rfl⟩ := obj_surjective_of_im_eq_top φ hφ hφ' d,
change map.arrows φ hφ S _ _ (𝟙 _), rw ←functor.map_id,
constructor, exact Sn.wide c, },
conj := λ d d' g δ hδ, by
{ rw mem_map_iff at hδ,
obtain ⟨c,c',γ,cd,cd',γS,hγ⟩ := hδ, subst_vars, cases hφ cd',
have : d' ∈ (im φ hφ).objs, by { rw hφ', apply mem_top_objs, },
rw mem_im_objs_iff at this,
obtain ⟨c',rfl⟩ := this,
have : g ∈ (im φ hφ).arrows (φ.obj c) (φ.obj c'), by
{ rw hφ', trivial, },
rw mem_im_iff at this,
obtain ⟨b,b',f,hb,hb',_,hf⟩ := this, subst_vars, cases hφ hb, cases hφ hb',
change map.arrows φ hφ S (φ.obj c') (φ.obj c') _,
simp only [eq_to_hom_refl, category.comp_id, category.id_comp, inv_eq_inv],
suffices : map.arrows φ hφ S (φ.obj c') (φ.obj c') (φ.map $ inv f ≫ γ ≫ f),
{ simp only [inv_eq_inv, functor.map_comp, functor.map_inv] at this, exact this, },
{ constructor, apply Sn.conj f γS, } } }
end hom
section thin
/-- A subgroupoid `is_thin` if it has at most one arrow between any two vertices. -/
abbreviation is_thin := quiver.is_thin S.objs
lemma is_thin_iff : S.is_thin ↔ ∀ (c : S.objs), subsingleton (S.arrows c c) :=
by apply is_thin_iff
end thin
section disconnected
/-- A subgroupoid `is_totally_disconnected` if it has only isotropy arrows. -/
abbreviation is_totally_disconnected := is_totally_disconnected S.objs
lemma is_totally_disconnected_iff :
S.is_totally_disconnected ↔ ∀ c d, (S.arrows c d).nonempty → c = d :=
begin
split,
{ rintro h c d ⟨f,fS⟩,
rw ←@subtype.mk_eq_mk _ _ c (mem_objs_of_src S fS) d (mem_objs_of_tgt S fS),
exact h ⟨c, mem_objs_of_src S fS⟩ ⟨d, mem_objs_of_tgt S fS⟩ ⟨f, fS⟩, },
{ rintros h ⟨c, hc⟩ ⟨d, hd⟩ ⟨f, fS⟩,
simp only [subtype.mk_eq_mk],
exact h c d ⟨f, fS⟩, },
end
/-- The isotropy subgroupoid of `S` -/
def disconnect : subgroupoid C :=
{ arrows := λ c d f, c = d ∧ f ∈ S.arrows c d,
inv := by { rintros _ _ _ ⟨rfl, h⟩, exact ⟨rfl, S.inv h⟩, },
mul := by { rintros _ _ _ _ ⟨rfl, h⟩ _ ⟨rfl, h'⟩, exact ⟨rfl, S.mul h h'⟩, } }
lemma disconnect_le : S.disconnect ≤ S :=
by { rw le_iff, rintros _ _ _ ⟨⟩, assumption, }
lemma disconnect_normal (Sn : S.is_normal) : S.disconnect.is_normal :=
{ wide := λ c, ⟨rfl, Sn.wide c⟩,
conj := λ c d p γ ⟨_,h'⟩, ⟨rfl, Sn.conj _ h'⟩ }
@[simp] lemma mem_disconnect_objs_iff {c : C} : c ∈ S.disconnect.objs ↔ c ∈ S.objs :=
⟨λ ⟨γ, h, γS⟩, ⟨γ, γS⟩, λ ⟨γ, γS⟩, ⟨γ, rfl, γS⟩⟩
lemma disconnect_objs : S.disconnect.objs = S.objs :=
by { apply set.ext, apply mem_disconnect_objs_iff, }
lemma disconnect_is_totally_disconnected : S.disconnect.is_totally_disconnected :=
by { rw is_totally_disconnected_iff, exact λ c d ⟨f, h, fS⟩, h }
end disconnected
section full
variable (D : set C)
/-- The full subgroupoid on a set `D : set C` -/
def full : subgroupoid C :=
{ arrows := λ c d _, c ∈ D ∧ d ∈ D,
inv := by { rintros _ _ _ ⟨⟩, constructor; assumption, },
mul := by { rintros _ _ _ _ ⟨⟩ _ ⟨⟩, constructor; assumption,} }
lemma full_objs : (full D).objs = D :=
set.ext $ λ _, ⟨λ ⟨f, h, _⟩, h , λ h, ⟨𝟙 _, h, h⟩⟩
@[simp] lemma mem_full_iff {c d : C} {f : c ⟶ d} : f ∈ (full D).arrows c d ↔ c ∈ D ∧ d ∈ D :=
iff.rfl
@[simp] lemma mem_full_objs_iff {c : C} : c ∈ (full D).objs ↔ c ∈ D :=
by rw full_objs
@[simp] lemma full_empty : full ∅ = (⊥ : subgroupoid C) :=
by { ext, simp only [has_bot.bot, mem_full_iff, mem_empty_iff_false, and_self], }
@[simp] lemma full_univ : full set.univ = (⊤ : subgroupoid C) :=
by { ext, simp only [mem_full_iff, mem_univ, and_self, true_iff], }
lemma full_mono {D E : set C} (h : D ≤ E) : full D ≤ full E :=
begin
rw le_iff,
rintro c d f,
simp only [mem_full_iff],
exact λ ⟨hc, hd⟩, ⟨h hc, h hd⟩,
end
lemma full_arrow_eq_iff {c d : (full D).objs} {f g : c ⟶ d} :
f = g ↔ (↑f : c.val ⟶ d.val) = ↑g :=
by apply subtype.ext_iff
end full
end subgroupoid
end category_theory
blob_id: 483cd2c375065215f2c0ee553ec30099811ad6e8
directory_id: 4d2583807a5ac6caaffd3d7a5f646d61ca85d532
path: /src/group_theory/perm/concrete_cycle.lean
content_id: f52e50a7239b5a107a2d91d0c5465ae1abade2ee
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: AntoineChambert-Loir/mathlib
snapshot_id: 64aabb896129885f12296a799818061bc90da1ff
revision_id: 07be904260ab6e36a5769680b6012f03a4727134
branch_name: refs/heads/master
visit_date: 1,693,187,631,771
revision_date: 1,636,719,886,000
committer_date: 1,636,719,886,000
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Lean
is_vendor: false
is_generated: false
length_bytes: 19,621
extension: lean
content:
/-
Copyright (c) 2021 Yakov Pechersky. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yakov Pechersky
-/
import group_theory.perm.list
import data.list.cycle
import group_theory.perm.cycle_type
/-!
# Properties of cyclic permutations constructed from lists/cycles
In the following, `{α : Type*} [fintype α] [decidable_eq α]`.
## Main definitions
* `cycle.form_perm`: the cyclic permutation created by looping over a `cycle α`
* `equiv.perm.to_list`: the list formed by iterating application of a permutation
* `equiv.perm.to_cycle`: the cycle formed by iterating application of a permutation
* `equiv.perm.iso_cycle`: the equivalence between cyclic permutations `f : perm α`
and the terms of `cycle α` that correspond to them
* `equiv.perm.iso_cycle'`: the same equivalence as `equiv.perm.iso_cycle`
but with evaluation via choosing over fintypes
* The notation `c[1, 2, 3]` to emulate notation of cyclic permutations `(1 2 3)`
* A `has_repr` instance for any `perm α`, by representing the `finset` of
`cycle α` that correspond to the cycle factors.
## Main results
* `list.is_cycle_form_perm`: a nontrivial list without duplicates, when interpreted as
a permutation, is cyclic
* `equiv.perm.is_cycle.exists_unique_cycle`: there is only one nontrivial `cycle α`
corresponding to each cyclic `f : perm α`
## Implementation details
The forward direction of `equiv.perm.iso_cycle'` uses `fintype.choose` of the uniqueness
result, relying on the `fintype` instance of a `cycle.nodup` subtype.
It is unclear whether this is faster than `equiv.perm.to_cycle`, which relies
on recursion over `finset.univ`.
Running `#eval` on even a simple noncyclic permutation such as `c[(1 : fin 7), 2, 3] * c[0, 5]`
takes a long time. TODO: is this because computing the cycle factors is slow?
-/
open equiv equiv.perm list
namespace list
variables {α : Type*} [decidable_eq α] {l l' : list α}
lemma form_perm_disjoint_iff (hl : nodup l) (hl' : nodup l')
(hn : 2 ≤ l.length) (hn' : 2 ≤ l'.length) :
perm.disjoint (form_perm l) (form_perm l') ↔ l.disjoint l' :=
begin
rw [disjoint_iff_eq_or_eq, list.disjoint],
split,
{ rintro h x hx hx',
specialize h x,
rw [form_perm_apply_mem_eq_self_iff _ hl _ hx,
form_perm_apply_mem_eq_self_iff _ hl' _ hx'] at h,
rcases h with hl | hl'; linarith },
{ intros h x,
by_cases hx : x ∈ l; by_cases hx' : x ∈ l',
{ exact (h hx hx').elim },
all_goals { have := form_perm_eq_self_of_not_mem _ _ ‹_›, tauto } }
end
lemma is_cycle_form_perm (hl : nodup l) (hn : 2 ≤ l.length) :
is_cycle (form_perm l) :=
begin
cases l with x l,
{ norm_num at hn },
induction l with y l IH generalizing x,
{ norm_num at hn },
{ use x,
split,
{ rwa form_perm_apply_mem_ne_self_iff _ hl _ (mem_cons_self _ _) },
{ intros w hw,
have : w ∈ (x :: y :: l) := mem_of_form_perm_ne_self _ _ hw,
obtain ⟨k, hk, rfl⟩ := nth_le_of_mem this,
use k,
simp only [zpow_coe_nat, form_perm_pow_apply_head _ _ hl k, nat.mod_eq_of_lt hk] } }
end
lemma pairwise_same_cycle_form_perm (hl : nodup l) (hn : 2 ≤ l.length) :
pairwise (l.form_perm.same_cycle) l :=
pairwise.imp_mem.mpr (pairwise_of_forall (λ x y hx hy, (is_cycle_form_perm hl hn).same_cycle
((form_perm_apply_mem_ne_self_iff _ hl _ hx).mpr hn)
((form_perm_apply_mem_ne_self_iff _ hl _ hy).mpr hn)))
lemma cycle_of_form_perm (hl : nodup l) (hn : 2 ≤ l.length) (x) :
cycle_of l.attach.form_perm x = l.attach.form_perm :=
have hn : 2 ≤ l.attach.length := by rwa ← length_attach at hn,
have hl : l.attach.nodup := by rwa ← nodup_attach at hl,
(is_cycle_form_perm hl hn).cycle_of_eq
((form_perm_apply_mem_ne_self_iff _ hl _ (mem_attach _ _)).mpr hn)
lemma cycle_type_form_perm (hl : nodup l) (hn : 2 ≤ l.length) :
cycle_type l.attach.form_perm = {l.length} :=
begin
rw ←length_attach at hn,
rw ←nodup_attach at hl,
rw cycle_type_eq [l.attach.form_perm],
{ simp only [map, function.comp_app],
rw [support_form_perm_of_nodup _ hl, card_to_finset, erase_dup_eq_self.mpr hl],
{ simpa },
{ intros x h,
simpa [h, nat.succ_le_succ_iff] using hn } },
{ simp },
{ simpa using is_cycle_form_perm hl hn },
{ simp }
end
lemma form_perm_apply_mem_eq_next (hl : nodup l) (x : α) (hx : x ∈ l) :
form_perm l x = next l x hx :=
begin
obtain ⟨k, hk, rfl⟩ := nth_le_of_mem hx,
rw [next_nth_le _ hl, form_perm_apply_nth_le _ hl]
end
end list
namespace cycle
variables {α : Type*} [decidable_eq α] (s s' : cycle α)
/--
A cycle `s : cycle α`, given `nodup s`, can be interpreted as an `equiv.perm α`
where each element in the list is permuted to the next one, defined as `form_perm`.
-/
def form_perm : Π (s : cycle α) (h : nodup s), equiv.perm α :=
λ s, quot.hrec_on s (λ l h, form_perm l)
(λ l₁ l₂ (h : l₁ ~r l₂),
begin
ext,
{ exact h.nodup_iff },
{ intros h₁ h₂ _,
exact heq_of_eq (form_perm_eq_of_is_rotated h₁ h) }
end)
@[simp] lemma form_perm_coe (l : list α) (hl : l.nodup) :
form_perm (l : cycle α) hl = l.form_perm := rfl
lemma form_perm_subsingleton (s : cycle α) (h : subsingleton s) :
form_perm s h.nodup = 1 :=
begin
induction s using quot.induction_on,
simp only [form_perm_coe, mk_eq_coe],
simp only [length_subsingleton_iff, length_coe, mk_eq_coe] at h,
cases s with hd tl,
{ simp },
{ simp only [length_eq_zero, add_le_iff_nonpos_left, list.length, nonpos_iff_eq_zero] at h,
simp [h] }
end
lemma is_cycle_form_perm (s : cycle α) (h : nodup s) (hn : nontrivial s) :
is_cycle (form_perm s h) :=
begin
induction s using quot.induction_on,
exact list.is_cycle_form_perm h (length_nontrivial hn)
end
lemma support_form_perm [fintype α] (s : cycle α) (h : nodup s) (hn : nontrivial s) :
support (form_perm s h) = s.to_finset :=
begin
induction s using quot.induction_on,
refine support_form_perm_of_nodup s h _,
rintro _ rfl,
simpa [nat.succ_le_succ_iff] using length_nontrivial hn
end
lemma form_perm_eq_self_of_not_mem (s : cycle α) (h : nodup s) (x : α) (hx : x ∉ s) :
form_perm s h x = x :=
begin
induction s using quot.induction_on,
simpa using list.form_perm_eq_self_of_not_mem _ _ hx
end
lemma form_perm_apply_mem_eq_next (s : cycle α) (h : nodup s) (x : α) (hx : x ∈ s) :
form_perm s h x = next s h x hx :=
begin
induction s using quot.induction_on,
simpa using list.form_perm_apply_mem_eq_next h _ _
end
lemma form_perm_reverse (s : cycle α) (h : nodup s) :
form_perm s.reverse (nodup_reverse_iff.mpr h) = (form_perm s h)⁻¹ :=
begin
induction s using quot.induction_on,
simpa using form_perm_reverse _ h
end
lemma form_perm_eq_form_perm_iff {α : Type*} [decidable_eq α]
{s s' : cycle α} {hs : s.nodup} {hs' : s'.nodup} :
s.form_perm hs = s'.form_perm hs' ↔ s = s' ∨ s.subsingleton ∧ s'.subsingleton :=
begin
rw [cycle.length_subsingleton_iff, cycle.length_subsingleton_iff],
revert s s',
intros s s',
apply quotient.induction_on₂' s s',
intros l l',
simpa using form_perm_eq_form_perm_iff
end
end cycle
variables {α : Type*}
namespace equiv.perm
variables [fintype α] [decidable_eq α] (p : equiv.perm α) (x : α)
/--
`equiv.perm.to_list (f : perm α) (x : α)` generates the list `[x, f x, f (f x), ...]`
until looping. That means when `f x = x`, `to_list f x = []`.
-/
def to_list : list α :=
(list.range (cycle_of p x).support.card).map (λ k, (p ^ k) x)
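-- Illustrative sketch (hypothetical values): for `f := list.form_perm [(0 : fin 4), 1, 2]`
-- one expects `to_list f 0 = [0, 1, 2]` (see `to_list_form_perm_nontrivial` below) and
-- `to_list f 3 = []`, since `3` is fixed by `f` and hence not in its support.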
@[simp] lemma to_list_one : to_list (1 : perm α) x = [] :=
by simp [to_list, cycle_of_one]
@[simp] lemma to_list_eq_nil_iff {p : perm α} {x} : to_list p x = [] ↔ x ∉ p.support :=
by simp [to_list]
@[simp] lemma length_to_list : length (to_list p x) = (cycle_of p x).support.card :=
by simp [to_list]
lemma to_list_ne_singleton (y : α) : to_list p x ≠ [y] :=
begin
intro H,
simpa [card_support_ne_one] using congr_arg length H
end
lemma two_le_length_to_list_iff_mem_support {p : perm α} {x : α} :
2 ≤ length (to_list p x) ↔ x ∈ p.support :=
by simp
lemma length_to_list_pos_of_mem_support (h : x ∈ p.support) : 0 < length (to_list p x) :=
zero_lt_two.trans_le (two_le_length_to_list_iff_mem_support.mpr h)
lemma nth_le_to_list (n : ℕ) (hn : n < length (to_list p x)) :
nth_le (to_list p x) n hn = (p ^ n) x :=
by simp [to_list]
lemma to_list_nth_le_zero (h : x ∈ p.support) :
(to_list p x).nth_le 0 (length_to_list_pos_of_mem_support _ _ h) = x :=
by simp [to_list]
variables {p} {x}
lemma mem_to_list_iff {y : α} :
y ∈ to_list p x ↔ same_cycle p x y ∧ x ∈ p.support :=
begin
simp only [to_list, mem_range, mem_map],
split,
{ rintro ⟨n, hx, rfl⟩,
refine ⟨⟨n, rfl⟩, _⟩,
contrapose! hx,
rw ←support_cycle_of_eq_nil_iff at hx,
simp [hx] },
{ rintro ⟨h, hx⟩,
simpa using same_cycle.nat_of_mem_support _ h hx }
end
lemma nodup_to_list (p : perm α) (x : α) :
nodup (to_list p x) :=
begin
by_cases hx : p x = x,
{ rw [←not_mem_support, ←to_list_eq_nil_iff] at hx,
simp [hx] },
have hc : is_cycle (cycle_of p x) := is_cycle_cycle_of p hx,
rw nodup_iff_nth_le_inj,
rintros n m hn hm,
rw [length_to_list, ←order_of_is_cycle hc] at hm hn,
rw [←cycle_of_apply_self, ←ne.def, ←mem_support] at hx,
rw [nth_le_to_list, nth_le_to_list,
←cycle_of_pow_apply_self p x n, ←cycle_of_pow_apply_self p x m],
cases n; cases m,
{ simp },
{ rw [←hc.mem_support_pos_pow_iff_of_lt_order_of m.zero_lt_succ hm,
mem_support, cycle_of_pow_apply_self] at hx,
simp [hx.symm] },
{ rw [←hc.mem_support_pos_pow_iff_of_lt_order_of n.zero_lt_succ hn,
mem_support, cycle_of_pow_apply_self] at hx,
simp [hx] },
intro h,
have hn' : ¬ order_of (p.cycle_of x) ∣ n.succ := nat.not_dvd_of_pos_of_lt n.zero_lt_succ hn,
have hm' : ¬ order_of (p.cycle_of x) ∣ m.succ := nat.not_dvd_of_pos_of_lt m.zero_lt_succ hm,
rw ←hc.support_pow_eq_iff at hn' hm',
rw [←nat.mod_eq_of_lt hn, ←nat.mod_eq_of_lt hm, ←pow_inj_mod],
refine support_congr _ _,
{ rw [hm', hn'],
exact finset.subset.refl _ },
{ rw hm',
intros y hy,
obtain ⟨k, rfl⟩ := hc.exists_pow_eq (mem_support.mp hx) (mem_support.mp hy),
rw [←mul_apply, (commute.pow_pow_self _ _ _).eq, mul_apply, h, ←mul_apply, ←mul_apply,
(commute.pow_pow_self _ _ _).eq] }
end
lemma next_to_list_eq_apply (p : perm α) (x y : α) (hy : y ∈ to_list p x) :
next (to_list p x) y hy = p y :=
begin
rw mem_to_list_iff at hy,
obtain ⟨k, hk, hk'⟩ := hy.left.nat_of_mem_support _ hy.right,
rw ←nth_le_to_list p x k (by simpa using hk) at hk',
simp_rw ←hk',
rw [next_nth_le _ (nodup_to_list _ _), nth_le_to_list, nth_le_to_list, ←mul_apply, ←pow_succ,
length_to_list, pow_apply_eq_pow_mod_order_of_cycle_of_apply p (k + 1), order_of_is_cycle],
exact is_cycle_cycle_of _ (mem_support.mp hy.right)
end
lemma to_list_pow_apply_eq_rotate (p : perm α) (x : α) (k : ℕ) :
p.to_list ((p ^ k) x) = (p.to_list x).rotate k :=
begin
apply ext_le,
{ simp },
{ intros n hn hn',
rw [nth_le_to_list, nth_le_rotate, nth_le_to_list, length_to_list,
pow_mod_card_support_cycle_of_self_apply, pow_add, mul_apply] }
end
lemma same_cycle.to_list_is_rotated {f : perm α} {x y : α} (h : same_cycle f x y) :
to_list f x ~r to_list f y :=
begin
by_cases hx : x ∈ f.support,
{ obtain ⟨_ | k, hk, hy⟩ := h.nat_of_mem_support _ hx,
{ simp only [coe_one, id.def, pow_zero] at hy,
simp [hy] },
use k.succ,
rw [←to_list_pow_apply_eq_rotate, hy] },
{ rw [to_list_eq_nil_iff.mpr hx, is_rotated_nil_iff', eq_comm, to_list_eq_nil_iff],
rwa ←h.mem_support_iff }
end
lemma pow_apply_mem_to_list_iff_mem_support {n : ℕ} :
(p ^ n) x ∈ p.to_list x ↔ x ∈ p.support :=
begin
rw [mem_to_list_iff, and_iff_right_iff_imp],
refine λ _, same_cycle.symm _,
rw same_cycle_pow_left_iff
end
lemma to_list_form_perm_nil (x : α) :
to_list (form_perm ([] : list α)) x = [] :=
by simp
lemma to_list_form_perm_singleton (x y : α) :
to_list (form_perm [x]) y = [] :=
by simp
lemma to_list_form_perm_nontrivial (l : list α) (hl : 2 ≤ l.length) (hn : nodup l) :
to_list (form_perm l) (l.nth_le 0 (zero_lt_two.trans_le hl)) = l :=
begin
have hc : l.form_perm.is_cycle := list.is_cycle_form_perm hn hl,
have hs : l.form_perm.support = l.to_finset,
{ refine support_form_perm_of_nodup _ hn _,
rintro _ rfl,
simpa [nat.succ_le_succ_iff] using hl },
rw [to_list, hc.cycle_of_eq (mem_support.mp _), hs, card_to_finset, erase_dup_eq_self.mpr hn],
{ refine list.ext_le (by simp) (λ k hk hk', _),
simp [form_perm_pow_apply_nth_le _ hn, nat.mod_eq_of_lt hk'] },
{ simpa [hs] using nth_le_mem _ _ _ }
end
lemma to_list_form_perm_is_rotated_self (l : list α) (hl : 2 ≤ l.length) (hn : nodup l)
(x : α) (hx : x ∈ l) :
to_list (form_perm l) x ~r l :=
begin
obtain ⟨k, hk, rfl⟩ := nth_le_of_mem hx,
have hr : l ~r l.rotate k := ⟨k, rfl⟩,
rw form_perm_eq_of_is_rotated hn hr,
rw ←nth_le_rotate' l k k,
simp only [nat.mod_eq_of_lt hk, tsub_add_cancel_of_le hk.le, nat.mod_self],
rw [to_list_form_perm_nontrivial],
{ simp },
{ simpa using hl },
{ simpa using hn }
end
lemma form_perm_to_list (f : perm α) (x : α) :
form_perm (to_list f x) = f.cycle_of x :=
begin
by_cases hx : f x = x,
{ rw [(cycle_of_eq_one_iff f).mpr hx, to_list_eq_nil_iff.mpr (not_mem_support.mpr hx),
form_perm_nil] },
ext y,
by_cases hy : same_cycle f x y,
{ obtain ⟨k, hk, rfl⟩ := hy.nat_of_mem_support _ (mem_support.mpr hx),
rw [cycle_of_apply_apply_pow_self, list.form_perm_apply_mem_eq_next (nodup_to_list f x),
next_to_list_eq_apply, pow_succ, mul_apply],
rw mem_to_list_iff,
exact ⟨⟨k, rfl⟩, mem_support.mpr hx⟩ },
{ rw [cycle_of_apply_of_not_same_cycle hy, form_perm_apply_of_not_mem],
simp [mem_to_list_iff, hy] }
end
lemma is_cycle.exists_unique_cycle {f : perm α} (hf : is_cycle f) :
∃! (s : cycle α), ∃ (h : s.nodup), s.form_perm h = f :=
begin
obtain ⟨x, hx, hy⟩ := id hf,
refine ⟨f.to_list x, ⟨nodup_to_list f x, _⟩, _⟩,
{ simp [form_perm_to_list, hf.cycle_of_eq hx] },
{ rintro ⟨l⟩ ⟨hn, rfl⟩,
simp only [cycle.mk_eq_coe, cycle.coe_eq_coe, subtype.coe_mk, cycle.form_perm_coe],
refine (to_list_form_perm_is_rotated_self _ _ hn _ _).symm,
{ contrapose! hx,
suffices : form_perm l = 1,
{ simp [this] },
rw form_perm_eq_one_iff _ hn,
exact nat.le_of_lt_succ hx },
{ rw ←mem_to_finset,
refine support_form_perm_le l _,
simpa using hx } }
end
lemma is_cycle.exists_unique_cycle_subtype {f : perm α} (hf : is_cycle f) :
∃! (s : {s : cycle α // s.nodup}), (s : cycle α).form_perm s.prop = f :=
begin
obtain ⟨s, ⟨hs, rfl⟩, hs'⟩ := hf.exists_unique_cycle,
refine ⟨⟨s, hs⟩, rfl, _⟩,
rintro ⟨t, ht⟩ ht',
simpa using hs' _ ⟨ht, ht'⟩
end
lemma is_cycle.exists_unique_cycle_nontrivial_subtype {f : perm α} (hf : is_cycle f) :
∃! (s : {s : cycle α // s.nodup ∧ s.nontrivial}), (s : cycle α).form_perm s.prop.left = f :=
begin
obtain ⟨⟨s, hn⟩, hs, hs'⟩ := hf.exists_unique_cycle_subtype,
refine ⟨⟨s, hn, _⟩, _, _⟩,
{ rw hn.nontrivial_iff,
subst f,
intro H,
refine hf.ne_one _,
simpa using cycle.form_perm_subsingleton _ H },
{ simpa using hs },
{ rintro ⟨t, ht, ht'⟩ ht'',
simpa using hs' ⟨t, ht⟩ ht'' }
end
/--
Given a cyclic `f : perm α`, generate the `cycle α` in the order
of application of `f`. Implemented by finding an element `x : α`
in the support of `f` in `finset.univ`, and then taking
`equiv.perm.to_list f x`.
-/
def to_cycle (f : perm α) (hf : is_cycle f) : cycle α :=
multiset.rec_on (finset.univ : finset α).val
(quot.mk _ [])
(λ x s l, if f x = x then l else to_list f x)
(by { intros x y m s,
refine heq_of_eq _,
split_ifs with hx hy hy; try { refl },
{ have hc : same_cycle f x y := is_cycle.same_cycle hf hx hy,
exact quotient.sound' hc.to_list_is_rotated }})
lemma to_cycle_eq_to_list (f : perm α) (hf : is_cycle f) (x : α) (hx : f x ≠ x) :
to_cycle f hf = to_list f x :=
begin
have key : (finset.univ : finset α).val = x ::ₘ finset.univ.val.erase x,
{ simp },
rw [to_cycle, key],
simp [hx]
end
lemma nodup_to_cycle (f : perm α) (hf : is_cycle f) : (to_cycle f hf).nodup :=
begin
obtain ⟨x, hx, -⟩ := id hf,
simpa [to_cycle_eq_to_list f hf x hx] using nodup_to_list _ _
end
lemma nontrivial_to_cycle (f : perm α) (hf : is_cycle f) : (to_cycle f hf).nontrivial :=
begin
obtain ⟨x, hx, -⟩ := id hf,
simp [to_cycle_eq_to_list f hf x hx, hx, cycle.nontrivial_coe_nodup_iff (nodup_to_list _ _)]
end
/--
Any cyclic `f : perm α` is isomorphic to the nontrivial `cycle α`
that corresponds to repeated application of `f`.
The forward direction is implemented by `equiv.perm.to_cycle`.
-/
def iso_cycle : {f : perm α // is_cycle f} ≃ {s : cycle α // s.nodup ∧ s.nontrivial} :=
{ to_fun := λ f, ⟨to_cycle (f : perm α) f.prop, nodup_to_cycle f f.prop,
nontrivial_to_cycle _ f.prop⟩,
inv_fun := λ s, ⟨(s : cycle α).form_perm s.prop.left,
(s : cycle α).is_cycle_form_perm _ s.prop.right⟩,
left_inv := λ f, by {
obtain ⟨x, hx, -⟩ := id f.prop,
simpa [to_cycle_eq_to_list (f : perm α) f.prop x hx, form_perm_to_list, subtype.ext_iff]
using f.prop.cycle_of_eq hx },
right_inv := λ s, by {
rcases s with ⟨⟨s⟩, hn, ht⟩,
obtain ⟨x, -, -, hx, -⟩ := id ht,
have hl : 2 ≤ s.length := by simpa using cycle.length_nontrivial ht,
simp only [cycle.mk_eq_coe, cycle.nodup_coe_iff, cycle.mem_coe_iff, subtype.coe_mk,
cycle.form_perm_coe] at hn hx ⊢,
rw to_cycle_eq_to_list _ _ x,
{ refine quotient.sound' _,
exact to_list_form_perm_is_rotated_self _ hl hn _ hx },
{ rw [←mem_support, support_form_perm_of_nodup _ hn],
{ simpa using hx },
{ rintro _ rfl,
simpa [nat.succ_le_succ_iff] using hl } } } }
/--
Any cyclic `f : perm α` is isomorphic to the nontrivial `cycle α`
that corresponds to repeated application of `f`.
The forward direction is implemented by finding this `cycle α` using `fintype.choose`.
-/
def iso_cycle' : {f : perm α // is_cycle f} ≃ {s : cycle α // s.nodup ∧ s.nontrivial} :=
{ to_fun := λ f, fintype.choose _ f.prop.exists_unique_cycle_nontrivial_subtype,
inv_fun := λ s, ⟨(s : cycle α).form_perm s.prop.left,
(s : cycle α).is_cycle_form_perm _ s.prop.right⟩,
left_inv := λ f, by simpa [subtype.ext_iff]
using fintype.choose_spec _ f.prop.exists_unique_cycle_nontrivial_subtype,
right_inv := λ ⟨s, hs, ht⟩, by {
simp [subtype.coe_mk],
convert fintype.choose_subtype_eq (λ (s' : cycle α), s'.nodup ∧ s'.nontrivial) _,
ext ⟨s', hs', ht'⟩,
simp [cycle.form_perm_eq_form_perm_iff, (iff_not_comm.mp hs.nontrivial_iff),
(iff_not_comm.mp hs'.nontrivial_iff), ht] } }
notation `c[` l:(foldr `, ` (h t, list.cons h t) list.nil `]`) :=
cycle.form_perm ↑l (cycle.nodup_coe_iff.mpr dec_trivial)
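-- Example reading of the notation (illustrative; as the module docstring notes, actually
-- evaluating such permutations can be slow): `c[(0 : fin 5), 1, 2]` is the cyclic
-- permutation sending `0 ↦ 1 ↦ 2 ↦ 0` and fixing `3` and `4`, while
-- `c[(1 : fin 7), 2, 3] * c[0, 5]` is a product of two disjoint cycles.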
instance repr_perm [has_repr α] : has_repr (perm α) :=
⟨λ f, repr (multiset.pmap (λ (g : perm α) (hg : g.is_cycle),
iso_cycle ⟨g, hg⟩) -- to_cycle is faster?
(perm.cycle_factors_finset f).val
(λ g hg, (mem_cycle_factors_finset_iff.mp (finset.mem_def.mpr hg)).left))⟩
end equiv.perm
blob_id: 50a6d7fd8031a3707ab032a4c501a074be4ab50c
directory_id: d406927ab5617694ec9ea7001f101b7c9e3d9702
path: /src/category_theory/groupoid/subgroupoid.lean
content_id: 04d52184a73232db6e0d955a2ee83d5073441def
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: alreadydone/mathlib
snapshot_id: dc0be621c6c8208c581f5170a8216c5ba6721927
revision_id: c982179ec21091d3e102d8a5d9f5fe06c8fafb73
branch_name: refs/heads/master
visit_date: 1,685,523,275,196
revision_date: 1,670,184,141,000
committer_date: 1,670,184,141,000
github_id: 287,574,545
star_events_count: 0
fork_events_count: 0
gha_license_id: Apache-2.0
gha_event_created_at: 1,670,290,714,000
gha_created_at: 1,597,421,623,000
gha_language: Lean
src_encoding: UTF-8
language: Lean
is_vendor: false
is_generated: false
length_bytes: 24,677
extension: lean
content:
/-
Copyright (c) 2022 Rémi Bottinelli. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Rémi Bottinelli, Junyan Xu
-/
import category_theory.groupoid.vertex_group
import category_theory.groupoid.basic
import category_theory.groupoid
import algebra.group.defs
import data.set.lattice
import group_theory.subgroup.basic
import order.galois_connection
/-!
# Subgroupoid
This file defines subgroupoids as `structure`s containing the subsets of arrows and their
stability under composition and inversion.
Also defined are:
* the complete lattice structure on subgroupoids given by containment;
* images and preimages of subgroupoids under a functor;
* the notion of normality of subgroupoids and its stability under intersection and preimage;
* compatibility of the above with `groupoid.vertex_group`.
## Main definitions
Given a type `C` with associated `groupoid C` instance.
* `subgroupoid C` is the type of subgroupoids of `C`
* `subgroupoid.is_normal` is the property that the subgroupoid is stable under conjugation
by arbitrary arrows, _and_ that all identity arrows are contained in the subgroupoid.
* `subgroupoid.comap` is the "preimage" map of subgroupoids along a functor.
* `subgroupoid.map` is the "image" map of subgroupoids along a functor _injective on objects_.
* `subgroupoid.vertex_subgroup` is the subgroup of the `vertex group` at a given vertex `v`,
assuming `v` is contained in the `subgroupoid` (meaning, by definition, that the arrow `𝟙 v`
is contained in the subgroupoid).
## Implementation details
The structure of this file is copied from/inspired by `group_theory.subgroup.basic`
and `combinatorics.simple_graph.subgraph`.
## TODO
* Equivalent inductive characterization of generated (normal) subgroupoids.
* Characterization of normal subgroupoids as kernels.
* Prove that `full` and `disconnect` preserve intersections (and `disconnect` also unions)
## Tags
subgroupoid
-/
namespace category_theory
open set groupoid
local attribute [protected] category_theory.inv
universes u v
variables {C : Type u} [groupoid C]
/--
A subgroupoid of `C` consists of a choice of arrows for each pair of vertices, closed
under composition and inverses.
-/
@[ext] structure subgroupoid (C : Type u) [groupoid C] :=
(arrows : ∀ (c d : C), set (c ⟶ d))
(inv : ∀ {c d} {p : c ⟶ d} (hp : p ∈ arrows c d),
inv p ∈ arrows d c)
(mul : ∀ {c d e} {p} (hp : p ∈ arrows c d) {q} (hq : q ∈ arrows d e),
p ≫ q ∈ arrows c e)
attribute [protected] subgroupoid.inv subgroupoid.mul
namespace subgroupoid
variable (S : subgroupoid C)
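/- A minimal usage sketch (illustrative): the `inv` and `mul` fields of a
subgroupoid apply directly as membership proofs. -/
example {c d e : C} {f : c ⟶ d} {g : d ⟶ e}
  (hf : f ∈ S.arrows c d) (hg : g ∈ S.arrows d e) :
  (f ≫ g) ≫ inv g ∈ S.arrows c d :=
S.mul (S.mul hf hg) (S.inv hg)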
lemma inv_mem_iff {c d : C} (f : c ⟶ d) : inv f ∈ S.arrows d c ↔ f ∈ S.arrows c d :=
begin
split,
{ rintro h,
suffices : inv (inv f) ∈ S.arrows c d,
{ simpa only [inv_eq_inv, is_iso.inv_inv] using this, },
{ apply S.inv h, }, },
{ apply S.inv, },
end
lemma mul_mem_cancel_left {c d e : C} {f : c ⟶ d} {g : d ⟶ e} (hf : f ∈ S.arrows c d) :
f ≫ g ∈ S.arrows c e ↔ g ∈ S.arrows d e :=
begin
split,
{ rintro h,
suffices : (inv f) ≫ f ≫ g ∈ S.arrows d e,
{ simpa only [inv_eq_inv, is_iso.inv_hom_id_assoc] using this, },
{ apply S.mul (S.inv hf) h, }, },
{ apply S.mul hf, },
end
lemma mul_mem_cancel_right {c d e : C} {f : c ⟶ d} {g : d ⟶ e} (hg : g ∈ S.arrows d e) :
f ≫ g ∈ S.arrows c e ↔ f ∈ S.arrows c d :=
begin
split,
{ rintro h,
suffices : (f ≫ g) ≫ (inv g) ∈ S.arrows c d,
{ simpa only [inv_eq_inv, is_iso.hom_inv_id, category.comp_id, category.assoc] using this, },
{ apply S.mul h (S.inv hg), }, },
{ exact λ hf, S.mul hf hg, },
end
/-- The vertices of `C` on which `S` has non-trivial isotropy -/
def objs : set C := {c : C | (S.arrows c c).nonempty}
lemma mem_objs_of_src {c d : C} {f : c ⟶ d} (h : f ∈ S.arrows c d) : c ∈ S.objs :=
⟨f ≫ inv f, S.mul h (S.inv h)⟩
lemma mem_objs_of_tgt {c d : C} {f : c ⟶ d} (h : f ∈ S.arrows c d) : d ∈ S.objs :=
⟨(inv f) ≫ f, S.mul (S.inv h) h⟩
lemma id_mem_of_nonempty_isotropy (c : C) :
c ∈ objs S → 𝟙 c ∈ S.arrows c c :=
begin
rintro ⟨γ,hγ⟩,
convert S.mul hγ (S.inv hγ),
simp only [inv_eq_inv, is_iso.hom_inv_id],
end
lemma id_mem_of_src {c d : C} {f : c ⟶ d} (h : f ∈ S.arrows c d) : (𝟙 c) ∈ S.arrows c c :=
id_mem_of_nonempty_isotropy S c (mem_objs_of_src S h)
lemma id_mem_of_tgt {c d : C} {f : c ⟶ d} (h : f ∈ S.arrows c d) : (𝟙 d) ∈ S.arrows d d :=
id_mem_of_nonempty_isotropy S d (mem_objs_of_tgt S h)
/-- A subgroupoid seen as a quiver on vertex set `C` -/
def as_wide_quiver : quiver C := ⟨λ c d, subtype $ S.arrows c d⟩
/-- The coercion of a subgroupoid as a groupoid -/
@[simps to_category_comp_coe, simps inv_coe (lemmas_only)]
instance coe : groupoid S.objs :=
{ hom := λ a b, S.arrows a.val b.val,
id := λ a, ⟨𝟙 a.val, id_mem_of_nonempty_isotropy S a.val a.prop⟩,
comp := λ a b c p q, ⟨p.val ≫ q.val, S.mul p.prop q.prop⟩,
id_comp' := λ a b ⟨p,hp⟩, by simp only [category.id_comp],
comp_id' := λ a b ⟨p,hp⟩, by simp only [category.comp_id],
assoc' := λ a b c d ⟨p,hp⟩ ⟨q,hq⟩ ⟨r,hr⟩, by simp only [category.assoc],
inv := λ a b p, ⟨inv p.val, S.inv p.prop⟩,
inv_comp' := λ a b ⟨p,hp⟩, by simp only [inv_comp],
comp_inv' := λ a b ⟨p,hp⟩, by simp only [comp_inv] }
@[simp] lemma coe_inv_coe' {c d : S.objs} (p : c ⟶ d) :
(category_theory.inv p).val = category_theory.inv p.val :=
by { simp only [subtype.val_eq_coe, ←inv_eq_inv, coe_inv_coe], }
/-- The embedding of the coerced subgroupoid into its parent -/
def hom : S.objs ⥤ C :=
{ obj := λ c, c.val,
map := λ c d f, f.val,
map_id' := λ c, rfl,
map_comp' := λ c d e f g, rfl }
lemma hom.inj_on_objects : function.injective (hom S).obj :=
by { rintros ⟨c,hc⟩ ⟨d,hd⟩ hcd, simp only [subtype.mk_eq_mk], exact hcd }
lemma hom.faithful :
∀ c d, function.injective (λ (f : c ⟶ d), (hom S).map f) :=
by { rintros ⟨c,hc⟩ ⟨d,hd⟩ ⟨f,hf⟩ ⟨g,hg⟩ hfg, simp only [subtype.mk_eq_mk], exact hfg, }
/-- The subgroup of the vertex group at `c` given by the subgroupoid -/
def vertex_subgroup {c : C} (hc : c ∈ S.objs) : subgroup (c ⟶ c) :=
{ carrier := S.arrows c c,
mul_mem' := λ f g hf hg, S.mul hf hg,
one_mem' := id_mem_of_nonempty_isotropy _ _ hc,
inv_mem' := λ f hf, S.inv hf }
instance : set_like (subgroupoid C) (Σ (c d : C), c ⟶ d) :=
{ coe := λ S, {F | F.2.2 ∈ S.arrows F.1 F.2.1},
coe_injective' := λ ⟨S, _, _⟩ ⟨T, _, _⟩ h, by { ext c d f, apply set.ext_iff.1 h ⟨c, d, f⟩ } }
lemma mem_iff (S : subgroupoid C) (F : Σ c d, c ⟶ d) :
F ∈ S ↔ F.2.2 ∈ S.arrows F.1 F.2.1 := iff.rfl
lemma le_iff (S T : subgroupoid C) : (S ≤ T) ↔ (∀ {c d}, (S.arrows c d) ⊆ (T.arrows c d)) :=
by { rw [set_like.le_def, sigma.forall], exact forall_congr (λ c, sigma.forall) }
instance : has_top (subgroupoid C) :=
⟨ { arrows := (λ _ _, set.univ),
mul := by { rintros, trivial, },
inv := by { rintros, trivial, } } ⟩
lemma mem_top {c d : C} (f : c ⟶ d) : f ∈ (⊤ : subgroupoid C).arrows c d := trivial
lemma mem_top_objs (c : C) : c ∈ (⊤ : subgroupoid C).objs :=
by { dsimp [has_top.top,objs], simp only [univ_nonempty], }
instance : has_bot (subgroupoid C) :=
⟨ { arrows := (λ _ _, ∅),
mul := λ _ _ _ _, false.elim,
inv := λ _ _ _, false.elim } ⟩
instance : inhabited (subgroupoid C) := ⟨⊤⟩
instance : has_inf (subgroupoid C) :=
⟨ λ S T,
{ arrows := (λ c d, (S.arrows c d) ∩ (T.arrows c d)),
inv := by { rintros, exact ⟨S.inv hp.1, T.inv hp.2⟩, },
mul := by { rintros, exact ⟨S.mul hp.1 hq.1, T.mul hp.2 hq.2⟩, } } ⟩
instance : has_Inf (subgroupoid C) :=
⟨ λ s,
{ arrows := λ c d, ⋂ S ∈ s, (subgroupoid.arrows S c d),
inv := by { intros, rw mem_Inter₂ at hp ⊢, exact λ S hS, S.inv (hp S hS) },
mul := by { intros, rw mem_Inter₂ at hp hq ⊢,exact λ S hS, S.mul (hp S hS) (hq S hS) } } ⟩
instance : complete_lattice (subgroupoid C) :=
{ bot := (⊥),
bot_le := λ S, empty_subset _,
top := (⊤),
le_top := λ S, subset_univ _,
inf := (⊓),
le_inf := λ R S T RS RT _ pR, ⟨RS pR, RT pR⟩,
inf_le_left := λ R S _, and.left,
inf_le_right := λ R S _, and.right,
.. complete_lattice_of_Inf (subgroupoid C)
begin
refine (λ s, ⟨λ S Ss F, _, λ T Tl F fT, _⟩);
simp only [Inf, mem_iff, mem_Inter],
exacts [λ hp, hp S Ss, λ S Ss, Tl Ss fT],
end }
lemma le_objs {S T : subgroupoid C} (h : S ≤ T) : S.objs ⊆ T.objs :=
λ s ⟨γ, hγ⟩, ⟨γ, @h ⟨s, s, γ⟩ hγ⟩
/-- The functor associated to the embedding of subgroupoids -/
def inclusion {S T : subgroupoid C} (h : S ≤ T) : S.objs ⥤ T.objs :=
{ obj := λ s, ⟨s.val, le_objs h s.prop⟩,
map := λ s t f, ⟨f.val, @h ⟨s, t, f.val⟩ f.prop⟩,
map_id' := λ _, rfl,
map_comp' := λ _ _ _ _ _, rfl }
lemma inclusion_inj_on_objects {S T : subgroupoid C} (h : S ≤ T) :
function.injective (inclusion h).obj :=
λ ⟨s,hs⟩ ⟨t,ht⟩, by simpa only [inclusion, subtype.mk_eq_mk] using id
lemma inclusion_faithful {S T : subgroupoid C} (h : S ≤ T) (s t : S.objs) :
function.injective (λ (f : s ⟶ t), (inclusion h).map f) :=
λ ⟨f,hf⟩ ⟨g,hg⟩, by { dsimp only [inclusion], simpa only [subtype.mk_eq_mk] using id }
lemma inclusion_refl {S : subgroupoid C} : inclusion (le_refl S) = 𝟭 S.objs :=
functor.hext (λ ⟨s,hs⟩, rfl) (λ ⟨s,hs⟩ ⟨t,ht⟩ ⟨f,hf⟩, heq_of_eq rfl)
lemma inclusion_trans {R S T : subgroupoid C} (k : R ≤ S) (h : S ≤ T) :
inclusion (k.trans h) = (inclusion k) ⋙ (inclusion h) := rfl
lemma inclusion_comp_embedding {S T : subgroupoid C} (h : S ≤ T) :
(inclusion h) ⋙ T.hom = S.hom := rfl
/-- The family of arrows of the discrete groupoid -/
inductive discrete.arrows : Π (c d : C), (c ⟶ d) → Prop
| id (c : C) : discrete.arrows c c (𝟙 c)
/-- The only arrows of the discrete groupoid are the identity arrows. -/
def discrete : subgroupoid C :=
{ arrows := discrete.arrows,
inv := by { rintros _ _ _ ⟨⟩, simp only [inv_eq_inv, is_iso.inv_id], split, },
mul := by { rintros _ _ _ _ ⟨⟩ _ ⟨⟩, rw category.comp_id, split, } }
lemma mem_discrete_iff {c d : C} (f : c ⟶ d) :
(f ∈ (discrete).arrows c d) ↔ (∃ (h : c = d), f = eq_to_hom h) :=
⟨by { rintro ⟨⟩, exact ⟨rfl, rfl⟩ }, by { rintro ⟨rfl, rfl⟩, split }⟩
/-- A subgroupoid is wide if its carrier set is all of `C` -/
structure is_wide : Prop :=
(wide : ∀ c, (𝟙 c) ∈ (S.arrows c c))
lemma is_wide_iff_objs_eq_univ : S.is_wide ↔ S.objs = set.univ :=
begin
split,
{ rintro h,
ext, split; simp only [top_eq_univ, mem_univ, implies_true_iff, forall_true_left],
apply mem_objs_of_src S (h.wide x), },
{ rintro h,
refine ⟨λ c, _⟩,
obtain ⟨γ,γS⟩ := (le_of_eq h.symm : ⊤ ⊆ S.objs) (set.mem_univ c),
exact id_mem_of_src S γS, },
end
lemma is_wide.id_mem {S : subgroupoid C} (Sw : S.is_wide) (c : C) :
(𝟙 c) ∈ S.arrows c c := Sw.wide c
lemma is_wide.eq_to_hom_mem {S : subgroupoid C} (Sw : S.is_wide) {c d : C} (h : c = d) :
(eq_to_hom h) ∈ S.arrows c d := by
{ cases h, simp only [eq_to_hom_refl], apply Sw.id_mem c, }
/-- A subgroupoid is normal if it is wide and satisfies the expected stability under conjugacy. -/
structure is_normal extends (is_wide S) : Prop :=
(conj : ∀ {c d} (p : c ⟶ d) {γ : c ⟶ c} (hs : γ ∈ S.arrows c c),
((inv p) ≫ γ ≫ p) ∈ (S.arrows d d))
lemma is_normal.conj' {S : subgroupoid C} (Sn : is_normal S) :
∀ {c d} (p : d ⟶ c) {γ : c ⟶ c} (hs : γ ∈ S.arrows c c), (p ≫ γ ≫ (inv p)) ∈ (S.arrows d d) :=
λ c d p γ hs, by { convert Sn.conj (inv p) hs, simp, }
lemma is_normal.conjugation_bij (Sn : is_normal S) {c d} (p : c ⟶ d) :
set.bij_on (λ γ : c ⟶ c, (inv p) ≫ γ ≫ p) (S.arrows c c) (S.arrows d d) :=
begin
refine ⟨λ γ γS, Sn.conj p γS, λ γ₁ γ₁S γ₂ γ₂S h, _, λ δ δS, ⟨p ≫ δ ≫ (inv p), Sn.conj' p δS, _⟩⟩,
{ simpa only [inv_eq_inv, category.assoc, is_iso.hom_inv_id,
category.comp_id, is_iso.hom_inv_id_assoc] using p ≫= h =≫ inv p },
{ simp only [inv_eq_inv, category.assoc, is_iso.inv_hom_id,
category.comp_id, is_iso.inv_hom_id_assoc] },
end
lemma top_is_normal : is_normal (⊤ : subgroupoid C) :=
{ wide := (λ c, trivial),
conj := (λ a b c d e, trivial) }
lemma Inf_is_normal (s : set $ subgroupoid C) (sn : ∀ S ∈ s, is_normal S) : is_normal (Inf s) :=
{ wide := by { simp_rw [Inf, mem_Inter₂], exact λ c S Ss, (sn S Ss).wide c },
conj := by { simp_rw [Inf, mem_Inter₂], exact λ c d p γ hγ S Ss, (sn S Ss).conj p (hγ S Ss) } }
lemma discrete_is_normal : (@discrete C _).is_normal :=
{ wide := λ c, by { constructor, },
conj := λ c d f γ hγ, by
{ cases hγ, simp only [inv_eq_inv, category.id_comp, is_iso.inv_hom_id], constructor, } }
lemma is_normal.vertex_subgroup (Sn : is_normal S) (c : C) (cS : c ∈ S.objs) :
(S.vertex_subgroup cS).normal :=
{ conj_mem := λ x hx y, by { rw mul_assoc, exact Sn.conj' y hx } }
section generated_subgroupoid
-- TODO: prove that `generated` is just "words in X", and similarly for `generated_normal`
variable (X : ∀ c d : C, set (c ⟶ d))
/-- The subgroupoid generated by the set of arrows `X` -/
def generated : subgroupoid C :=
Inf {S : subgroupoid C | ∀ c d, X c d ⊆ S.arrows c d}
lemma subset_generated (c d : C) : X c d ⊆ (generated X).arrows c d :=
begin
dsimp only [generated, Inf],
simp only [subset_Inter₂_iff],
exact λ S hS f fS, hS _ _ fS,
end
/-- The normal subgroupoid generated by the set of arrows `X` -/
def generated_normal : subgroupoid C :=
Inf {S : subgroupoid C | (∀ c d, X c d ⊆ S.arrows c d) ∧ S.is_normal}
lemma generated_le_generated_normal : generated X ≤ generated_normal X :=
begin
apply @Inf_le_Inf (subgroupoid C) _,
exact λ S ⟨h,_⟩, h,
end
lemma generated_normal_is_normal : (generated_normal X).is_normal :=
Inf_is_normal _ (λ S h, h.right)
lemma is_normal.generated_normal_le {S : subgroupoid C} (Sn : S.is_normal) :
generated_normal X ≤ S ↔ ∀ c d, X c d ⊆ S.arrows c d :=
begin
split,
{ rintro h c d,
let h' := generated_le_generated_normal X,
rw le_iff at h h',
exact ((subset_generated X c d).trans (@h' c d)).trans (@h c d), },
{ rintro h,
apply @Inf_le (subgroupoid C) _,
exact ⟨h,Sn⟩, },
end
end generated_subgroupoid
section hom
variables {D : Type*} [groupoid D] (φ : C ⥤ D)
/--
A functor between groupoids defines a map of subgroupoids in the reverse direction
by taking preimages.
-/
def comap (S : subgroupoid D) : subgroupoid C :=
{ arrows := λ c d, {f : c ⟶ d | φ.map f ∈ S.arrows (φ.obj c) (φ.obj d)},
inv := λ c d p hp, by { rw [mem_set_of, inv_eq_inv, φ.map_inv p, ← inv_eq_inv], exact S.inv hp },
mul := begin
rintros,
simp only [mem_set_of, functor.map_comp],
apply S.mul; assumption,
end }
lemma comap_mono (S T : subgroupoid D) :
S ≤ T → comap φ S ≤ comap φ T := λ ST ⟨c,d,p⟩, @ST ⟨_,_,_⟩
lemma is_normal_comap {S : subgroupoid D} (Sn : is_normal S) : is_normal (comap φ S) :=
{ wide := λ c, by { rw [comap, mem_set_of, functor.map_id], apply Sn.wide, },
conj := λ c d f γ hγ, by
{ simp_rw [inv_eq_inv f, comap, mem_set_of, functor.map_comp, functor.map_inv, ←inv_eq_inv],
exact Sn.conj _ hγ, } }
@[simp] lemma comap_comp {E : Type*} [groupoid E] (ψ : D ⥤ E) :
comap (φ ⋙ ψ) = (comap φ) ∘ (comap ψ) := rfl
/-- The kernel of a functor between groupoids is the preimage of the discrete subgroupoid. -/
def ker : subgroupoid C := comap φ discrete
lemma mem_ker_iff {c d : C} (f : c ⟶ d) :
f ∈ (ker φ).arrows c d ↔ ∃ (h : φ.obj c = φ.obj d), φ.map f = eq_to_hom h :=
mem_discrete_iff (φ.map f)
lemma ker_is_normal : (ker φ).is_normal := is_normal_comap φ (discrete_is_normal)
@[simp]
lemma ker_comp {E : Type*} [groupoid E] (ψ : D ⥤ E) : ker (φ ⋙ ψ) = comap φ (ker ψ) := rfl
/-- The family of arrows of the image of a subgroupoid under a functor injective on objects -/
inductive map.arrows (hφ : function.injective φ.obj) (S : subgroupoid C) :
Π (c d : D), (c ⟶ d) → Prop
| im {c d : C} (f : c ⟶ d) (hf : f ∈ S.arrows c d) : map.arrows (φ.obj c) (φ.obj d) (φ.map f)
lemma map.arrows_iff (hφ : function.injective φ.obj) (S : subgroupoid C) {c d : D} (f : c ⟶ d) :
map.arrows φ hφ S c d f ↔
∃ (a b : C) (g : a ⟶ b) (ha : φ.obj a = c) (hb : φ.obj b = d) (hg : g ∈ S.arrows a b),
f = (eq_to_hom ha.symm) ≫ φ.map g ≫ (eq_to_hom hb) :=
begin
split,
{ rintro ⟨g,hg⟩, exact ⟨_,_,g,rfl,rfl,hg, eq_conj_eq_to_hom _⟩ },
{ rintro ⟨a,b,g,rfl,rfl,hg,rfl⟩, rw ← eq_conj_eq_to_hom, split, exact hg },
end
/-- The "forward" image of a subgroupoid under a functor injective on objects -/
def map (hφ : function.injective φ.obj) (S : subgroupoid C) : subgroupoid D :=
{ arrows := map.arrows φ hφ S,
inv := begin
rintro _ _ _ ⟨⟩,
rw [inv_eq_inv, ←functor.map_inv, ←inv_eq_inv],
split, apply S.inv, assumption,
end,
mul := begin
rintro _ _ _ _ ⟨f,hf⟩ q hq,
obtain ⟨c₃,c₄,g,he,rfl,hg,gq⟩ := (map.arrows_iff φ hφ S q).mp hq,
cases hφ he, rw [gq, ← eq_conj_eq_to_hom, ← φ.map_comp],
split, exact S.mul hf hg,
end }
lemma mem_map_iff (hφ : function.injective φ.obj) (S : subgroupoid C) {c d : D} (f : c ⟶ d) :
f ∈ (map φ hφ S).arrows c d ↔
∃ (a b : C) (g : a ⟶ b) (ha : φ.obj a = c) (hb : φ.obj b = d) (hg : g ∈ S.arrows a b),
f = (eq_to_hom ha.symm) ≫ φ.map g ≫ (eq_to_hom hb) := map.arrows_iff φ hφ S f
lemma galois_connection_map_comap (hφ : function.injective φ.obj) :
galois_connection (map φ hφ) (comap φ) :=
begin
rintro S T, simp_rw [le_iff], split,
{ exact λ h c d f fS, h (map.arrows.im f fS), },
{ rintros h _ _ g ⟨a,gφS⟩,
exact h gφS, },
end
lemma map_mono (hφ : function.injective φ.obj) (S T : subgroupoid C) :
S ≤ T → map φ hφ S ≤ map φ hφ T :=
λ h, (galois_connection_map_comap φ hφ).monotone_l h
lemma le_comap_map (hφ : function.injective φ.obj) (S : subgroupoid C) :
S ≤ comap φ (map φ hφ S) := (galois_connection_map_comap φ hφ).le_u_l S
lemma map_comap_le (hφ : function.injective φ.obj) (T : subgroupoid D) :
map φ hφ (comap φ T) ≤ T := (galois_connection_map_comap φ hφ).l_u_le T
lemma map_le_iff_le_comap (hφ : function.injective φ.obj)
(S : subgroupoid C) (T : subgroupoid D) :
map φ hφ S ≤ T ↔ S ≤ comap φ T := (galois_connection_map_comap φ hφ).le_iff_le
lemma mem_map_objs_iff (hφ : function.injective φ.obj) (d : D) :
d ∈ (map φ hφ S).objs ↔ ∃ c ∈ S.objs, φ.obj c = d :=
begin
dsimp [objs, map],
split,
{ rintro ⟨f,hf⟩,
change map.arrows φ hφ S d d f at hf, rw map.arrows_iff at hf,
obtain ⟨c,d,g,ec,ed,eg,gS,eg⟩ := hf,
exact ⟨c, ⟨mem_objs_of_src S eg, ec⟩⟩, },
{ rintros ⟨c,⟨γ,γS⟩,rfl⟩,
exact ⟨φ.map γ,⟨γ,γS⟩⟩, }
end
@[simp]
lemma map_objs_eq (hφ : function.injective φ.obj) : (map φ hφ S).objs = φ.obj '' S.objs :=
by { ext, convert mem_map_objs_iff S φ hφ x, simp only [mem_image, exists_prop], }
/-- The image of a functor injective on objects -/
def im (hφ : function.injective φ.obj) := map φ hφ (⊤)
lemma mem_im_iff (hφ : function.injective φ.obj) {c d : D} (f : c ⟶ d) :
f ∈ (im φ hφ).arrows c d ↔
∃ (a b : C) (g : a ⟶ b) (ha : φ.obj a = c) (hb : φ.obj b = d),
f = (eq_to_hom ha.symm) ≫ φ.map g ≫ (eq_to_hom hb) :=
by { convert map.arrows_iff φ hφ ⊤ f, simp only [has_top.top, mem_univ, exists_true_left] }
lemma mem_im_objs_iff (hφ : function.injective φ.obj) (d : D) :
d ∈ (im φ hφ).objs ↔ ∃ c : C, φ.obj c = d := by
{ simp only [im, mem_map_objs_iff, mem_top_objs, exists_true_left], }
lemma obj_surjective_of_im_eq_top (hφ : function.injective φ.obj) (hφ' : im φ hφ = ⊤) :
function.surjective φ.obj :=
begin
rintro d,
rw [←mem_im_objs_iff, hφ'],
apply mem_top_objs,
end
lemma is_normal_map (hφ : function.injective φ.obj) (hφ' : im φ hφ = ⊤) (Sn : S.is_normal) :
(map φ hφ S).is_normal :=
{ wide := λ d, by
{ obtain ⟨c,rfl⟩ := obj_surjective_of_im_eq_top φ hφ hφ' d,
change map.arrows φ hφ S _ _ (𝟙 _), rw ←functor.map_id,
constructor, exact Sn.wide c, },
conj := λ d d' g δ hδ, by
{ rw mem_map_iff at hδ,
obtain ⟨c,c',γ,cd,cd',γS,hγ⟩ := hδ, subst_vars, cases hφ cd',
have : d' ∈ (im φ hφ).objs, by { rw hφ', apply mem_top_objs, },
rw mem_im_objs_iff at this,
obtain ⟨c',rfl⟩ := this,
have : g ∈ (im φ hφ).arrows (φ.obj c) (φ.obj c'), by
{ rw hφ', trivial, },
rw mem_im_iff at this,
obtain ⟨b,b',f,hb,hb',_,hf⟩ := this, subst_vars, cases hφ hb, cases hφ hb',
change map.arrows φ hφ S (φ.obj c') (φ.obj c') _,
simp only [eq_to_hom_refl, category.comp_id, category.id_comp, inv_eq_inv],
suffices : map.arrows φ hφ S (φ.obj c') (φ.obj c') (φ.map $ inv f ≫ γ ≫ f),
{ simp only [inv_eq_inv, functor.map_comp, functor.map_inv] at this, exact this, },
{ constructor, apply Sn.conj f γS, } } }
end hom
section thin
/-- A subgroupoid `is_thin` if it has at most one arrow between any two vertices. -/
abbreviation is_thin := quiver.is_thin S.objs
lemma is_thin_iff : S.is_thin ↔ ∀ (c : S.objs), subsingleton (S.arrows c c) :=
by apply is_thin_iff
end thin
section disconnected
/-- A subgroupoid `is_totally_disconnected` if it has only isotropy arrows. -/
abbreviation is_totally_disconnected := is_totally_disconnected S.objs
lemma is_totally_disconnected_iff :
S.is_totally_disconnected ↔ ∀ c d, (S.arrows c d).nonempty → c = d :=
begin
split,
{ rintro h c d ⟨f,fS⟩,
rw ←@subtype.mk_eq_mk _ _ c (mem_objs_of_src S fS) d (mem_objs_of_tgt S fS),
exact h ⟨c, mem_objs_of_src S fS⟩ ⟨d, mem_objs_of_tgt S fS⟩ ⟨f, fS⟩, },
{ rintros h ⟨c, hc⟩ ⟨d, hd⟩ ⟨f, fS⟩,
simp only [subtype.mk_eq_mk],
exact h c d ⟨f, fS⟩, },
end
/-- The isotropy subgroupoid of `S` -/
def disconnect : subgroupoid C :=
{ arrows := λ c d f, c = d ∧ f ∈ S.arrows c d,
inv := by { rintros _ _ _ ⟨rfl, h⟩, exact ⟨rfl, S.inv h⟩, },
mul := by { rintros _ _ _ _ ⟨rfl, h⟩ _ ⟨rfl, h'⟩, exact ⟨rfl, S.mul h h'⟩, } }
lemma disconnect_le : S.disconnect ≤ S :=
by { rw le_iff, rintros _ _ _ ⟨⟩, assumption, }
lemma disconnect_normal (Sn : S.is_normal) : S.disconnect.is_normal :=
{ wide := λ c, ⟨rfl, Sn.wide c⟩,
conj := λ c d p γ ⟨_,h'⟩, ⟨rfl, Sn.conj _ h'⟩ }
@[simp] lemma mem_disconnect_objs_iff {c : C} : c ∈ S.disconnect.objs ↔ c ∈ S.objs :=
⟨λ ⟨γ, h, γS⟩, ⟨γ, γS⟩, λ ⟨γ, γS⟩, ⟨γ, rfl, γS⟩⟩
lemma disconnect_objs : S.disconnect.objs = S.objs :=
by { apply set.ext, apply mem_disconnect_objs_iff, }
lemma disconnect_is_totally_disconnected : S.disconnect.is_totally_disconnected :=
by { rw is_totally_disconnected_iff, exact λ c d ⟨f, h, fS⟩, h }
end disconnected
section full
variable (D : set C)
/-- The full subgroupoid on a set `D : set C` -/
def full : subgroupoid C :=
{ arrows := λ c d _, c ∈ D ∧ d ∈ D,
inv := by { rintros _ _ _ ⟨⟩, constructor; assumption, },
mul := by { rintros _ _ _ _ ⟨⟩ _ ⟨⟩, constructor; assumption,} }
lemma full_objs : (full D).objs = D :=
set.ext $ λ _, ⟨λ ⟨f, h, _⟩, h , λ h, ⟨𝟙 _, h, h⟩⟩
@[simp] lemma mem_full_iff {c d : C} {f : c ⟶ d} : f ∈ (full D).arrows c d ↔ c ∈ D ∧ d ∈ D :=
iff.rfl
@[simp] lemma mem_full_objs_iff {c : C} : c ∈ (full D).objs ↔ c ∈ D :=
by rw full_objs
@[simp] lemma full_empty : full ∅ = (⊥ : subgroupoid C) :=
by { ext, simp only [has_bot.bot, mem_full_iff, mem_empty_iff_false, and_self], }
@[simp] lemma full_univ : full set.univ = (⊤ : subgroupoid C) :=
by { ext, simp only [mem_full_iff, mem_univ, and_self, true_iff], }
lemma full_mono {D E : set C} (h : D ≤ E) : full D ≤ full E :=
begin
rw le_iff,
rintro c d f,
simp only [mem_full_iff],
exact λ ⟨hc, hd⟩, ⟨h hc, h hd⟩,
end
lemma full_arrow_eq_iff {c d : (full D).objs} {f g : c ⟶ d} :
f = g ↔ (↑f : c.val ⟶ d.val) = ↑g :=
by apply subtype.ext_iff
end full
end subgroupoid
end category_theory
|
56dd774d4bb8030430f1e9c72e2854943ba4a154
|
193da933cf42f2f9188bb47e3c973205bc2abc5c
|
/Exam2-Review-key.lean
|
56117d78380c34aaa8c609cc8d2be08bf908c405
|
[] |
no_license
|
pandaman64/cs-dm
|
aa4e2621c7a19e2dae911bc237c33e02fcb0c7a3
|
bfd2f5fd2612472e15bd970c7870b5d0dd73bd1c
|
refs/heads/master
| 1,647,620,340,607
| 1,570,055,187,000
| 1,570,055,187,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 18,867
|
lean
|
/-
You need to understand the following
elements of automated predicate logic
for Exam 2
Suppose P and Q are arbitrary propositions
(i.e., P Q : Prop) and T and V are arbitrary
types (i.e., T V : Type).
Know the following forms, how to prove them,
how to use proofs of them, and how to do these
things in Lean.
To know how to prove them and how to use proofs
of them, you need an intuitive understanding of
the introduction and elimination rules for each
form, and how to use them in Lean.
* true : Prop
* false : Prop
* = -- equality
* P ∧ Q : Prop
* (∀ p : P, Q) : Prop -- Q can involve p
* T → V : Type -- function type)
* P → Q : Prop -- implication)
* ¬ P : Prop
* P ↔ Q : Prop
* P ∨ Q : Prop
* (∃ p : P, Q) : Prop -- Q can involve p
* T → Prop -- a property of Ts
* T → V → Prop -- a T-V binary relation
Knowing this material, you are also expected to be
able to combine these reasoning rules to prove more
interesting propositions. In general, such a proof
first applies elimination rules to obtain additional
useful elements from given assumptions, and then uses
introduction rules to combine the elements obtained
into proofs of desired conclusions.
You should have an intuitive understanding of the
meaning of each form and how to use each form in
logical reasoning. In particular, understand (1)
the introduction rules for each -- how to construct
proofs in these forms -- and (2) the elimination
rules for each form -- how to use proofs in these
forms to obtain additional facts to be used in
constructing proofs of other propositions.
For extra credit, be able to work in Lean with
(1) propositions and proofs involving combinations
of quantifiers, such as (∀ x : X, ∃ y : Y, Z) and
(∃ x : X, ∀ y : Y, Z), and with (2) negations of
quantified propositions, such as ¬ (∃ p : P, Q)
and ¬ (∀ p : P, Q).
The test will be structured to help you to know and
to show how far you've gotten and where you still
have some work to do.
Here are exercises for each form.
-/
/- ***************************************** -/
/- ***************** true ****************** -/
/- ***************************************** -/
/-
(1)
Use "example" in Lean to prove that there
is a proof of true. Be sure that after the :=
you can provide a proof using both an expression
and a tactic script.
-/
example : true := true.intro
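-- The same fact proved with a tactic script, since the prompt asks for
-- both styles (illustrative):
example : true :=
begin
  exact true.intro,  -- `trivial` also works
end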
/- ***************************************** -/
/- ***************** false ***************** -/
/- ***************************************** -/
/-
(2)
Use "def" in Lean to define a function, fq,
that proves that if P and Q are propositions
and if Q is true then false → ¬ Q.
-/
def fq (P Q : Prop) (q : Q) : false → ¬ Q :=
begin
assume f : false,
  apply false.elim f, -- false elimination: from `f : false`, anything follows
end
/-
(3)
Use "example" in Lean to prove that if 0 = 1
then 0 ≠ 1.
-/
example : 0 = 1 → 0 ≠ 1 :=
begin
assume zo,
have f : false := nat.no_confusion zo,
/-
nat.no_confusion applied to an assumed
proof that 0, specifically, is equal to
something that is not zero, returns a
proof of false. The false elimination
principle then finishes off the proof.
-/
exact false.elim f,
end
/-
(4) Use "example" to prove that for any two
natural numbers, a and b if a = b then if
b ≠ a then a ≠ b.
-/
example : ∀ a b : ℕ, a = b → b ≠ a → a ≠ b :=
begin
assume a b,
assume aeqb,
assume nbeqa,
/-
The trick in this case is to apply
the symmetry rule for equality to
the proof of a=b to get a proof of
b=a so that you have a contradiction
that you can exploit to get a proof
of false, at which point you can use
false elimination.
-/
have beqa := eq.symm aeqb,
contradiction,
-- shorthand for (false.elim (nbeqa beqa))
end
/- ***************************************** -/
/- ***************** and ******************* -/
/- ***************************************** -/
/-
(5)
Use "example" to prove that if P, Q, and R
are propositions, P → Q → R → (P ∧ Q) ∧ R
-/
example :
∀ P Q R : Prop, P → Q → R → (P ∧ Q) ∧ R :=
begin
intros P Q R p q r,
exact (and.intro (and.intro p q) r),
end
/-
(6)
Use "example" to prove that if P, Q, and R
are propositions, (P ∧ Q) ∧ R → P ∧ R.
-/
example :
∀ P Q R : Prop, (P ∧ Q) ∧ R → P ∧ R :=
begin
intros P Q R pqr,
exact and.intro pqr.left.left pqr.right,
-- the following shorthand also works
-- exact ⟨ pqr.left.left, pqr.right ⟩
end
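-- For illustration, the same proof written entirely as a term using the
-- anonymous-constructor shorthand mentioned above:
example :
  ∀ P Q R : Prop, (P ∧ Q) ∧ R → P ∧ R :=
λ P Q R pqr, ⟨pqr.1.1, pqr.2⟩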
/- ***************************************** -/
/- *************** functions *************** -/
/- ***************************************** -/
/-
(7) Use example to prove that if S and T
are arbitrary types and if there is a value,
t : T, then S → T. Remember that a proof of
S → T has to be a function that, if given any
value of type S, returns some value of type T.
Present this proof as a Python-style function
definition, isFun, then using a tactic script.
The Π you can read and treast as ∀, for now.
-/
def isFun (S T: Type) (t : T) : S → T :=
/-
The key idea captured by this example is
that if you already have a value of type T
"lying around" (which you do in this case),
you can always define a function from S to
T. All it has to do is ignore its argument
and return t.
Similarly, if S and T were propositions, if
you have a proof, t, that T is true, then
you can easily prove S → T, and you'd do it
in exactly the same way. Assume a proof of
S, ignore it, and just return the proof, t,
of T, that you already have.
So, here's the function.
-/
λ s, t
-- and as a tactic script
example : ∀ S T : Type, T → (S → T) :=
begin
assume S T,
assume t,
show S → T,
from λ s, t,
end
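-- A concrete check (illustrative): `isFun` ignores its argument and
-- always returns the value `t` it was given.
example : isFun string ℕ 42 "ignored" = 42 := rfl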
/-
(8) use def to define comp to be a function
that takes as its argments, the types S, T,
and R, along with a function of type S → T
and a function of type T → R and that returns
a function of type S → R. It should take S,
T, and R implicitly and st and tr explicitly.
-/
/-
The arguments, S, T, and R are designated as
implicit by enclosing them in curly braces.
The values of these arguments (the types) can
be inferred from the explicit arguments, st
and tr.
-/
def comp {S T R : Type}
  (st : S → T)
  (tr : T → R)
  : S → R :=
/-
The comp (short for "compose") function,
given functions st and tr, composes them
into a function that takes an argument of
type S and returns a value of type R, by
first applying st to s, and then applying
tr to the result.
-/
λ s, tr (st s)
/-
(9) Define square to be a function that
takes a natural number and returns its
square, and inc to be a function that
takes a nat, n, and returns n + 1. Now
use def and comp to define incSquare to
be a function that takes a nat, n, as an
argument and that returns one more than
n^2. Use #reduce to check that the value
of (incSquare 5) is 26.
-/
def square (n : ℕ) := n^2
def inc (n : ℕ) := n + 1
def incSquare := comp square inc
#reduce incSquare 5
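-- The same check stated as a proof (illustrative): the claimed value 26
-- holds by computation.
example : incSquare 5 = 26 := rfl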
/-
(10)
Consider the function, sum4, below. What
is the type of (sum4 3 7)? What function is
(sum4 3 7)? Answer the second question
with a lambda abstraction.
-/
/-
NOTE: Sorry, when we posted this problem,
we deleted the definition of sum4 by mistake.
Here it is along with the solution.
-/
def sum4 (a b c d : ℕ) := a + b + c + d
#check sum4
#check sum4 7
#reduce (sum4 7)
#reduce (sum4 7 3)
#reduce (sum4 7 3 11)
#reduce (sum4 7 3 11 2)
-- The type of sum4 is ℕ → ℕ → ℕ → ℕ → ℕ,
-- that is, ℕ → (ℕ → (ℕ → (ℕ → ℕ))).
-- The type of sum4 3 7 is ℕ → ℕ → ℕ.
/-
The function, sum4 3 7, is the function,
λ a b, 10 + a + b, i.e., the function that
adds 10 to the sum of its two arguments.
-/
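-- A quick concrete check of this claim (illustrative):
example : sum4 3 7 1 2 = 13 := rfl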
/- ***************************************** -/
/- ************** implication ************** -/
/- ***************************************** -/
/-
(11)
Use example to prove (1) false → false, (2)
false → true, (3) ¬ (true → false).
-/
example : false → false := λ f : false, f
example : false → true := λ f, true.intro
example : ¬ (true → false) :=
begin
assume tf : true → false,
have t : true := true.intro,
contradiction,
end
/-
(12) NOTE CORRECTION HERE!!
Use example to prove that for any two
propositions, P, Q, if P ∧ Q is true
then if ¬ (P ∨ Q) is true, then you
can derive a proof of false.
-/
example :
∀ P : Prop, ∀ Q : Prop,
(P ∧ Q) → ¬ (P ∨ Q) → false :=
begin
intro P,
intro Q,
assume paq,
intro npaq,
have p := paq.left,
have porq := or.intro_left Q p,
--apply false.elim (npaq porq),
contradiction,
end
/-
The contradiction tactic looks for two
contradictory assumptions in the context,
and if it finds them, applies false.elim
automatically.
-/
example :
∀ P Q : Prop,
P ∧ Q → ¬ (P ∧ Q) → false :=
begin
intros, -- let Lean give names to assumptions
contradiction,
end
/- ***************************************** -/
/- *************** forall (∀) ************** -/
/- ***************************************** -/
/-
(13)
Use example to prove that for all proposition,
P, Q, and R, if P → Q → R then P → R.
-/
/-
You might have been tempted to prove this, but
it's the wrong proposition.
-/
example : ∀ P Q R : Prop, P → Q → R → (P → R) :=
begin
intros P Q R,
intros p q r,
exact λ p, r
end
/-
As written in English, the proposition to be
proved the following, and it can't be proved
because it isn't true in general.
-/
example : ∀ P Q R : Prop, (P → Q → R) → (P → R) :=
begin
intros P Q R pqr,
assume p,
have qr := pqr p,
  -- We need, but do not have, a proof of Q,
  -- so we can only abandon the proof (here with `sorry`).
  sorry,
end
/-
(14)
Prove that for any type, T, for any a : T,
and for any property, (P : T → Prop), if
(∀ t : T, P t), then P a.
-/
example : ∀ T : Type, ∀ a : T,
∀ (P : T → Prop), (∀ (t : T), P t) → P a :=
begin
assume T a P f,
exact f a
end
/-
This example illustrates the principle of
∀ elimination. A ∀ is really just a function
in constructive logic, expressing the idea
that if you provide *any* value of some type,
then there is a value of another type, namely
a proof of the following proposition. If you
have such a function, and a value of type T,
then you can obtain a proof of P a by simply
applying the "function" to a.
-/
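-- A concrete instance of ∀ elimination (illustrative): applying a
-- universal proof to the particular value 5.
example (h : ∀ n : ℕ, n + 0 = n) : 5 + 0 = 5 := h 5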
/- ***************************************** -/
/- *************** negation **************** -/
/- ***************************************** -/
/-
(15)
Show that for any propositions, P and Q,
¬ ((P ∧ Q) ∧ ((P ∧ Q) → (¬ Q ∧ P))).
-/
example : forall P Q : Prop,
¬ ((P ∧ Q) ∧ ((P ∧ Q) → (¬ Q ∧ P))) :=
begin
intros,
assume pf,
have pq := pf.left,
have imp := pf.right,
have nqp := imp pq,
have q := pq.right,
have np := nqp.left,
contradiction,
end
/-
(16)
Prove that for any propositions, P and R,
if P → R and ¬ P → R, then R. You might
need to use the law of the excluded middle.
-/
open classical
example: ∀ P R : Prop,
(P → R) → (¬ P → R) → R :=
begin
assume P R pr npr,
/-
At this point it looks like we don't have
much to work with, but remember that if you
are willing to use classical reasoning with
the law of the excluded middle then for any
proposition, you can always use case analysis
to consider what happens if that proposition
is true, then false respectively. Now take
another look at the context? What if P is
true? What is P is false?
-/
have pornp := em P,
cases pornp with p np,
exact (pr p),
exact (npr np),
end
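-- A further small illustration of the same idea (illustrative): the law
-- of the excluded middle lets us case-split on any proposition.
example (P : Prop) : ¬ P ∨ P :=
begin
  cases em P with p np,
  { exact or.inr p },
  { exact or.inl np },
end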
/- ***************************************** -/
/- ************* bi-implication ************ -/
/- ***************************************** -/
/-
(17)
Prove that for propositions P and Q,
((P → Q) ∧ (Q → P)) → (Q ↔ P).
-/
example : ∀ P Q : Prop, ((P → Q) ∧ (Q → P)) → (Q ↔ P) :=
begin
assume P Q pf,
exact iff.intro pf.2 pf.1,
end
/-
(18)
Prove that for propositions P and Q,
((P ↔ Q) ∧ P) → Q.
-/
example : forall P Q : Prop,
((P ↔ Q) ∧ P) → Q :=
begin
assume P Q pf,
have bi := pf.left,
have p := pf.right,
have pq := bi.1,
exact pq p,
end
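-- The same proof as a single term (illustrative), using iff.mp directly:
example : ∀ P Q : Prop, ((P ↔ Q) ∧ P) → Q :=
λ P Q pf, pf.1.mp pf.2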
/- ***************************************** -/
/- ************** disjunction ************** -/
/- ***************************************** -/
/-
(19)
Prove that if you eat donuts or if you eat
candy you will get cavities. The proposition
you prove should build in the assumptions
that if you eat donuts you will get cavities
and if you eat candy you will get cavities.
We start the proof for you. You complete it.
Start your proof like this:
example : ∀ donut candy cavity : Prop, ...
-/
/-
The problem asks you to prove "you will get
cavities" from a disjunction "eat donuts ∨
eat candy." It also allows you to assume
that eating either one will give you cavities.
This is a clear set-up for or elimination.
You could use or.elim directly, or use case
analysis, which is an indirect way of doing
the same thing.
-/
example : ∀ donut candy cavity : Prop,
donut ∨ candy → (candy → cavity) → (donut → cavity) → cavity :=
begin
intros D C V either candy donut,
cases either with d c,
exact (donut d),
exact (candy c),
end
/-
(20)
Prove that if P and Q are any propositions, and
if you have a proof of ¬(P ∨ Q) then you can
construct a proof of ¬ P.
-/
example : ∀ P Q: Prop, ¬ (P ∨ Q) -> ¬ P :=
begin
assume P,
intro Q,
assume nporq,
assume p,
have porq := or.intro_left Q p,
contradiction,
end
/-
Study this example carefully. The key insight
is that you have a proof of P and you also
have a proof of ¬ (P ∨ Q), so if you had a
proof of (P ∨ Q), you could use false elim;
but you can get yourself a proof of (P ∨ Q)
from the assumed proof of P using or intro!
This example makes it clear that you really
do have to think about what you have to work
with (in the context) and what you can do
with it.
-/
example : ∀ P : Prop, P → P :=
begin
  assume P,
  assume p,
  exact p,
end
/-
(21)
Show, without using em explicitly, that if
for any proposition, P, P ∨ ¬ P, then for any
proposition, Q, ¬ ¬ Q → Q. The proposition to
prove is (∀ P : Prop, P ∨ ¬ P) → (∀ Q, ¬¬ Q → Q).
Remember that a proof of (∀ P, S) can be applied
to a value of type P to get a value of type S.
-/
example : (∀ P : Prop, P ∨ ¬ P) → (∀ Q, ¬¬ Q → Q) :=
begin
assume em,
assume Q,
assume nnq,
have qornq := em Q,
cases qornq with q nq,
exact q,
apply false.elim (nnq nq),
end
/-
What you're being asked to do here is to
prove that double negation elimination is
valid if you can assume that ∀ P : Prop,
P ∨ ¬ P.
-/
example :
(∀ P : Prop, P ∨ ¬ P) → (∀ Q, ¬¬ Q → Q) :=
begin
assume em,
assume Q,
have QorNQ := em Q,
cases QorNQ with q nq,
assume nnq, assumption,
assume nnq, contradiction,
end
/- ***************************************** -/
/- **************** predicates ************* -/
/- ***************************************** -/
/-
(22)
Define notZero(n) to be a predicate on
natural numbers that is true when 0 ≠ n
(and false otherwise). Then prove two facts
using "example." First, ¬ (notZero 0). When
doing this proof, remember what (notZero 0)
means, and remember what negation means.
Second, prove (notZero 1).
-/
/-
A predicate is conceptually a proposition
with one or more parameter values to be
filled in. We represent a predicate as a
function from parameter values to Prop.
In this case, for any natural number, n,
we reduce it to the proposition that that
zero is unequal to the particular n given
as an argument.
-/
def notZero (n: ℕ) := 0 ≠ n
example : ¬ notZero 0 :=
begin
-- remember that ¬ X means X → false.
assume nzz,
apply nzz (eq.refl 0),
end
example : notZero 1 :=
begin
assume zeqo,
exact nat.no_confusion zeqo,
end
/-
(23)
Define eqString(s, t) to be a predicate on
values of type string, that is true when
s = t (and not true otherwise). Then prove:
eqString "Hello Lean" ("Hello " ++ "Lean")
-/
def eqString: string → string → Prop :=
λ s t, s = t
/-
What we have here is a predicate with two
arguments, which defines a binary relation.
Here the relation is one of equality between
pairs of strings. This relation contains all
pairs of strings where the two elements are
equal, and it contains no other pairs. We
can prove that a particular pair is in the
relation by simply using rfl (if the pair
really is in the relation, of course.) This
example shows again that equality in Lean
doesn't require exactly equal terms, but
rather terms that reduce to the same values.
-/
example :
eqString "Hello Lean" ("Hello " ++ "Lean") :=
rfl
/- ***************************************** -/
/- **************** exists ***************** -/
/- ***************************************** -/
/-
(24)
Prove that ∃ n : ℕ, n = 13.
-/
/-
A proof of an existentially quantified proposition
in constructive logic is a pair: ⟨ witness, proof ⟩,
where the witness is a value of the right type and
the proof is a proof that that particular witness
has the specified property. Here the only witness
that will work is 13, and the proof that 13 = 13 is
simply by the reflexivity of equality.
-/
example : ∃ n, n = 13 := exists.intro 13 rfl
/-
(25)
Prove ∀ s : string, ∃ n, n = string.length s.
-/
/-
This problem just asserts that for any string,
there is a natural number equal to the length
of the string. And the answer is string.length.
-/
example :
∀ s : string, ∃ n, n = string.length s :=
begin
intro s,
apply exists.intro (string.length s),
apply rfl,
end
example :
∀ s : string, ∃ n, n = string.length s :=
begin
assume s,
exact ⟨ string.length s, rfl ⟩ -- exists.intro
end
/-
(26)
Prove exists m : ℕ, exists n: ℕ, m * n = 100.
Remember that you can apply exists.intro to a
witness, leaving the proof to be constructed
interactively.
-/
example :
exists m : ℕ, exists n: ℕ, m * n = 100 :=
begin
apply exists.intro 10,
apply exists.intro 10,
exact rfl
end
/-
(27)
Prove that if P and S are properties of
natural numbers, and if (∃ n : ℕ, P n ∧ S n), then (∃ n : ℕ, P n ∨ S n).
-/
/-
The proof of this proposition follows the
common pattern of elmination rules followed
by introduction rules. Applying the elimination
rule for ∃ (followed by two intros) yields an
assumed witness, n, to (∃ n, P n ∧ S n), and
a proof, pf, that n has the property of having
both property P and property S. From there it
is a simple matter of using exists intro to
prove the final goal.
-/
example : ∀ P S : ℕ → Prop,
(∃ n, P n ∧ S n) → (∃ n, P n ∨ S n) :=
begin
intros P S pfex,
apply exists.elim pfex,
intro w,
assume w_has_prop,
apply exists.intro w,
apply or.intro_left,
exact and.elim_left w_has_prop,
end
-- A second, slightly more compact proof of the same proposition.
example : ∀ P S : ℕ → Prop,
  (∃ n, P n ∧ S n) → (∃ n, P n ∨ S n) :=
begin
  assume P S,
  assume ex,
  apply exists.elim ex,
  assume w pf,
  apply exists.intro w,
  apply or.inl,
  exact pf.left,
end
|
f9042f33e2fd16f4b90b11b94e5f6aff2e6c024d
|
b392eb79fb36952401156496daa60628ccb07438
|
/Lib4/PostPort/Pow.lean
|
2ad43bf137ffe2e64a045d2cf0f56a02ea2cca4f
|
[
"Apache-2.0"
] |
permissive
|
AurelienSaue/mathportsource
|
d9eabe74e3ab7774baa6a10a6dc8d4855ff92266
|
1a164e4fff7204c522c1f4ecc5024fd909be3b0b
|
refs/heads/master
| 1,685,214,377,305
| 1,623,621,223,000
| 1,623,621,223,000
| 364,191,042
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 421
|
lean
|
/-
Copyright (c) 2021 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Daniel Selsam
-/
import Lean3Lib.init.core
namespace Mathlib
universes u v
variable {α : Type u} {β : Type v}
noncomputable instance [has_pow α β] : HPow α β α := ⟨pow⟩
noncomputable instance [has_pow α α] : Pow α := ⟨pow⟩
end Mathlib
|
4d10a9609b931ade0e4daaf9de42ca1ec56acfb3
|
36938939954e91f23dec66a02728db08a7acfcf9
|
/lean/deps/galois_stdlib/src/galois/data/nat/basic.lean
|
9f0005ab2289bc384051ce92af5870f00ade0e42
|
[
"Apache-2.0"
] |
permissive
|
pnwamk/reopt-vcg
|
f8b56dd0279392a5e1c6aee721be8138e6b558d3
|
c9f9f185fbefc25c36c4b506bbc85fd1a03c3b6d
|
refs/heads/master
| 1,631,145,017,772
| 1,593,549,019,000
| 1,593,549,143,000
| 254,191,418
| 0
| 0
| null | 1,586,377,077,000
| 1,586,377,077,000
| null |
UTF-8
|
Lean
| false
| false
| 4,855
|
lean
|
-- This file contains basic lemmas for nat
import ...algebra.order
namespace galois
namespace nat
open nat
/-- pred _ + _ rewrite rule. -/
theorem pred_add (m n : ℕ) : pred m + n = if m = 0 then n else pred (m + n) :=
begin
cases m,
case zero {
simp,
},
case succ : m {
have p : ¬(succ m = 0) := by trivial,
simp [pred, p],
},
end
/-- _ + pred _ rewrite rule. -/
theorem add_pred (m n : ℕ) : m + pred n = if n = 0 then m else pred (m + n) :=
begin
cases n,
case zero {
simp,
},
case succ : n {
have p : ¬(succ n = 0) := by trivial,
simp [pred, p],
},
end
-- Rewrite x < y as the equivalent statement x + 1 ≤ y
protected theorem lt_is_succ_le (x y : ℕ) : x < y ↔ x + 1 ≤ y := by trivial
-- Reduce succ x ≤ succ y to x ≤ y
protected
lemma succ_le_succ_iff (m n : ℕ) : succ m ≤ succ n ↔ m ≤ n := ⟨le_of_succ_le_succ, succ_le_succ⟩
-- Reduce succ x < succ y to x < y
protected
lemma succ_lt_succ_iff (m n : ℕ) : succ m < succ n ↔ m < n := ⟨lt_of_succ_lt_succ, succ_lt_succ⟩
theorem lt_succ_iff (m n : ℕ) : m < nat.succ n ↔ m ≤ n := nat.succ_le_succ_iff m n
-- This rewrites a subtraction on left-hand-side of inequality into a predicate
-- involving addition.
protected lemma sub_lt_to_add (a m n : ℕ) : a - n < m ↔ (a < m + n ∧ (n ≤ a ∨ 0 < m)) :=
begin
revert a m,
induction n,
case zero {
intros a m,
simp [nat.zero_le],
},
case succ : n ind {
intros a m,
cases a,
case zero {
simp [zero_sub, zero_lt_succ, not_succ_le_zero],
},
case succ {
simp [add_succ, nat.succ_lt_succ_iff, nat.succ_le_succ_iff, ind],
},
},
end
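-- An illustrative use (the names `a`, `h`, `ha` are ours): the lemma turns
-- a goal about subtraction into a conjunction about addition.
example (a : ℕ) (h : a < 4 + 3) (ha : 3 ≤ a) : a - 3 < 4 :=
(nat.sub_lt_to_add a 4 3).mpr ⟨h, or.inl ha⟩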
-- This rewrites a subtraction on right-hand side of inequality into an addition.
protected lemma lt_sub_iff (a m n : ℕ) : a < m - n ↔ a + n < m :=
begin
revert n,
induction m with m ind,
{ simp [nat.lt_is_succ_le, add_succ, not_succ_le_zero, zero_sub],
},
{ intro n,
cases n with n,
{ simp, },
{ simp [nat.succ_lt_succ_iff, ind],
},
},
end
lemma lt_one_is_zero (x:ℕ) : x < 1 ↔ x = 0 :=
begin
constructor,
{ intros x_lt_1,
apply eq_zero_of_le_zero (le_of_lt_succ x_lt_1),
},
{ intros, simp [a], exact (of_as_true trivial),
}
end
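-- Illustrative use of `lt_one_is_zero`: any natural number strictly below
-- one must be zero.
example (x : ℕ) (h : x < 1) : x = 0 := (lt_one_is_zero x).mp h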
section -- mathlib copies
variables {m n k : ℕ}
theorem pred_sub (n m : ℕ) : pred n - m = pred (n - m) :=
by rw [← sub_one, nat.sub_sub, one_add]; refl
protected theorem lt_of_sub_lt_sub_right : m - k < n - k → m < n :=
lt_imp_lt_of_le_imp_le (λ h, nat.sub_le_sub_right h k)
protected theorem add_lt_of_lt_sub_right (h : m < n - k) : m + k < n :=
@nat.lt_of_sub_lt_sub_right _ _ k (by rwa nat.add_sub_cancel)
protected theorem add_lt_of_lt_sub_left (h : m < n - k) : k + m < n :=
by rw add_comm; exact nat.add_lt_of_lt_sub_right h
protected theorem le_sub_right_of_add_le (h : m + k ≤ n) : m ≤ n - k :=
by rw ← nat.add_sub_cancel m k; exact nat.sub_le_sub_right h k
protected theorem lt_sub_right_of_add_lt (h : m + k < n) : m < n - k :=
lt_of_succ_le $ nat.le_sub_right_of_add_le $
by rw succ_add; exact succ_le_of_lt h
protected theorem lt_sub_left_of_add_lt (h : k + m < n) : m < n - k :=
nat.lt_sub_right_of_add_lt (by rwa add_comm at h)
protected theorem lt_sub_left_iff_add_lt : n < k - m ↔ m + n < k :=
⟨nat.add_lt_of_lt_sub_left, nat.lt_sub_left_of_add_lt⟩
protected theorem le_sub_left_of_add_le (h : k + m ≤ n) : m ≤ n - k :=
nat.le_sub_right_of_add_le (by rwa add_comm at h)
protected theorem lt_sub_right_iff_add_lt : m < k - n ↔ m + n < k :=
by rw [nat.lt_sub_left_iff_add_lt, add_comm]
protected theorem lt_add_of_sub_lt_left : n - k < m → n < k + m :=
lt_imp_lt_of_le_imp_le nat.le_sub_left_of_add_le
protected theorem sub_le_left_of_le_add : n ≤ k + m → n - k ≤ m :=
le_imp_le_of_lt_imp_lt nat.add_lt_of_lt_sub_left
protected theorem sub_lt_left_iff_lt_add (H : n ≤ k) : k - n < m ↔ k < n + m :=
⟨nat.lt_add_of_sub_lt_left,
λ h₁,
have succ k ≤ n + m, from succ_le_of_lt h₁,
have succ (k - n) ≤ m, from
calc succ (k - n) = succ k - n : by rw (succ_sub H)
... ≤ n + m - n : nat.sub_le_sub_right this n
... = m : by rw nat.add_sub_cancel_left,
lt_of_succ_le this⟩
protected lemma sub_le_self (n m : ℕ) : n - m ≤ n :=
nat.sub_le_left_of_le_add (nat.le_add_left _ _)
end
theorem pow_add (a m n : ℕ) : a^(m + n) = a^m * a^n :=
by induction n; simp [*, pow_succ, mul_assoc]
lemma mul_pow_add_lt_pow {x y n m:ℕ} (Hx: x < 2^m) (Hy: y < 2^n) : x * 2^n + y < 2^(m + n) :=
calc
x*2^n + y < x*2^n + 2^n : nat.add_lt_add_left Hy (x*2^n)
... = (x + 1) * 2^n : begin rw right_distrib, simp end
... ≤ 2^m * 2^n : nat.mul_le_mul_right (2^n) (succ_le_of_lt Hx)
... = 2^(m + n) : eq.symm (nat.pow_add _ _ _)
end nat
end galois
|
ad659461eda6a6fcd99c06f85cdcaa22f8cc4ae9
|
b29f946a2f0afd23ef86b9219116968babbb9f4f
|
/src/problem_sheets/sheet_1/sht01Q01.lean
|
7a793de1f58415318bc6b1fb9e0eba44d48aaaaa
|
[
"Apache-2.0"
] |
permissive
|
ImperialCollegeLondon/M1P1-lean
|
58be7394fded719d95e45e6b10e1ecf2ed3c7c4c
|
3723468cc50f8bebd00a9811caf25224a578de17
|
refs/heads/master
| 1,587,063,867,779
| 1,572,727,164,000
| 1,572,727,164,000
| 165,845,802
| 14
| 4
|
Apache-2.0
| 1,549,730,698,000
| 1,547,554,675,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 2,788
|
lean
|
-- This import gives us a working copy of the real numbers ℝ,
-- and functions such as abs : ℝ → ℝ
import data.real.basic
-- This next import gives us several tactics of use to mathematicians:
-- (1) norm_num [to prove basic facts about reals like 2+2 = 4]
-- (2) ring [to prove basic algebra identities like (a+b)^2 = a^2+2ab+b^2]
-- (3) linarith [to prove basic inequalities like x > 0 -> x/2 > 0]
import tactic.linarith
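-- Tiny illustrations of these three tactics, on facts of exactly the kind
-- described above (illustrative examples):
example : (2 : ℝ) + 2 = 4 := by norm_num
example (a b : ℝ) : (a + b)^2 = a^2 + 2*a*b + b^2 := by ring
example (x : ℝ) (h : x > 0) : x / 2 > 0 := by linarith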
-- These lines switch Lean into "maths proof mode" -- don't worry about them.
-- Basically they tell Lean to use the axiom of choice and the
-- law of the excluded middle, two standard maths facts.
noncomputable theory
local attribute [instance, priority 0] classical.prop_decidable
-- the maths starts here.
-- We introduce the usual mathematical notation for absolute value
local notation `|` x `|` := abs x
theorem Q1a (x y : ℝ) : | x + y | ≤ | x | + | y | :=
begin
-- Lean's definition of abs is abs x = max (x, -x)
-- [or max x (-x), as the computer scientists would write it]
unfold abs,
-- lean's definition of max a b is "if a<=b then b else a"
unfold max,
-- We now have a complicated statement with three "if"s in.
split_ifs,
-- We now have 2^3=8 goals corresponding to all the possibilities
-- x>=0 or x<0, y>=0 or y<0, (x+y)>=0 or (x+y)<0.
repeat {linarith},
-- all of them are easily solvable using the linarith tactic.
end
-- We can solve the remaining parts using part (a).
theorem Q1b (x y : ℝ) : |x + y| ≥ |x| - |y| :=
begin
-- Apply Q1a to x+y and -y, then follow your nose.
have h := Q1a (x + y) (-y),
simp at h,
linarith,
end
theorem Q1c (x y : ℝ) : |x + y| ≥ |y| - |x| :=
begin
-- Apply Q1a to x+y and -x, then follow your nose.
have h := Q1a (x + y) (-x),
simp at h,
linarith,
end
theorem Q1d (x y : ℝ) : |x - y| ≥ | |x| - |y| | :=
begin
-- Lean prefers ≤ to ≥
show _ ≤ _,
-- for this one we need to apply the result that |X| ≤ B ↔ -B ≤ X and X ≤ B
rw abs_le,
-- Now we have two goals:
-- first -|x - y| ≤ |x| - |y|
-- and second |x| - |y| ≤ |x - y|.
-- So we need to split.
split,
{ -- -|x - y| ≤ |x| - |y|
have h := Q1a (x - y) (-x),
simp at *,
linarith },
{ -- |x| - |y| ≤ |x - y|
have h := Q1a (x - y) y,
simp at *,
linarith}
end
theorem Q1e (x y : ℝ) : |x| ≤ |y| + |x - y| :=
begin
have h := Q1a y (x - y),
simp * at *,
end
theorem Q1f (x y : ℝ) : |x| ≥ |y| - |x - y| :=
begin
have h := Q1a (x - y) (-x),
simp * at *,
end
theorem Q1g (x y z : ℝ) : |x - y| ≤ |x - z| + |y - z| :=
begin
have h := Q1a (x - z) (z - y),
-- Lean needs more hints with this one.
-- First let's change that y - z into z - y,
rw ←abs_neg (y - z),
-- now use automation
simp * at *,
end
|
9b8a6d15bda15588c501410ce767b065d0651985
|
e898bfefd5cb60a60220830c5eba68cab8d02c79
|
/uexp/src/uexp/rules/transitiveInferenceProject.lean
|
70903ed46fb0fe553b18b0a2fcba1f9c8d2d9470
|
[
"BSD-2-Clause"
] |
permissive
|
kkpapa/Cosette
|
9ed09e2dc4c1ecdef815c30b5501f64a7383a2ce
|
fda8fdbbf0de6c1be9b4104b87bbb06cede46329
|
refs/heads/master
| 1,584,573,128,049
| 1,526,370,422,000
| 1,526,370,422,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 2,334
|
lean
|
import ..sql
import ..tactics
import ..u_semiring
import ..extra_constants
import ..ucongr
import ..TDP
set_option profiler true
open Expr
open Proj
open Pred
open SQL
open tree
open binary_operators
set_option profiler true
notation `int` := datatypes.int
variable integer_1: const datatypes.int
variable integer_7: const datatypes.int
theorem rule:
forall ( Γ scm_t scm_account scm_bonus scm_dept scm_emp: Schema) (rel_t: relation scm_t) (rel_account: relation scm_account) (rel_bonus: relation scm_bonus) (rel_dept: relation scm_dept) (rel_emp: relation scm_emp) (t_k0 : Column int scm_t) (t_c1 : Column int scm_t) (t_f1_a0 : Column int scm_t) (t_f2_a0 : Column int scm_t) (t_f0_c0 : Column int scm_t) (t_f1_c0 : Column int scm_t) (t_f0_c1 : Column int scm_t) (t_f1_c2 : Column int scm_t) (t_f2_c3 : Column int scm_t) (account_acctno : Column int scm_account) (account_type : Column int scm_account) (account_balance : Column int scm_account) (bonus_ename : Column int scm_bonus) (bonus_job : Column int scm_bonus) (bonus_sal : Column int scm_bonus) (bonus_comm : Column int scm_bonus) (dept_deptno : Column int scm_dept) (dept_name : Column int scm_dept) (emp_empno : Column int scm_emp) (emp_ename : Column int scm_emp) (emp_job : Column int scm_emp) (emp_mgr : Column int scm_emp) (emp_hiredate : Column int scm_emp) (emp_comm : Column int scm_emp) (emp_sal : Column int scm_emp) (emp_deptno : Column int scm_emp) (emp_slacker : Column int scm_emp),
denoteSQL ((SELECT1 (e2p (constantExpr integer_1)) FROM1 (product ((SELECT * FROM1 (table rel_emp) WHERE (castPred (combine (right⋅emp_deptno) (e2p (constantExpr integer_7)) ) predicates.gt))) (table rel_emp)) WHERE (equal (uvariable (right⋅left⋅emp_deptno)) (uvariable (right⋅right⋅emp_deptno)))) :SQL Γ _)
=
denoteSQL ((SELECT1 (e2p (constantExpr integer_1)) FROM1 (product ((SELECT * FROM1 (table rel_emp) WHERE (castPred (combine (right⋅emp_deptno) (e2p (constantExpr integer_7)) ) predicates.gt))) ((SELECT * FROM1 (table rel_emp) WHERE (castPred (combine (right⋅emp_deptno) (e2p (constantExpr integer_7)) ) predicates.gt)))) WHERE (equal (uvariable (right⋅left⋅emp_deptno)) (uvariable (right⋅right⋅emp_deptno)))) :SQL Γ _) :=
begin
intros,
unfold_all_denotations,
funext,
dsimp,
print_size,
try {simp},
print_size,
TDP,
end
|
539507ac4fc0d738da7a869199211f34f4817966
|
624f6f2ae8b3b1adc5f8f67a365c51d5126be45a
|
/src/Init/Lean/Elab/Syntax.lean
|
026b8f2b5b0030cc7e2554539edc182505e56fef
|
[
"Apache-2.0"
] |
permissive
|
mhuisi/lean4
|
28d35a4febc2e251c7f05492e13f3b05d6f9b7af
|
dda44bc47f3e5d024508060dac2bcb59fd12e4c0
|
refs/heads/master
| 1,621,225,489,283
| 1,585,142,689,000
| 1,585,142,689,000
| 250,590,438
| 0
| 2
|
Apache-2.0
| 1,602,443,220,000
| 1,585,327,814,000
|
C
|
UTF-8
|
Lean
| false
| false
| 16,309
|
lean
|
/-
Copyright (c) 2020 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
prelude
import Init.Lean.Elab.Command
import Init.Lean.Elab.Quotation
namespace Lean
namespace Elab
namespace Term
/-
Expand `optional «precedence»` where
«precedence» := parser! " : " >> precedenceLit
precedenceLit : Parser := numLit <|> maxPrec
maxPrec := parser! nonReservedSymbol "max" -/
private def expandOptPrecedence (stx : Syntax) : Option Nat :=
if stx.isNone then none
else match ((stx.getArg 0).getArg 1).isNatLit? with
| some v => some v
| _ => some Parser.appPrec
private def mkParserSeq (ds : Array Syntax) : TermElabM Syntax :=
if ds.size == 0 then
throwUnsupportedSyntax
else if ds.size == 1 then
pure $ ds.get! 0
else
ds.foldlFromM (fun r d => `(ParserDescr.andthen $r $d)) (ds.get! 0) 1
structure ToParserDescrContext :=
(catName : Name)
(first : Bool)
(leftRec : Bool) -- true iff left recursion is allowed
/- When `leadingIdentAsSymbol == true` we convert
`Lean.Parser.Syntax.atom` into `Lean.ParserDescr.nonReservedSymbol`
See comment at `Parser.ParserCategory`. -/
(leadingIdentAsSymbol : Bool)
abbrev ToParserDescrM := ReaderT ToParserDescrContext (StateT Bool TermElabM)
private def markAsTrailingParser : ToParserDescrM Unit := set true
@[inline] private def withNotFirst {α} (x : ToParserDescrM α) : ToParserDescrM α :=
adaptReader (fun (ctx : ToParserDescrContext) => { first := false, .. ctx }) x
@[inline] private def withoutLeftRec {α} (x : ToParserDescrM α) : ToParserDescrM α :=
adaptReader (fun (ctx : ToParserDescrContext) => { leftRec := false, .. ctx }) x
def checkLeftRec (stx : Syntax) : ToParserDescrM Bool := do
ctx ← read;
if ctx.first && stx.getKind == `Lean.Parser.Syntax.cat then do
let cat := (stx.getIdAt 0).eraseMacroScopes;
if cat == ctx.catName then do
let rbp? : Option Nat := expandOptPrecedence (stx.getArg 1);
unless rbp?.isNone $ liftM $ throwError (stx.getArg 1) ("invalid occurrence of ':<num>' modifier in head");
unless ctx.leftRec $ liftM $
throwError (stx.getArg 3) ("invalid occurrence of '" ++ cat ++ "', parser algorithm does not allow this form of left recursion");
      markAsTrailingParser; -- mark this as a trailing parser
pure true
else
pure false
else
pure false
partial def toParserDescrAux : Syntax → ToParserDescrM Syntax
| stx =>
let kind := stx.getKind;
if kind == nullKind then do
let args := stx.getArgs;
condM (checkLeftRec (stx.getArg 0))
(do
when (args.size == 1) $ liftM $ throwError stx "invalid atomic left recursive syntax";
let args := args.eraseIdx 0;
args ← args.mapIdxM $ fun i arg => withNotFirst $ toParserDescrAux arg;
liftM $ mkParserSeq args)
(do
args ← args.mapIdxM $ fun i arg => withNotFirst $ toParserDescrAux arg;
liftM $ mkParserSeq args)
else if kind == choiceKind then do
toParserDescrAux (stx.getArg 0)
else if kind == `Lean.Parser.Syntax.paren then
toParserDescrAux (stx.getArg 1)
else if kind == `Lean.Parser.Syntax.cat then do
let cat := (stx.getIdAt 0).eraseMacroScopes;
ctx ← read;
if ctx.first && cat == ctx.catName then
liftM $ throwError stx "invalid atomic left recursive syntax"
else do
let rbp? : Option Nat := expandOptPrecedence (stx.getArg 1);
env ← liftM getEnv;
unless (Parser.isParserCategory env cat) $ liftM $ throwError (stx.getArg 3) ("unknown category '" ++ cat ++ "'");
let rbp := rbp?.getD 0;
`(ParserDescr.parser $(quote cat) $(quote rbp))
else if kind == `Lean.Parser.Syntax.atom then do
match (stx.getArg 0).isStrLit? with
| some atom => do
let rbp? : Option Nat := expandOptPrecedence (stx.getArg 1);
ctx ← read;
if ctx.leadingIdentAsSymbol && rbp?.isNone then
`(ParserDescr.nonReservedSymbol $(quote atom) false)
else
`(ParserDescr.symbol $(quote atom) $(quote rbp?))
| none => liftM throwUnsupportedSyntax
else if kind == `Lean.Parser.Syntax.num then
`(ParserDescr.numLit)
else if kind == `Lean.Parser.Syntax.str then
`(ParserDescr.strLit)
else if kind == `Lean.Parser.Syntax.char then
`(ParserDescr.charLit)
else if kind == `Lean.Parser.Syntax.ident then
`(ParserDescr.ident)
else if kind == `Lean.Parser.Syntax.try then do
d ← withoutLeftRec $ toParserDescrAux (stx.getArg 1);
`(ParserDescr.try $d)
else if kind == `Lean.Parser.Syntax.lookahead then do
d ← withoutLeftRec $ toParserDescrAux (stx.getArg 1);
`(ParserDescr.lookahead $d)
else if kind == `Lean.Parser.Syntax.sepBy then do
d₁ ← withoutLeftRec $ toParserDescrAux (stx.getArg 1);
d₂ ← withoutLeftRec $ toParserDescrAux (stx.getArg 2);
`(ParserDescr.sepBy $d₁ $d₂)
else if kind == `Lean.Parser.Syntax.sepBy1 then do
d₁ ← withoutLeftRec $ toParserDescrAux (stx.getArg 1);
d₂ ← withoutLeftRec $ toParserDescrAux (stx.getArg 2);
`(ParserDescr.sepBy1 $d₁ $d₂)
else if kind == `Lean.Parser.Syntax.many then do
d ← withoutLeftRec $ toParserDescrAux (stx.getArg 0);
`(ParserDescr.many $d)
else if kind == `Lean.Parser.Syntax.many1 then do
d ← withoutLeftRec $ toParserDescrAux (stx.getArg 0);
`(ParserDescr.many1 $d)
else if kind == `Lean.Parser.Syntax.optional then do
d ← withoutLeftRec $ toParserDescrAux (stx.getArg 0);
`(ParserDescr.optional $d)
else if kind == `Lean.Parser.Syntax.orelse then do
d₁ ← withoutLeftRec $ toParserDescrAux (stx.getArg 0);
d₂ ← withoutLeftRec $ toParserDescrAux (stx.getArg 2);
`(ParserDescr.orelse $d₁ $d₂)
else
liftM $ throwError stx $ "unexpected syntax kind of category `syntax`: " ++ kind
/--
Given a `stx` of category `syntax`, return a pair `(newStx, trailingParser)`,
where `newStx` is of category `term`. After elaboration, `newStx` should have type
`TrailingParserDescr` if `trailingParser == true`, and `ParserDescr` otherwise. -/
def toParserDescr (stx : Syntax) (catName : Name) : TermElabM (Syntax × Bool) := do
env ← getEnv;
let leadingIdentAsSymbol := Parser.leadingIdentAsSymbol env catName;
(toParserDescrAux stx { catName := catName, first := true, leftRec := true, leadingIdentAsSymbol := leadingIdentAsSymbol }).run false
end Term
namespace Command
@[builtinCommandElab syntaxCat] def elabDeclareSyntaxCat : CommandElab :=
fun stx => do
let catName := stx.getIdAt 1;
let attrName := catName.appendAfter "Parser";
env ← getEnv;
env ← liftIO stx $ Parser.registerParserCategory env attrName catName;
setEnv env
def mkKindName (catName : Name) : Name :=
`_kind ++ catName
def mkFreshKind (catName : Name) : CommandElabM Name := do
scp ← getCurrMacroScope;
mainModule ← getMainModule;
pure $ Lean.addMacroScope mainModule (mkKindName catName) scp
def Macro.mkFreshKind (catName : Name) : MacroM Name :=
Macro.addMacroScope (mkKindName catName)
private def elabKind (stx : Syntax) (catName : Name) : CommandElabM Name := do
if stx.isNone then
mkFreshKind catName
else
let kind := stx.getIdAt 1;
if kind.hasMacroScopes then
pure kind
else do
currNamespace ← getCurrNamespace;
pure (currNamespace ++ kind)
@[builtinCommandElab «syntax»] def elabSyntax : CommandElab :=
fun stx => do
env ← getEnv;
let cat := (stx.getIdAt 4).eraseMacroScopes;
unless (Parser.isParserCategory env cat) $ throwError (stx.getArg 4) ("unknown category '" ++ cat ++ "'");
kind ← elabKind (stx.getArg 1) cat;
let catParserId := mkIdentFrom stx (cat.appendAfter "Parser");
(val, trailingParser) ← runTermElabM none $ fun _ => Term.toParserDescr (stx.getArg 2) cat;
d ←
if trailingParser then
`(@[$catParserId:ident] def myParser : Lean.TrailingParserDescr := ParserDescr.trailingNode $(quote kind) $val)
else
`(@[$catParserId:ident] def myParser : Lean.ParserDescr := ParserDescr.node $(quote kind) $val);
trace `Elab stx $ fun _ => d;
withMacroExpansion stx d $ elabCommand d
def elabMacroRulesAux (k : SyntaxNodeKind) (alts : Array Syntax) : CommandElabM Syntax := do
alts ← alts.mapSepElemsM $ fun alt => do {
let lhs := alt.getArg 0;
let pat := lhs.getArg 0;
match_syntax pat with
| `(`($quot)) =>
let k' := quot.getKind;
if k' == k then
pure alt
else if k' == choiceKind then do
match quot.getArgs.find? $ fun quotAlt => quotAlt.getKind == k with
| none => throwError alt ("invalid macro_rules alternative, expected syntax node kind '" ++ k ++ "'")
| some quot => do
pat ← `(`($quot));
let lhs := lhs.setArg 0 pat;
pure $ alt.setArg 0 lhs
else
throwError alt ("invalid macro_rules alternative, unexpected syntax node kind '" ++ k' ++ "'")
| stx => throwUnsupportedSyntax
};
`(@[macro $(Lean.mkIdent k)] def myMacro : Macro := fun stx => match_syntax stx with $alts:matchAlt* | _ => throw Lean.Macro.Exception.unsupportedSyntax)
def inferMacroRulesAltKind (alt : Syntax) : CommandElabM SyntaxNodeKind :=
match_syntax (alt.getArg 0).getArg 0 with
| `(`($quot)) => pure quot.getKind
| stx => throwUnsupportedSyntax
def elabNoKindMacroRulesAux (alts : Array Syntax) : CommandElabM Syntax := do
k ← inferMacroRulesAltKind (alts.get! 0);
if k == choiceKind then
throwError (alts.get! 0)
"invalid macro_rules alternative, multiple interpretations for pattern (solution: specify node kind using `macro_rules [<kind>] ...`)"
else do
altsK ← alts.filterSepElemsM (fun alt => do k' ← inferMacroRulesAltKind alt; pure $ k == k');
altsNotK ← alts.filterSepElemsM (fun alt => do k' ← inferMacroRulesAltKind alt; pure $ k != k');
defCmd ← elabMacroRulesAux k altsK;
if altsNotK.isEmpty then
pure defCmd
else
`($defCmd:command macro_rules $altsNotK:matchAlt*)
@[builtinCommandElab «macro_rules»] def elabMacroRules : CommandElab :=
adaptExpander $ fun stx => match_syntax stx with
| `(macro_rules $alts:matchAlt*) => elabNoKindMacroRulesAux alts
| `(macro_rules | $alts:matchAlt*) => elabNoKindMacroRulesAux alts
| `(macro_rules [$kind] $alts:matchAlt*) => elabMacroRulesAux kind.getId alts
| `(macro_rules [$kind] | $alts:matchAlt*) => elabMacroRulesAux kind.getId alts
| _ => throwUnsupportedSyntax
/- We just ignore Lean3 notation declaration commands. -/
@[builtinCommandElab «mixfix»] def elabMixfix : CommandElab := fun _ => pure ()
@[builtinCommandElab «reserve»] def elabReserve : CommandElab := fun _ => pure ()
/- Wrap all occurrences of the given `ident` nodes in antiquotations -/
private partial def antiquote (vars : Array Syntax) : Syntax → Syntax
| stx => match_syntax stx with
| `($id:ident) =>
if (vars.findIdx? (fun var => var.getId == id.getId)).isSome then
Syntax.node `antiquot #[mkAtom "$", mkNullNode, id, mkNullNode, mkNullNode]
else
stx
| _ => match stx with
| Syntax.node k args => Syntax.node k (args.map antiquote)
| stx => stx
/- Convert `notation` command lhs item into a `syntax` command item -/
def expandNotationItemIntoSyntaxItem (stx : Syntax) : MacroM Syntax :=
let k := stx.getKind;
if k == `Lean.Parser.Command.identPrec then
pure $ Syntax.node `Lean.Parser.Syntax.cat #[mkIdentFrom stx `term, stx.getArg 1]
else if k == `Lean.Parser.Command.quotedSymbolPrec then
match (stx.getArg 0).getArg 1 with
| Syntax.atom info val => pure $ Syntax.node `Lean.Parser.Syntax.atom #[mkStxStrLit val info, stx.getArg 1]
| _ => Macro.throwUnsupported
else if k == `Lean.Parser.Command.strLitPrec then
pure $ Syntax.node `Lean.Parser.Syntax.atom stx.getArgs
else
Macro.throwUnsupported
def strLitPrecToPattern (stx: Syntax) : MacroM Syntax :=
match (stx.getArg 0).isStrLit? with
| some str => pure $ mkAtomFrom stx str
| none => Macro.throwUnsupported
/- Convert `notation` command lhs item into a pattern element -/
def expandNotationItemIntoPattern (stx : Syntax) : MacroM Syntax :=
let k := stx.getKind;
if k == `Lean.Parser.Command.identPrec then
let item := stx.getArg 0;
pure $ mkNode `antiquot #[mkAtom "$", mkNullNode, item, mkNullNode, mkNullNode]
else if k == `Lean.Parser.Command.quotedSymbolPrec then
pure $ (stx.getArg 0).getArg 1
else if k == `Lean.Parser.Command.strLitPrec then
strLitPrecToPattern stx
else
Macro.throwUnsupported
@[builtinMacro Lean.Parser.Command.notation] def expandNotation : Macro :=
fun stx => match_syntax stx with
| `(notation $items* => $rhs) => do
kind ← Macro.mkFreshKind `term;
-- build parser
syntaxParts ← items.mapM expandNotationItemIntoSyntaxItem;
let cat := mkIdentFrom stx `term;
-- build macro rules
let vars := items.filter $ fun item => item.getKind == `Lean.Parser.Command.identPrec;
let vars := vars.map $ fun var => var.getArg 0;
let rhs := antiquote vars rhs;
patArgs ← items.mapM expandNotationItemIntoPattern;
let pat := Syntax.node kind patArgs;
`(syntax [$(mkIdentFrom stx kind)] $syntaxParts* : $cat macro_rules | `($pat) => `($rhs))
| _ => Macro.throwUnsupported
/- Convert `macro` argument into a `syntax` command item -/
def expandMacroArgIntoSyntaxItem (stx : Syntax) : MacroM Syntax :=
let k := stx.getKind;
if k == `Lean.Parser.Command.macroArgSimple then
let argType := stx.getArg 2;
match argType with
| Syntax.atom _ "ident" => pure $ Syntax.node `Lean.Parser.Syntax.ident #[argType]
| Syntax.atom _ "num" => pure $ Syntax.node `Lean.Parser.Syntax.num #[argType]
| Syntax.atom _ "str" => pure $ Syntax.node `Lean.Parser.Syntax.str #[argType]
| Syntax.atom _ "char" => pure $ Syntax.node `Lean.Parser.Syntax.char #[argType]
| Syntax.ident _ _ _ _ => pure $ Syntax.node `Lean.Parser.Syntax.cat #[stx.getArg 2, stx.getArg 3]
| _ => Macro.throwUnsupported
else if k == `Lean.Parser.Command.strLitPrec then
pure $ Syntax.node `Lean.Parser.Syntax.atom stx.getArgs
else
Macro.throwUnsupported
/- Convert `macro` head into a `syntax` command item -/
def expandMacroHeadIntoSyntaxItem (stx : Syntax) : MacroM Syntax :=
let k := stx.getKind;
if k == `Lean.Parser.Command.identPrec then
let info := stx.getHeadInfo;
let id := (stx.getArg 0).getId;
pure $ Syntax.node `Lean.Parser.Syntax.atom #[mkStxStrLit (toString id) info, stx.getArg 1]
else
expandMacroArgIntoSyntaxItem stx
/- Convert `macro` arg into a pattern element -/
def expandMacroArgIntoPattern (stx : Syntax) : MacroM Syntax :=
let k := stx.getKind;
if k == `Lean.Parser.Command.macroArgSimple then
let item := stx.getArg 0;
pure $ mkNode `antiquot #[mkAtom "$", mkNullNode, item, mkNullNode, mkNullNode]
else if k == `Lean.Parser.Command.strLitPrec then
strLitPrecToPattern stx
else
Macro.throwUnsupported
/- Convert `macro` head into a pattern element -/
def expandMacroHeadIntoPattern (stx : Syntax) : MacroM Syntax :=
let k := stx.getKind;
if k == `Lean.Parser.Command.identPrec then
let str := toString (stx.getArg 0).getId;
pure $ mkAtomFrom stx str
else
expandMacroArgIntoPattern stx
@[builtinMacro Lean.Parser.Command.macro] def expandMacro : Macro :=
fun stx => do
let head := stx.getArg 1;
let args := (stx.getArg 2).getArgs;
let cat := stx.getArg 4;
kind ← Macro.mkFreshKind (cat.getId).eraseMacroScopes;
-- build parser
stxPart ← expandMacroHeadIntoSyntaxItem head;
stxParts ← args.mapM expandMacroArgIntoSyntaxItem;
let stxParts := #[stxPart] ++ stxParts;
-- build macro rules
patHead ← expandMacroHeadIntoPattern head;
patArgs ← args.mapM expandMacroArgIntoPattern;
let pat := Syntax.node kind (#[patHead] ++ patArgs);
if stx.getArgs.size == 7 then
-- `stx` is of the form `macro $head $args* : $cat => term`
let rhs := stx.getArg 6;
`(syntax [$(mkIdentFrom stx kind)] $stxParts* : $cat macro_rules | `($pat) => $rhs)
else
-- `stx` is of the form `macro $head $args* : $cat => `( $body )`
let rhsBody := stx.getArg 7;
`(syntax [$(mkIdentFrom stx kind)] $stxParts* : $cat macro_rules | `($pat) => `($rhsBody))
@[init] private def regTraceClasses : IO Unit := do
registerTraceClass `Elab.syntax;
pure ()
end Command
end Elab
end Lean
|
a4c07928069e1cb4a254abbdb50923c314e32377
|
7cdf3413c097e5d36492d12cdd07030eb991d394
|
/src/game/world3/level1.lean
|
6c7a41dc059016c07e7a70c9f57e0326be19da2d
|
[] |
no_license
|
alreadydone/natural_number_game
|
3135b9385a9f43e74cfbf79513fc37e69b99e0b3
|
1a39e693df4f4e871eb449890d3c7715a25c2ec9
|
refs/heads/master
| 1,599,387,390,105
| 1,573,200,587,000
| 1,573,200,691,000
| 220,397,084
| 0
| 0
| null | 1,573,192,734,000
| 1,573,192,733,000
| null |
UTF-8
|
Lean
| false
| false
| 2,277
|
lean
|
import game.world2.level6 -- hide
import mynat.mul -- import the definition of multiplication on mynat
-- World name : Multiplication world
/- Axiom : mul_zero (a : mynat) :
a * 0 = 0
-/
/- Axiom : mul_succ (a b : mynat) :
a * succ(b) = a * b + a
-/
/-
# World 3: Multiplication World
A new import! This import gives you the definition of multiplication on your
natural numbers. It is defined by recursion, just like addition.
Here are the two new axioms:
* `mul_zero (a : mynat) : a * 0 = 0`
* `mul_succ (a b : mynat) : a * succ(b) = a * b + a`
In words, we define multiplication by "induction on the second variable",
with `a * 0` defined to be `0` and, if we know `a * b`, then `a` times
the number after `b` is defined to be `a * b + a`.
You can keep all the theorems you proved about addition, but
for multiplication, those two results above are all you've got right now.
I would recommend that you sort out the bar on the left. Fold up everything,
and then unfold just Theorem Statements -> Multiplication World. This will
remind you of your two new theorems, both of which are true by definition.
If you want to be reminded of the theorems you have proved about addition,
you can just open up the Addition World theorem statements and take a look.
If you don't want to keep opening and closing these menus, why not think
a bit about the logic behind the naming of the theorems? After a while you
might find that you can guess the name of the theorem you want.
Anyway, what's going on in multiplication world? Like addition, we need to go
for the proofs that multiplication
is commutative and associative, but as well as that we will
need to prove facts about the relationship between multiplication
and addition, for example `a * (b + c) = a * b + a * c`, so now
there is a lot more to do. Good luck!
We are given `mul_zero`, and the first thing to prove is `zero_mul`.
Like `zero_add`, we of course prove it by induction.
## Level 1 : `zero_mul`
-/
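/-
A quick worked sketch of how these two axioms compute (using nothing beyond
`mul_zero`, `mul_succ` and `zero_add` from Addition World): for any `a`,

  a * succ (succ 0) = a * succ 0 + a      -- mul_succ
                    = (a * 0 + a) + a     -- mul_succ
                    = (0 + a) + a         -- mul_zero
                    = a + a               -- zero_add

so every `succ` in the second argument contributes one more `+ a`; multiplication
really is repeated addition.
-/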
namespace mynat -- hide
/- Lemma
For all natural numbers $m$, we have
$$ 0 \times m = 0. $$
-/
lemma zero_mul (m : mynat) : 0 * m = 0 :=
begin [less_leaky]
induction m with d hd,
{
rw mul_zero,
refl
},
{
rw mul_succ,
rw hd,
rw add_zero,
refl
}
end
end mynat -- hide
|
cf1b8d889ace61a96048d3aeca9c4af27f530ee6
|
1f6fe2f89976b14a4567ab298c35792b21f2e50b
|
/homotopy/EM.hlean
|
474b71690a5e14982dbf712bab6e91e2422fc9eb
|
[
"Apache-2.0"
] |
permissive
|
jonas-frey/Spectral
|
e5c1c2f7bcac26aa55f7b1e041a81272a146198d
|
72d521091525a4bc9a31cac859840efe9461cf66
|
refs/heads/master
| 1,610,235,743,345
| 1,505,417,795,000
| 1,505,417,795,000
| 102,653,342
| 0
| 0
| null | 1,504,728,483,000
| 1,504,728,483,000
| null |
UTF-8
|
Lean
| false
| false
| 28,903
|
hlean
|
-- Authors: Floris van Doorn
import homotopy.EM algebra.category.functor.equivalence types.pointed2 ..pointed_pi ..pointed
..move_to_lib .susp ..algebra.exactness
open eq equiv is_equiv algebra group nat pointed EM.ops is_trunc trunc susp function is_conn
/- TODO: try to fix the speed of this file -/
namespace EM
definition EMadd1_functor_succ [unfold_full] {G H : AbGroup} (φ : G →g H) (n : ℕ) :
EMadd1_functor φ (succ n) ~* ptrunc_functor (n+2) (susp_functor (EMadd1_functor φ n)) :=
by reflexivity
definition EM1_functor_gid (G : Group) : EM1_functor (gid G) ~* !pid :=
begin
fapply phomotopy.mk,
{ intro x, induction x,
{ reflexivity },
{ apply eq_pathover_id_right, apply hdeg_square, apply elim_pth, },
{ apply @is_prop.elim, apply is_trunc_pathover }},
{ reflexivity },
end
definition EMadd1_functor_gid (G : AbGroup) (n : ℕ) : EMadd1_functor (gid G) n ~* !pid :=
begin
induction n with n p,
{ apply EM1_functor_gid },
{ refine !EMadd1_functor_succ ⬝* _,
refine ptrunc_functor_phomotopy _ (susp_functor_phomotopy p ⬝* !susp_functor_pid) ⬝* _,
apply ptrunc_functor_pid }
end
definition EM_functor_gid (G : AbGroup) (n : ℕ) : EM_functor (gid G) n ~* !pid :=
begin
cases n with n,
{ apply pmap_of_homomorphism_gid },
{ apply EMadd1_functor_gid }
end
definition EM1_functor_gcompose {G H K : Group} (ψ : H →g K) (φ : G →g H) :
EM1_functor (ψ ∘g φ) ~* EM1_functor ψ ∘* EM1_functor φ :=
begin
fapply phomotopy.mk,
{ intro x, induction x,
{ reflexivity },
{ apply eq_pathover, apply hdeg_square, esimp,
refine !elim_pth ⬝ _ ⬝ (ap_compose (EM1_functor ψ) _ _)⁻¹,
refine _ ⬝ ap02 _ !elim_pth⁻¹, exact !elim_pth⁻¹ },
{ apply @is_prop.elim, apply is_trunc_pathover }},
{ reflexivity },
end
definition EMadd1_functor_gcompose {G H K : AbGroup} (ψ : H →g K) (φ : G →g H) (n : ℕ) :
EMadd1_functor (ψ ∘g φ) n ~* EMadd1_functor ψ n ∘* EMadd1_functor φ n :=
begin
induction n with n p,
{ apply EM1_functor_gcompose },
{ refine !EMadd1_functor_succ ⬝* _,
refine ptrunc_functor_phomotopy _ (susp_functor_phomotopy p ⬝* !susp_functor_pcompose) ⬝* _,
apply ptrunc_functor_pcompose }
end
definition EM_functor_gcompose {G H K : AbGroup} (ψ : H →g K) (φ : G →g H) (n : ℕ) :
EM_functor (ψ ∘g φ) n ~* EM_functor ψ n ∘* EM_functor φ n :=
begin
cases n with n,
{ apply pmap_of_homomorphism_gcompose },
{ apply EMadd1_functor_gcompose }
end
definition EM1_functor_phomotopy {G H : Group} {φ ψ : G →g H} (p : φ ~ ψ) :
EM1_functor φ ~* EM1_functor ψ :=
begin
fapply phomotopy.mk,
{ intro x, induction x,
{ reflexivity },
{ apply eq_pathover, apply hdeg_square, esimp,
refine !elim_pth ⬝ _ ⬝ !elim_pth⁻¹, exact ap pth (p g) },
{ apply @is_prop.elim, apply is_trunc_pathover }},
{ reflexivity },
end
definition EMadd1_functor_phomotopy {G H : AbGroup} {φ ψ : G →g H} (p : φ ~ ψ) (n : ℕ) :
EMadd1_functor φ n ~* EMadd1_functor ψ n :=
begin
induction n with n q,
{ exact EM1_functor_phomotopy p },
{ exact ptrunc_functor_phomotopy _ (susp_functor_phomotopy q) }
end
definition EM_functor_phomotopy {G H : AbGroup} {φ ψ : G →g H} (p : φ ~ ψ) (n : ℕ) :
EM_functor φ n ~* EM_functor ψ n :=
begin
cases n with n,
{ exact pmap_of_homomorphism_phomotopy p },
{ exact EMadd1_functor_phomotopy p n }
end
definition EM_equiv_EM [constructor] {G H : AbGroup} (φ : G ≃g H) (n : ℕ) : K G n ≃* K H n :=
begin
fapply pequiv.MK',
{ exact EM_functor φ n },
{ exact EM_functor φ⁻¹ᵍ n },
{ intro x, refine (EM_functor_gcompose φ⁻¹ᵍ φ n)⁻¹* x ⬝ _,
refine _ ⬝ EM_functor_gid G n x,
refine EM_functor_phomotopy _ n x,
rexact left_inv φ },
{ intro x, refine (EM_functor_gcompose φ φ⁻¹ᵍ n)⁻¹* x ⬝ _,
refine _ ⬝ EM_functor_gid H n x,
refine EM_functor_phomotopy _ n x,
rexact right_inv φ }
end
definition is_equiv_EM_functor [constructor] {G H : AbGroup} (φ : G →g H) [H2 : is_equiv φ]
(n : ℕ) : is_equiv (EM_functor φ n) :=
to_is_equiv (EM_equiv_EM (isomorphism.mk φ H2) n)
definition fundamental_group_EM1' (G : Group) : G ≃g π₁ (EM1 G) :=
(fundamental_group_EM1 G)⁻¹ᵍ
definition ghomotopy_group_EMadd1' (G : AbGroup) (n : ℕ) : G ≃g πg[n+1] (EMadd1 G n) :=
begin
change G ≃g π₁ (Ω[n] (EMadd1 G n)),
refine _ ⬝g homotopy_group_isomorphism_of_pequiv 0 (loopn_EMadd1_pequiv_EM1 G n),
apply fundamental_group_EM1'
end
definition homotopy_group_functor_EM1_functor {G H : Group} (φ : G →g H) :
π→g[1] (EM1_functor φ) ∘ fundamental_group_EM1' G ~ fundamental_group_EM1' H ∘ φ :=
begin
intro g, apply ap tr, exact !idp_con ⬝ !elim_pth,
end
section
definition ghomotopy_group_EMadd1'_0 (G : AbGroup) :
ghomotopy_group_EMadd1' G 0 ~ fundamental_group_EM1' G :=
begin
refine _ ⬝hty id_compose _,
unfold [ghomotopy_group_EMadd1'],
apply hwhisker_right (fundamental_group_EM1' G),
refine _ ⬝hty trunc_functor_id _ _,
exact trunc_functor_homotopy _ ap1_pid,
end
definition loopn_EMadd1_pequiv_EM1_succ (G : AbGroup) (n : ℕ) :
loopn_EMadd1_pequiv_EM1 G (succ n) ~* (loopn_succ_in (EMadd1 G (succ n)) n)⁻¹ᵉ* ∘*
Ω→[n] (loop_EMadd1 G n) ∘* loopn_EMadd1_pequiv_EM1 G n :=
by reflexivity
-- definition is_trunc_EMadd1' [instance] (G : AbGroup) (n : ℕ) : is_trunc (succ n) (EMadd1 G n) :=
-- is_trunc_EMadd1 G n
definition loop_EMadd1_succ (G : AbGroup) (n : ℕ) :
loop_EMadd1 G (n+1) ~* (loop_ptrunc_pequiv (n+1+1) (susp (EMadd1 G (n+1))))⁻¹ᵉ* ∘*
freudenthal_pequiv (EMadd1 G (n+1)) (add_mul_le_mul_add n 1 1) ∘*
(ptrunc_pequiv (n+1+1) (EMadd1 G (n+1)))⁻¹ᵉ* :=
by reflexivity
definition ap1_EMadd1_natural {G H : AbGroup} (φ : G →g H) (n : ℕ) :
Ω→ (EMadd1_functor φ (succ n)) ∘* loop_EMadd1 G n ~* loop_EMadd1 H n ∘* EMadd1_functor φ n :=
begin
refine pwhisker_right _ (ap1_phomotopy !EMadd1_functor_succ) ⬝* _,
induction n with n IH,
{ refine pwhisker_left _ !hopf.to_pmap_delooping_pinv ⬝* _ ⬝*
pwhisker_right _ !hopf.to_pmap_delooping_pinv⁻¹*,
refine !loop_susp_unit_natural⁻¹* ⬝h* _,
apply ap1_psquare,
apply ptr_natural },
{ refine pwhisker_left _ !loop_EMadd1_succ ⬝* _ ⬝* pwhisker_right _ !loop_EMadd1_succ⁻¹*,
refine _ ⬝h* !ap1_ptrunc_functor,
refine (@(ptrunc_pequiv_natural (n+1+1) _) _ _)⁻¹ʰ* ⬝h* _,
refine pwhisker_left _ !to_pmap_freudenthal_pequiv ⬝* _ ⬝*
pwhisker_right _ !to_pmap_freudenthal_pequiv⁻¹*,
apply ptrunc_functor_psquare,
exact !loop_susp_unit_natural⁻¹* }
end
definition apn_EMadd1_pequiv_EM1_natural {G H : AbGroup} (φ : G →g H) (n : ℕ) :
Ω→[n] (EMadd1_functor φ n) ∘* loopn_EMadd1_pequiv_EM1 G n ~*
loopn_EMadd1_pequiv_EM1 H n ∘* EM1_functor φ :=
begin
induction n with n IH,
{ reflexivity },
{ refine pwhisker_left _ !loopn_EMadd1_pequiv_EM1_succ ⬝* _,
refine _ ⬝* pwhisker_right _ !loopn_EMadd1_pequiv_EM1_succ⁻¹*,
refine _ ⬝h* !loopn_succ_in_inv_natural,
exact IH ⬝h* (apn_psquare n !ap1_EMadd1_natural) }
end
definition homotopy_group_functor_EMadd1_functor {G H : AbGroup} (φ : G →g H) (n : ℕ) :
π→g[n+1] (EMadd1_functor φ n) ∘ ghomotopy_group_EMadd1' G n ~
ghomotopy_group_EMadd1' H n ∘ φ :=
begin
refine hwhisker_left _ (to_fun_isomorphism_trans _ _) ⬝hty _ ⬝hty
hwhisker_right _ (to_fun_isomorphism_trans _ _)⁻¹ʰᵗʸ,
refine _ ⬝htyh (homotopy_group_homomorphism_psquare 1 (apn_EMadd1_pequiv_EM1_natural φ n)),
apply homotopy_group_functor_EM1_functor
end
definition homotopy_group_functor_EMadd1_functor' {G H : AbGroup} (φ : G →g H) (n : ℕ) :
φ ∘ (ghomotopy_group_EMadd1' G n)⁻¹ᵍ ~
(ghomotopy_group_EMadd1' H n)⁻¹ᵍ ∘ π→g[n+1] (EMadd1_functor φ n) :=
(homotopy_group_functor_EMadd1_functor φ n)⁻¹ʰᵗʸʰ
definition EM1_pmap_natural {G H : Group} {X Y : Type*} (f : X →* Y) (eX : G → Ω X)
(eY : H → Ω Y) (rX : Πg h, eX (g * h) = eX g ⬝ eX h) (rY : Πg h, eY (g * h) = eY g ⬝ eY h)
[H1 : is_conn 0 X] [H2 : is_trunc 1 X] [is_conn 0 Y] [is_trunc 1 Y]
(φ : G →g H) (p : hsquare eX eY φ (Ω→ f)) :
psquare (EM1_pmap eX rX) (EM1_pmap eY rY) (EM1_functor φ) f :=
begin
fapply phomotopy.mk,
{ intro x, induction x using EM.set_rec,
{ exact respect_pt f },
{ apply eq_pathover,
refine ap_compose f _ _ ⬝ph _ ⬝hp (ap_compose (EM1_pmap eY rY) _ _)⁻¹,
refine ap02 _ !elim_pth ⬝ph _ ⬝hp ap02 _ !elim_pth⁻¹,
refine _ ⬝hp !elim_pth⁻¹, apply transpose, exact square_of_eq_bot (p g) }},
{ exact !idp_con⁻¹ }
end
definition EM1_pequiv'_natural {G H : Group} {X Y : Type*} (f : X →* Y) (eX : G ≃* Ω X)
(eY : H ≃* Ω Y) (rX : Πg h, eX (g * h) = eX g ⬝ eX h) (rY : Πg h, eY (g * h) = eY g ⬝ eY h)
[H1 : is_conn 0 X] [H2 : is_trunc 1 X] [is_conn 0 Y] [is_trunc 1 Y]
(φ : G →g H) (p : Ω→ f ∘ eX ~ eY ∘ φ) :
f ∘* EM1_pequiv' eX rX ~* EM1_pequiv' eY rY ∘* EM1_functor φ :=
EM1_pmap_natural f eX eY rX rY φ p
definition EM1_pequiv_natural {G H : Group} {X Y : Type*} (f : X →* Y) (eX : G ≃g π₁ X)
(eY : H ≃g π₁ Y) [H1 : is_conn 0 X] [H2 : is_trunc 1 X] [is_conn 0 Y] [is_trunc 1 Y]
(φ : G →g H) (p : π→g[1] f ∘ eX ~ eY ∘ φ) :
f ∘* EM1_pequiv eX ~* EM1_pequiv eY ∘* EM1_functor φ :=
EM1_pequiv'_natural f _ _ _ _ φ
begin
assert p' : ptrunc_functor 0 (Ω→ f) ∘* pequiv_of_isomorphism eX ~*
pequiv_of_isomorphism eY ∘* pmap_of_homomorphism φ, exact phomotopy_of_homotopy p,
exact p' ⬝h* (ptrunc_pequiv_natural 0 (Ω→ f)),
end
definition EM1_pequiv_type_natural {X Y : Type*} (f : X →* Y) [H1 : is_conn 0 X] [H2 : is_trunc 1 X]
[H3 : is_conn 0 Y] [H4 : is_trunc 1 Y] :
f ∘* EM1_pequiv_type X ~* EM1_pequiv_type Y ∘* EM1_functor (π→g[1] f) :=
begin refine EM1_pequiv_natural f _ _ _ _, reflexivity end
definition EM_up_natural {G H : AbGroup} (φ : G →g H) {X Y : Type*} (f : X →* Y) {n : ℕ}
(eX : Ω[succ (succ n)] X ≃* G) (eY : Ω[succ (succ n)] Y ≃* H) (p : φ ∘ eX ~ eY ∘ Ω→[succ (succ n)] f)
: φ ∘ EM_up eX ~ EM_up eY ∘ Ω→[succ n] (Ω→ f) :=
begin
refine _ ⬝htyh p,
exact to_homotopy (phinverse (loopn_succ_in_natural (succ n) f)⁻¹*)
end
definition EMadd1_pmap_natural {G H : AbGroup} {X Y : Type*} (f : X →* Y) (n : ℕ) (eX : Ω[succ n] X ≃* G)
(eY : Ω[succ n] Y ≃* H) (rX : Πp q, eX (p ⬝ q) = eX p * eX q) (rY : Πp q, eY (p ⬝ q) = eY p * eY q)
[H1 : is_conn n X] [H2 : is_trunc (n.+1) X] [H3 : is_conn n Y] [H4 : is_trunc (n.+1) Y]
(φ : G →g H) (p : φ ∘ eX ~ eY ∘ Ω→[succ n] f) :
f ∘* EMadd1_pmap n eX rX ~* EMadd1_pmap n eY rY ∘* EMadd1_functor φ n :=
begin
revert X Y f eX eY rX rY H1 H2 H3 H4 p, induction n with n IH: intros,
{ apply EM1_pmap_natural, exact @hhinverse _ _ _ _ _ _ eX eY p },
{ do 2 rewrite [EMadd1_pmap_succ], refine _ ⬝* pwhisker_left _ !EMadd1_functor_succ⁻¹*,
refine (ptrunc_elim_pcompose ((succ n).+1) _ _)⁻¹* ⬝* _ ⬝*
(ptrunc_elim_ptrunc_functor ((succ n).+1) _ _)⁻¹*,
apply ptrunc_elim_phomotopy,
refine _ ⬝* !susp_elim_susp_functor⁻¹*,
refine _ ⬝* susp_elim_phomotopy (IH _ _ _ _ _ (is_homomorphism_EM_up eX rX) _ (@is_conn_loop _ _ H1)
(@is_trunc_loop _ _ H2) _ _ (EM_up_natural φ f eX eY p)),
apply susp_elim_natural }
end
definition EMadd1_pequiv'_natural {G H : AbGroup} {X Y : Type*} (f : X →* Y) (n : ℕ) (eX : Ω[succ n] X ≃* G)
(eY : Ω[succ n] Y ≃* H) (rX : Πp q, eX (p ⬝ q) = eX p * eX q) (rY : Πp q, eY (p ⬝ q) = eY p * eY q)
[H1 : is_conn n X] [H2 : is_trunc (n.+1) X] [is_conn n Y] [is_trunc (n.+1) Y]
(φ : G →g H) (p : φ ∘ eX ~ eY ∘ Ω→[succ n] f) :
f ∘* EMadd1_pequiv' n eX rX ~* EMadd1_pequiv' n eY rY ∘* EMadd1_functor φ n :=
begin rexact EMadd1_pmap_natural f n eX eY rX rY φ p end
definition EMadd1_pequiv_natural_local_instance {X : Type*} (n : ℕ) [H : is_trunc (n.+1) X] :
is_set (Ω[succ n] X) :=
@(is_set_loopn (succ n) X) H
local attribute EMadd1_pequiv_natural_local_instance [instance]
definition EMadd1_pequiv_natural {G H : AbGroup} {X Y : Type*} (f : X →* Y) (n : ℕ) (eX : πg[n+1] X ≃g G)
(eY : πg[n+1] Y ≃g H) [H1 : is_conn n X] [H2 : is_trunc (n.+1) X] [H3 : is_conn n Y]
[H4 : is_trunc (n.+1) Y] (φ : G →g H) (p : φ ∘ eX ~ eY ∘ π→g[n+1] f) :
f ∘* EMadd1_pequiv n eX ~* EMadd1_pequiv n eY ∘* EMadd1_functor φ n :=
EMadd1_pequiv'_natural f n
((ptrunc_pequiv 0 (Ω[succ n] X))⁻¹ᵉ* ⬝e* pequiv_of_isomorphism eX)
((ptrunc_pequiv 0 (Ω[succ n] Y))⁻¹ᵉ* ⬝e* pequiv_of_isomorphism eY)
_ _ φ (hhconcat (to_homotopy (phinverse (ptrunc_pequiv_natural 0 (Ω→[succ n] f)))) p)
definition EMadd1_pequiv_succ_natural {G H : AbGroup} {X Y : Type*} (f : X →* Y) (n : ℕ)
(eX : πag[n+2] X ≃g G) (eY : πag[n+2] Y ≃g H) [is_conn (n.+1) X] [is_trunc (n.+2) X]
[is_conn (n.+1) Y] [is_trunc (n.+2) Y] (φ : G →g H) (p : φ ∘ eX ~ eY ∘ π→g[n+2] f) :
f ∘* EMadd1_pequiv_succ n eX ~* EMadd1_pequiv_succ n eY ∘* EMadd1_functor φ (n+1) :=
@(EMadd1_pequiv_natural f (succ n) eX eY) _ _ _ _ φ p
definition EMadd1_pequiv_type_natural {X Y : Type*} (f : X →* Y) (n : ℕ)
[H1 : is_conn (n+1) X] [H2 : is_trunc (n+1+1) X] [H3 : is_conn (n+1) Y] [H4 : is_trunc (n+1+1) Y] :
f ∘* EMadd1_pequiv_type X n ~* EMadd1_pequiv_type Y n ∘* EMadd1_functor (π→g[n+2] f) (succ n) :=
EMadd1_pequiv_succ_natural f n !isomorphism.refl !isomorphism.refl (π→g[n+2] f)
proof λa, idp qed
/- The Eilenberg-MacLane space K(G,n) with the same homotopy group as X on level n.
On paper this is written K(πₙ(X), n). The problem is that for
* n = 0 the expression π₀(X) is a pointed set, and K(X,0) needs X to be a pointed set
* n = 1 the expression π₁(X) is a group, and K(G,1) needs G to be a group
* n ≥ 2 the expression πₙ(X) is an abelian group, and K(G,n) needs G to be an abelian group
-/
definition EM_type (X : Type*) : ℕ → Type*
| 0 := ptrunc 0 X
| 1 := EM1 (π₁ X)
| (n+2) := EMadd1 (πag[n+2] X) (n+1)
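-- As an illustration of the case split (a sketch, not used below): `EM_type X 3` unfolds the
-- last equation once to `EMadd1 (πag[3] X) 2`, i.e. a K(π₃(X), 3); in general `EMadd1 G n`
-- carries the group `G` in degree `n+1` (see `ghomotopy_group_EMadd1'` above).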
definition EM_type_pequiv.{u} {X Y : pType.{u}} (n : ℕ) [Hn : is_succ n] (e : πg[n] Y ≃g πg[n] X)
[H1 : is_conn (n.-1) X] [H2 : is_trunc n X] : EM_type Y n ≃* X :=
begin
induction Hn with n, cases n with n,
{ have is_conn 0 X, from H1,
have is_trunc 1 X, from H2,
exact EM1_pequiv e },
{ have is_conn (n+1) X, from H1,
have is_trunc ((n+1).+1) X, from H2,
exact EMadd1_pequiv (n+1) e⁻¹ᵍ }
end
-- definition EM1_functor_equiv' (X Y : Type*) [H1 : is_conn 0 X] [H2 : is_trunc 1 X]
-- [H3 : is_conn 0 Y] [H4 : is_trunc 1 Y] : (X →* Y) ≃ (π₁ X →g π₁ Y) :=
-- begin
-- fapply equiv.MK,
-- { intro f, exact π→g[1] f },
-- { intro φ, exact EM1_pequiv_type Y ∘* EM1_functor φ ∘* (EM1_pequiv_type X)⁻¹ᵉ* },
-- { intro φ, apply homomorphism_eq,
-- refine homotopy_group_homomorphism_pcompose _ _ _ ⬝hty _,
-- refine hwhisker_left _ (homotopy_group_homomorphism_pcompose _ _ _) ⬝hty _,
-- refine (hassoc _ _ _)⁻¹ʰᵗʸ ⬝hty _, exact sorry },
-- { intro f, apply eq_of_phomotopy, refine !passoc⁻¹* ⬝* _, apply pinv_right_phomotopy_of_phomotopy,
-- exact sorry }
-- end
-- definition EMadd1_functor_equiv' (n : ℕ) (X Y : Type*) [H1 : is_conn (n+1) X] [H2 : is_trunc (n+1+1) X]
-- [H3 : is_conn (n+1) Y] [H4 : is_trunc (n+1+1) Y] : (X →* Y) ≃ (πag[n+2] X →g πag[n+2] Y) :=
-- begin
-- fapply equiv.MK,
-- { intro f, exact π→g[n+2] f },
-- { intro φ, exact EMadd1_pequiv_type Y n ∘* EMadd1_functor φ (n+1) ∘* (EMadd1_pequiv_type X n)⁻¹ᵉ* },
-- { intro φ, apply homomorphism_eq,
-- refine homotopy_group_homomorphism_pcompose _ _ _ ⬝hty _,
-- refine hwhisker_left _ (homotopy_group_homomorphism_pcompose _ _ _) ⬝hty _,
-- intro g, exact sorry },
-- { intro f, apply eq_of_phomotopy, refine !passoc⁻¹* ⬝* _, apply pinv_right_phomotopy_of_phomotopy,
-- exact !EMadd1_pequiv_type_natural⁻¹* }
-- end
-- definition EM_functor_equiv (n : ℕ) (G H : AbGroup) : (G →g H) ≃ (EMadd1 G (n+1) →* EMadd1 H (n+1)) :=
-- begin
-- fapply equiv.MK,
-- { intro φ, exact EMadd1_functor φ (n+1) },
-- { intro f, exact ghomotopy_group_EMadd1 H (n+1) ∘g π→g[n+2] f ∘g (ghomotopy_group_EMadd1 G (n+1))⁻¹ᵍ },
-- { intro f, apply homomorphism_eq, },
-- { }
-- end
-- definition EMadd1_pmap {G : AbGroup} {X : Type*} (n : ℕ)
-- (e : Ω[succ n] X ≃* G)
-- (r : Πp q, e (p ⬝ q) = e p * e q)
-- [H1 : is_conn n X] [H2 : is_trunc (n.+1) X] : EMadd1 G n →* X :=
-- begin
-- revert X e r H1 H2, induction n with n f: intro X e r H1 H2,
-- { exact EM1_pmap e⁻¹ᵉ* (equiv.inv_preserve_binary e concat mul r) },
-- rewrite [EMadd1_succ],
-- exact ptrunc.elim ((succ n).+1)
-- (susp.elim (f _ (EM_up e) (is_mul_hom_EM_up e r) _ _)),
-- end
-- definition is_set_pmap_ptruncconntype {n : ℕ₋₂} (X Y : (n.+1)-Type*[n]) : is_set (X →* Y) :=
-- begin
-- apply is_trunc_succ_intro,
-- intro f g,
-- apply @(is_trunc_equiv_closed_rev -1 (pmap_eq_equiv f g)),
-- apply is_prop.mk,
-- exact sorry
-- end
end
section category
/- category -/
structure ptruncconntype' (n : ℕ₋₂) : Type :=
(A : Type*)
(H1 : is_conn n A)
(H2 : is_trunc (n+1) A)
attribute ptruncconntype'.A [coercion]
attribute ptruncconntype'.H1 ptruncconntype'.H2 [instance]
definition EM1_pequiv_ptruncconntype' (X : ptruncconntype' 0) : EM1 (πg[1] X) ≃* X :=
@(EM1_pequiv_type X) _ (ptruncconntype'.H2 X)
definition EMadd1_pequiv_ptruncconntype' {n : ℕ} (X : ptruncconntype' (n+1)) :
EMadd1 (πag[n+2] X) (succ n) ≃* X :=
@(EMadd1_pequiv_type X n) _ (ptruncconntype'.H2 X)
open trunc_index
definition is_set_pmap_ptruncconntype {n : ℕ₋₂} (X Y : ptruncconntype' n) : is_set (X →* Y) :=
begin
cases n with n, { exact _ },
cases Y with Y H1 H2, cases Y with Y y₀,
exact is_trunc_pmap_of_is_conn X n -1 _ (pointed.MK Y y₀) !le.refl H2,
end
open category functor nat_trans
definition precategory_ptruncconntype'.{u} [constructor] (n : ℕ₋₂) :
precategory.{u+1 u} (ptruncconntype' n) :=
begin
fapply precategory.mk,
{ exact λX Y, X →* Y },
{ exact is_set_pmap_ptruncconntype },
{ exact λX Y Z g f, g ∘* f },
{ exact λX, pid X },
{ intros, apply eq_of_phomotopy, exact !passoc⁻¹* },
{ intros, apply eq_of_phomotopy, apply pid_pcompose },
{ intros, apply eq_of_phomotopy, apply pcompose_pid }
end
definition cptruncconntype' [constructor] (n : ℕ₋₂) : Precategory :=
precategory.Mk (precategory_ptruncconntype' n)
notation `cType*[`:95 n `]`:0 := cptruncconntype' n
definition tEM1 [constructor] (G : Group) : ptruncconntype' 0 :=
ptruncconntype'.mk (EM1 G) _ !is_trunc_EM1
definition tEM [constructor] (G : AbGroup) (n : ℕ) : ptruncconntype' (n.-1) :=
ptruncconntype'.mk (EM G n) _ !is_trunc_EM
definition EM1_cfunctor : Grp ⇒ cType*[0] :=
functor.mk
(λG, tEM1 G)
(λG H φ, EM1_functor φ)
begin intro, fapply eq_of_phomotopy, apply EM1_functor_gid end
begin intros, fapply eq_of_phomotopy, apply EM1_functor_gcompose end
definition EM_cfunctor (n : ℕ) : AbGrp ⇒ cType*[n.-1] :=
functor.mk
(λG, tEM G n)
(λG H φ, EM_functor φ n)
begin intro, fapply eq_of_phomotopy, apply EM_functor_gid end
begin intros, fapply eq_of_phomotopy, apply EM_functor_gcompose end
definition homotopy_group_cfunctor : cType*[0] ⇒ Grp :=
functor.mk
(λX, πg[1] X)
(λX Y f, π→g[1] f)
begin intro, apply homomorphism_eq, exact to_homotopy !homotopy_group_functor_pid end
begin intros, apply homomorphism_eq, exact to_homotopy !homotopy_group_functor_compose end
definition ab_homotopy_group_cfunctor (n : ℕ) : cType*[n+2.-1] ⇒ AbGrp :=
functor.mk
(λX, πag[n+2] X)
(λX Y f, π→g[n+2] f)
begin intro, apply homomorphism_eq, exact to_homotopy !homotopy_group_functor_pid end
begin intros, apply homomorphism_eq, exact to_homotopy !homotopy_group_functor_compose end
definition is_equivalence_EM1_cfunctor.{u} : is_equivalence EM1_cfunctor.{u} :=
begin
fapply is_equivalence.mk,
{ exact homotopy_group_cfunctor.{u} },
{ fapply natural_iso.mk,
{ fapply nat_trans.mk,
{ intro G, exact (fundamental_group_EM1' G)⁻¹ᵍ },
{ intro G H φ, apply homomorphism_eq, exact hhinverse (homotopy_group_functor_EM1_functor φ) }},
{ intro G, fapply iso.is_iso.mk,
{ exact fundamental_group_EM1' G },
{ apply homomorphism_eq,
exact to_right_inv (equiv_of_isomorphism (fundamental_group_EM1' G)), },
{ apply homomorphism_eq,
exact to_left_inv (equiv_of_isomorphism (fundamental_group_EM1' G)), }}},
{ fapply natural_iso.mk,
{ fapply nat_trans.mk,
{ intro X, exact EM1_pequiv_ptruncconntype' X },
{ intro X Y f, apply eq_of_phomotopy, apply EM1_pequiv_type_natural }},
{ intro X, fapply iso.is_iso.mk,
{ exact (EM1_pequiv_ptruncconntype' X)⁻¹ᵉ* },
{ apply eq_of_phomotopy, apply pleft_inv },
{ apply eq_of_phomotopy, apply pright_inv }}}
end
definition is_equivalence_EM_cfunctor (n : ℕ) : is_equivalence (EM_cfunctor (n+2)) :=
begin
fapply is_equivalence.mk,
{ exact ab_homotopy_group_cfunctor n },
{ fapply natural_iso.mk,
{ fapply nat_trans.mk,
{ intro G, exact (ghomotopy_group_EMadd1' G (n+1))⁻¹ᵍ },
{ intro G H φ, apply homomorphism_eq, exact homotopy_group_functor_EMadd1_functor' φ (n+1) }},
{ intro G, fapply iso.is_iso.mk,
{ exact ghomotopy_group_EMadd1' G (n+1) },
{ apply homomorphism_eq,
exact to_right_inv (equiv_of_isomorphism (ghomotopy_group_EMadd1' G (n+1))), },
{ apply homomorphism_eq,
exact to_left_inv (equiv_of_isomorphism (ghomotopy_group_EMadd1' G (n+1))), }}},
{ fapply natural_iso.mk,
{ fapply nat_trans.mk,
{ intro X, exact EMadd1_pequiv_ptruncconntype' X },
{ intro X Y f, apply eq_of_phomotopy, apply EMadd1_pequiv_type_natural }},
{ intro X, fapply iso.is_iso.mk,
{ exact (EMadd1_pequiv_ptruncconntype' X)⁻¹ᵉ* },
{ apply eq_of_phomotopy, apply pleft_inv },
{ apply eq_of_phomotopy, apply pright_inv }}}
end
definition Grp_equivalence_cptruncconntype'.{u} [constructor] : Grp.{u} ≃c cType*[0] :=
equivalence.mk EM1_cfunctor.{u} is_equivalence_EM1_cfunctor.{u}
definition AbGrp_equivalence_cptruncconntype' [constructor] (n : ℕ) : AbGrp ≃c cType*[n+2.-1] :=
equivalence.mk (EM_cfunctor (n+2)) (is_equivalence_EM_cfunctor n)
end category
definition pequiv_EMadd1_of_loopn_pequiv_EM1 {G : AbGroup} {X : Type*} (n : ℕ) (e : Ω[n] X ≃* EM1 G)
[H1 : is_conn n X] : X ≃* EMadd1 G n :=
begin
symmetry, apply EMadd1_pequiv,
refine isomorphism_of_eq (ap (λx, πg[x+1] X) !zero_add⁻¹) ⬝g homotopy_group_add X 0 n ⬝g _ ⬝g
!fundamental_group_EM1,
exact homotopy_group_isomorphism_of_pequiv 0 e,
refine is_trunc_of_is_trunc_loopn n 1 X _ _,
apply is_trunc_equiv_closed_rev 1 e
end
definition EM1_pequiv_EM1 {G H : Group} (φ : G ≃g H) : EM1 G ≃* EM1 H :=
pequiv.MK (EM1_functor φ) (EM1_functor φ⁻¹ᵍ)
abstract (EM1_functor_gcompose φ⁻¹ᵍ φ)⁻¹* ⬝* EM1_functor_phomotopy proof left_inv φ qed ⬝*
EM1_functor_gid G end
abstract (EM1_functor_gcompose φ φ⁻¹ᵍ)⁻¹* ⬝* EM1_functor_phomotopy proof right_inv φ qed ⬝*
EM1_functor_gid H end
definition is_contr_EM1 {G : Group} (H : is_contr G) : is_contr (EM1 G) :=
is_contr_of_is_conn_of_is_trunc
(is_trunc_succ_succ_of_is_trunc_loop _ _ (is_trunc_equiv_closed _ !loop_EM1) _) !is_conn_EM1
definition is_contr_EMadd1 (n : ℕ) {G : AbGroup} (H : is_contr G) : is_contr (EMadd1 G n) :=
begin
induction n with n IH,
{ exact is_contr_EM1 H },
{ have is_contr (ptrunc (n+2) (susp (EMadd1 G n))), from _,
exact this }
end
definition is_contr_EM (n : ℕ) {G : AbGroup} (H : is_contr G) : is_contr (K G n) :=
begin
cases n with n,
{ exact H },
{ exact is_contr_EMadd1 n H }
end
definition EMadd1_pequiv_EMadd1 (n : ℕ) {G H : AbGroup} (φ : G ≃g H) : EMadd1 G n ≃* EMadd1 H n :=
pequiv.MK (EMadd1_functor φ n) (EMadd1_functor φ⁻¹ᵍ n)
abstract (EMadd1_functor_gcompose φ⁻¹ᵍ φ n)⁻¹* ⬝* EMadd1_functor_phomotopy proof left_inv φ qed n ⬝*
EMadd1_functor_gid G n end
abstract (EMadd1_functor_gcompose φ φ⁻¹ᵍ n)⁻¹* ⬝* EMadd1_functor_phomotopy proof right_inv φ qed n ⬝*
EMadd1_functor_gid H n end
definition EM_pequiv_EM (n : ℕ) {G H : AbGroup} (φ : G ≃g H) : K G n ≃* K H n :=
begin
cases n with n,
{ exact pequiv_of_isomorphism φ },
{ exact EMadd1_pequiv_EMadd1 n φ }
end
definition ppi_EMadd1 {X : Type*} (Y : X → Type*) (n : ℕ) :
(Π*(x : X), EMadd1 (πag[n+2] (Y x)) (n+1)) ≃* EMadd1 (πag[n+2] (Π*(x : X), Y x)) (n+1) :=
begin
exact sorry
end
--EM_spectrum (πₛ[s] (spi X Y)) k ≃* spi X (λx, EM_spectrum (πₛ[s] (Y x))) k
/- fiber of EM_functor -/
open fiber
definition is_trunc_fiber_EM1_functor {G H : Group} (φ : G →g H) :
is_trunc 1 (pfiber (EM1_functor φ)) :=
!is_trunc_fiber
definition is_conn_fiber_EM1_functor {G H : Group} (φ : G →g H) :
is_conn -1 (pfiber (EM1_functor φ)) :=
begin
apply is_conn_fiber
end
definition is_trunc_fiber_EMadd1_functor {G H : AbGroup} (φ : G →g H) (n : ℕ) :
is_trunc (n+1) (pfiber (EMadd1_functor φ n)) :=
begin
apply is_trunc_fiber
end
definition is_conn_fiber_EMadd1_functor {G H : AbGroup} (φ : G →g H) (n : ℕ) :
is_conn (n.-1) (pfiber (EMadd1_functor φ n)) :=
begin
apply is_conn_fiber, apply is_conn_of_is_conn_succ, apply is_conn_EMadd1,
apply is_conn_EMadd1
end
definition is_trunc_fiber_EM_functor {G H : AbGroup} (φ : G →g H) (n : ℕ) :
is_trunc n (pfiber (EM_functor φ n)) :=
begin
apply is_trunc_fiber
end
definition is_conn_fiber_EM_functor {G H : AbGroup} (φ : G →g H) (n : ℕ) :
is_conn (n.-2) (pfiber (EM_functor φ n)) :=
begin
apply is_conn_fiber, apply is_conn_of_is_conn_succ
end
section
open chain_complex prod fin
/- TODO: other cases -/
definition LES_isomorphism_kernel_of_trivial.{u}
{X Y : pType.{u}} (f : X →* Y) (n : ℕ) [H : is_succ n]
(H1 : is_contr (πg[n+1] Y)) : πg[n] (pfiber f) ≃g Kernel (π→g[n] f) :=
begin
induction H with n,
have H2 : is_exact (π→g[n+1] (ppoint f)) (π→g[n+1] f),
from is_exact_of_is_exact_at (is_exact_LES_of_homotopy_groups f (n+1, 0)),
have H3 : is_exact (π→g[n+1] (boundary_map f) ∘g ghomotopy_group_succ_in Y n)
(π→g[n+1] (ppoint f)),
from is_exact_of_is_exact_at (is_exact_LES_of_homotopy_groups f (n+1, 1)),
exact isomorphism_kernel_of_is_exact H3 H2 H1
end
end
open group algebra is_trunc
definition homotopy_group_fiber_EM1_functor.{u} {G H : Group.{u}} (φ : G →g H) :
π₁ (pfiber (EM1_functor φ)) ≃g Kernel φ :=
have H1 : is_trunc 1 (EM1 H), from sorry,
have H2 : 1 <[ℕ] 1 + 1, from sorry,
LES_isomorphism_kernel_of_trivial (EM1_functor φ) 1
(@trivial_homotopy_group_of_is_trunc _ 1 2 H1 H2) ⬝g
sorry
definition homotopy_group_fiber_EMadd1_functor {G H : AbGroup} (φ : G →g H) (n : ℕ) :
πg[n+1] (pfiber (EMadd1_functor φ n)) ≃g Kernel φ :=
sorry
/- TODO: move -/
definition cokernel {G H : AbGroup} (φ : G →g H) : AbGroup :=
quotient_ab_group (image φ)
definition trunc_fiber_EM1_functor {G H : Group} (φ : G →g H) :
ptrunc 0 (pfiber (EM1_functor φ)) ≃* sorry :=
sorry
end EM
|
011b2195bfa50bc7732370f16d66e7b10edb6d62
|
302c785c90d40ad3d6be43d33bc6a558354cc2cf
|
/src/analysis/normed_space/units.lean
|
46f599066435aad9c364a6e6a6e75c7a349b43af
|
[
"Apache-2.0"
] |
permissive
|
ilitzroth/mathlib
|
ea647e67f1fdfd19a0f7bdc5504e8acec6180011
|
5254ef14e3465f6504306132fe3ba9cec9ffff16
|
refs/heads/master
| 1,680,086,661,182
| 1,617,715,647,000
| 1,617,715,647,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 11,044
|
lean
|
/-
Copyright (c) 2020 Heather Macbeth. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Heather Macbeth
-/
import analysis.specific_limits
import analysis.asymptotics.asymptotics
/-!
# The group of units of a complete normed ring
This file contains the basic theory for the group of units (invertible elements) of a complete
normed ring (Banach algebras being a notable special case).
## Main results
The constructions `one_sub`, `add` and `unit_of_nearby` state, in varying forms, that perturbations
of a unit are units. The latter two are not stated in their optimal form; more precise versions
would use the spectral radius.
The first main result is `is_open`: the group of units of a complete normed ring is an open subset
of the ring.
The function `inverse` (defined in `algebra.ring`), for a ring `R`, sends `a : R` to `a⁻¹` if `a` is
a unit and 0 if not. The other major results of this file (notably `inverse_add`,
`inverse_add_norm` and `inverse_add_norm_diff_nth_order`) cover the asymptotic properties of
`inverse (x + t)` as `t → 0`.
-/
noncomputable theory
open_locale topological_space
variables {R : Type*} [normed_ring R] [complete_space R]
namespace units
/-- In a complete normed ring, a perturbation of `1` by an element `t` of distance less than `1`
from `1` is a unit. Here we construct its `units` structure. -/
def one_sub (t : R) (h : ∥t∥ < 1) : units R :=
{ val := 1 - t,
inv := ∑' n : ℕ, t ^ n,
val_inv := mul_neg_geom_series t h,
inv_val := geom_series_mul_neg t h }
@[simp] lemma one_sub_coe (t : R) (h : ∥t∥ < 1) : ↑(one_sub t h) = 1 - t := rfl
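/- A sketch of why `one_sub` works: when `∥t∥ < 1` the geometric series `∑' n : ℕ, t ^ n`
converges in the complete ring `R` (its terms are dominated in norm by the real series
`∑ ∥t∥ ^ n`), and the telescoping identities `(1 - t) * ∑' n, t ^ n = 1` and
`(∑' n, t ^ n) * (1 - t) = 1` are exactly the two proof fields `mul_neg_geom_series` and
`geom_series_mul_neg` used above. -/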
/-- In a complete normed ring, a perturbation of a unit `x` by an element `t` of distance less than
`∥x⁻¹∥⁻¹` from `x` is a unit. Here we construct its `units` structure. -/
def add (x : units R) (t : R) (h : ∥t∥ < ∥(↑x⁻¹ : R)∥⁻¹) : units R :=
x * (units.one_sub (-(↑x⁻¹ * t))
begin
nontriviality R using [zero_lt_one],
have hpos : 0 < ∥(↑x⁻¹ : R)∥ := units.norm_pos x⁻¹,
calc ∥-(↑x⁻¹ * t)∥
= ∥↑x⁻¹ * t∥ : by { rw norm_neg }
... ≤ ∥(↑x⁻¹ : R)∥ * ∥t∥ : norm_mul_le ↑x⁻¹ _
... < ∥(↑x⁻¹ : R)∥ * ∥(↑x⁻¹ : R)∥⁻¹ : by nlinarith only [h, hpos]
... = 1 : mul_inv_cancel (ne_of_gt hpos)
end)
@[simp] lemma add_coe (x : units R) (t : R) (h : ∥t∥ < ∥(↑x⁻¹ : R)∥⁻¹) :
((x.add t h) : R) = x + t := by { unfold units.add, simp [mul_add] }
/-- In a complete normed ring, an element `y` of distance less than `∥x⁻¹∥⁻¹` from `x` is a unit.
Here we construct its `units` structure. -/
def unit_of_nearby (x : units R) (y : R) (h : ∥y - x∥ < ∥(↑x⁻¹ : R)∥⁻¹) : units R :=
x.add ((y : R) - x) h
@[simp] lemma unit_of_nearby_coe (x : units R) (y : R) (h : ∥y - x∥ < ∥(↑x⁻¹ : R)∥⁻¹) :
↑(x.unit_of_nearby y h) = y := by { unfold units.unit_of_nearby, simp }
/-- The group of units of a complete normed ring is an open subset of the ring. -/
protected lemma is_open : is_open {x : R | is_unit x} :=
begin
nontriviality R,
apply metric.is_open_iff.mpr,
rintros x' ⟨x, rfl⟩,
refine ⟨∥(↑x⁻¹ : R)∥⁻¹, inv_pos.mpr (units.norm_pos x⁻¹), _⟩,
intros y hy,
rw [metric.mem_ball, dist_eq_norm] at hy,
exact ⟨x.unit_of_nearby y hy, unit_of_nearby_coe _ _ _⟩
end
protected lemma nhds (x : units R) : {x : R | is_unit x} ∈ 𝓝 (x : R) :=
mem_nhds_sets units.is_open x.is_unit
end units
namespace normed_ring
open_locale classical big_operators
open asymptotics filter metric finset ring
lemma inverse_one_sub (t : R) (h : ∥t∥ < 1) : inverse (1 - t) = ↑(units.one_sub t h)⁻¹ :=
by rw [← inverse_unit (units.one_sub t h), units.one_sub_coe]
/-- The formula `inverse (x + t) = inverse (1 + x⁻¹ * t) * x⁻¹` holds for `t` sufficiently small. -/
lemma inverse_add (x : units R) :
∀ᶠ t in (𝓝 0), inverse ((x : R) + t) = inverse (1 + ↑x⁻¹ * t) * ↑x⁻¹ :=
begin
nontriviality R,
rw [eventually_iff, mem_nhds_iff],
have hinv : 0 < ∥(↑x⁻¹ : R)∥⁻¹, by cancel_denoms,
use [∥(↑x⁻¹ : R)∥⁻¹, hinv],
intros t ht,
simp only [mem_ball, dist_zero_right] at ht,
have ht' : ∥-↑x⁻¹ * t∥ < 1,
{ refine lt_of_le_of_lt (norm_mul_le _ _) _,
rw norm_neg,
refine lt_of_lt_of_le (mul_lt_mul_of_pos_left ht x⁻¹.norm_pos) _,
cancel_denoms },
have hright := inverse_one_sub (-↑x⁻¹ * t) ht',
have hleft := inverse_unit (x.add t ht),
simp only [← neg_mul_eq_neg_mul, sub_neg_eq_add] at hright,
simp only [units.add_coe] at hleft,
simp [hleft, hright, units.add]
end
lemma inverse_one_sub_nth_order (n : ℕ) :
∀ᶠ t in (𝓝 0), inverse ((1:R) - t) = (∑ i in range n, t ^ i) + (t ^ n) * inverse (1 - t) :=
begin
simp only [eventually_iff, mem_nhds_iff],
use [1, by norm_num],
intros t ht,
simp only [mem_ball, dist_zero_right] at ht,
simp only [inverse_one_sub t ht, set.mem_set_of_eq],
have h : 1 = ((range n).sum (λ i, t ^ i)) * (units.one_sub t ht) + t ^ n,
{ simp only [units.one_sub_coe],
rw [← geom_sum, geom_sum_mul_neg],
simp },
rw [← one_mul ↑(units.one_sub t ht)⁻¹, h, add_mul],
congr,
{ rw [mul_assoc, (units.one_sub t ht).mul_inv],
simp },
{ simp only [units.one_sub_coe],
rw [← add_mul, ← geom_sum, geom_sum_mul_neg],
simp }
end
/-- The formula
`inverse (x + t) = (∑ i in range n, (- x⁻¹ * t) ^ i) * x⁻¹ + (- x⁻¹ * t) ^ n * inverse (x + t)`
holds for `t` sufficiently small. -/
lemma inverse_add_nth_order (x : units R) (n : ℕ) :
∀ᶠ t in (𝓝 0), inverse ((x : R) + t)
= (∑ i in range n, (- ↑x⁻¹ * t) ^ i) * ↑x⁻¹ + (- ↑x⁻¹ * t) ^ n * inverse (x + t) :=
begin
refine (inverse_add x).mp _,
have hzero : tendsto (λ (t : R), - ↑x⁻¹ * t) (𝓝 0) (𝓝 0),
{ convert ((mul_left_continuous (- (↑x⁻¹ : R))).tendsto 0).comp tendsto_id,
simp },
refine (hzero.eventually (inverse_one_sub_nth_order n)).mp (eventually_of_forall _),
simp only [neg_mul_eq_neg_mul_symm, sub_neg_eq_add],
intros t h1 h2,
have h := congr_arg (λ (a : R), a * ↑x⁻¹) h1,
dsimp at h,
convert h,
rw [add_mul, mul_assoc],
simp [h2.symm]
end
lemma inverse_one_sub_norm : is_O (λ t, inverse ((1:R) - t)) (λ t, (1:ℝ)) (𝓝 (0:R)) :=
begin
simp only [is_O, is_O_with, eventually_iff, mem_nhds_iff],
refine ⟨∥(1:R)∥ + 1, (2:ℝ)⁻¹, by norm_num, _⟩,
intros t ht,
simp only [ball, dist_zero_right, set.mem_set_of_eq] at ht,
have ht' : ∥t∥ < 1,
{ have : (2:ℝ)⁻¹ < 1 := by cancel_denoms,
linarith },
simp only [inverse_one_sub t ht', norm_one, mul_one, set.mem_set_of_eq],
change ∥∑' n : ℕ, t ^ n∥ ≤ _,
have := normed_ring.tsum_geometric_of_norm_lt_1 t ht',
have : (1 - ∥t∥)⁻¹ ≤ 2,
{ rw ← inv_inv' (2:ℝ),
refine inv_le_inv_of_le (by norm_num) _,
have : (2:ℝ)⁻¹ + (2:ℝ)⁻¹ = 1 := by ring,
linarith },
linarith
end
/-- The function `λ t, inverse (x + t)` is O(1) as `t → 0`. -/
lemma inverse_add_norm (x : units R) : is_O (λ t, inverse (↑x + t)) (λ t, (1:ℝ)) (𝓝 (0:R)) :=
begin
nontriviality R,
simp only [is_O_iff, norm_one, mul_one],
cases is_O_iff.mp (@inverse_one_sub_norm R _ _) with C hC,
use C * ∥((x⁻¹:units R):R)∥,
have hzero : tendsto (λ t, - (↑x⁻¹ : R) * t) (𝓝 0) (𝓝 0),
{ convert ((mul_left_continuous (-↑x⁻¹ : R)).tendsto 0).comp tendsto_id,
simp },
refine (inverse_add x).mp ((hzero.eventually hC).mp (eventually_of_forall _)),
intros t bound iden,
rw iden,
simp at bound,
have hmul := norm_mul_le (inverse (1 + ↑x⁻¹ * t)) ↑x⁻¹,
nlinarith [norm_nonneg (↑x⁻¹ : R)]
end
/-- The function
`λ t, inverse (x + t) - (∑ i in range n, (- x⁻¹ * t) ^ i) * x⁻¹`
is `O(t ^ n)` as `t → 0`. -/
lemma inverse_add_norm_diff_nth_order (x : units R) (n : ℕ) :
is_O (λ (t : R), inverse (↑x + t) - (∑ i in range n, (- ↑x⁻¹ * t) ^ i) * ↑x⁻¹)
(λ t, ∥t∥ ^ n) (𝓝 (0:R)) :=
begin
by_cases h : n = 0,
{ simpa [h] using inverse_add_norm x },
have hn : 0 < n := nat.pos_of_ne_zero h,
simp [is_O_iff],
cases (is_O_iff.mp (inverse_add_norm x)) with C hC,
use C * ∥(1:ℝ)∥ * ∥(↑x⁻¹ : R)∥ ^ n,
have h : eventually_eq (𝓝 (0:R))
(λ t, inverse (↑x + t) - (∑ i in range n, (- ↑x⁻¹ * t) ^ i) * ↑x⁻¹)
(λ t, ((- ↑x⁻¹ * t) ^ n) * inverse (x + t)),
{ refine (inverse_add_nth_order x n).mp (eventually_of_forall _),
intros t ht,
convert congr_arg (λ a, a - (range n).sum (pow (-↑x⁻¹ * t)) * ↑x⁻¹) ht,
simp },
refine h.mp (hC.mp (eventually_of_forall _)),
intros t _ hLHS,
simp only [neg_mul_eq_neg_mul_symm] at hLHS,
rw hLHS,
refine le_trans (norm_mul_le _ _ ) _,
have h' : ∥(-(↑x⁻¹ * t)) ^ n∥ ≤ ∥(↑x⁻¹ : R)∥ ^ n * ∥t∥ ^ n,
{ calc ∥(-(↑x⁻¹ * t)) ^ n∥ ≤ ∥(-(↑x⁻¹ * t))∥ ^ n : norm_pow_le' _ hn
... = ∥↑x⁻¹ * t∥ ^ n : by rw norm_neg
... ≤ (∥(↑x⁻¹ : R)∥ * ∥t∥) ^ n : _
... = ∥(↑x⁻¹ : R)∥ ^ n * ∥t∥ ^ n : mul_pow _ _ n,
exact pow_le_pow_of_le_left (norm_nonneg _) (norm_mul_le ↑x⁻¹ t) n },
have h'' : 0 ≤ ∥(↑x⁻¹ : R)∥ ^ n * ∥t∥ ^ n,
{ refine mul_nonneg _ _;
exact pow_nonneg (norm_nonneg _) n },
nlinarith [norm_nonneg (inverse (↑x + t))],
end
/-- The function `λ t, inverse (x + t) - x⁻¹` is `O(t)` as `t → 0`. -/
lemma inverse_add_norm_diff_first_order (x : units R) :
is_O (λ t, inverse (↑x + t) - ↑x⁻¹) (λ t, ∥t∥) (𝓝 (0:R)) :=
by { convert inverse_add_norm_diff_nth_order x 1; simp }
/-- The function
`λ t, inverse (x + t) - x⁻¹ + x⁻¹ * t * x⁻¹`
is `O(t ^ 2)` as `t → 0`. -/
lemma inverse_add_norm_diff_second_order (x : units R) :
is_O (λ t, inverse (↑x + t) - ↑x⁻¹ + ↑x⁻¹ * t * ↑x⁻¹) (λ t, ∥t∥ ^ 2) (𝓝 (0:R)) :=
begin
convert inverse_add_norm_diff_nth_order x 2,
ext t,
simp only [range_succ, range_one, sum_insert, mem_singleton, sum_singleton, not_false_iff,
one_ne_zero, pow_zero, add_mul],
abel,
simp
end
/-- The function `inverse` is continuous at each unit of `R`. -/
lemma inverse_continuous_at (x : units R) : continuous_at inverse (x : R) :=
begin
have h_is_o : is_o (λ (t : R), ∥inverse (↑x + t) - ↑x⁻¹∥) (λ (t : R), (1:ℝ)) (𝓝 0),
{ refine is_o_norm_left.mpr ((inverse_add_norm_diff_first_order x).trans_is_o _),
exact is_o_norm_left.mpr (is_o_id_const one_ne_zero) },
have h_lim : tendsto (λ (y:R), y - x) (𝓝 x) (𝓝 0),
{ refine tendsto_zero_iff_norm_tendsto_zero.mpr _,
exact tendsto_iff_norm_tendsto_zero.mp tendsto_id },
simp only [continuous_at],
rw [tendsto_iff_norm_tendsto_zero, inverse_unit],
convert h_is_o.tendsto_0.comp h_lim,
ext, simp
end
end normed_ring
|
d5ad95680841b092ecb26acfc21c0150435387df
|
a45212b1526d532e6e83c44ddca6a05795113ddc
|
/src/category_theory/limits/limits.lean
|
3cd53489a99dd70488720145232d315b056b2b04
|
[
"Apache-2.0"
] |
permissive
|
fpvandoorn/mathlib
|
b21ab4068db079cbb8590b58fda9cc4bc1f35df4
|
b3433a51ea8bc07c4159c1073838fc0ee9b8f227
|
refs/heads/master
| 1,624,791,089,608
| 1,556,715,231,000
| 1,556,715,231,000
| 165,722,980
| 5
| 0
|
Apache-2.0
| 1,552,657,455,000
| 1,547,494,646,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 27,902
|
lean
|
-- Copyright (c) 2018 Scott Morrison. All rights reserved.
-- Released under Apache 2.0 license as described in the file LICENSE.
-- Authors: Reid Barton, Mario Carneiro, Scott Morrison
import category_theory.whiskering
import category_theory.yoneda
import category_theory.limits.cones
open category_theory category_theory.category category_theory.functor
namespace category_theory.limits
universes v u u' u'' w -- declare the `v`'s first; see `category_theory.category` for an explanation
-- See the notes at the top of cones.lean, explaining why we can't allow `J : Prop` here.
variables {J : Type v} [small_category J]
variables {C : Sort u} [𝒞 : category.{v+1} C]
include 𝒞
variables {F : J ⥤ C}
/-- A cone `t` on `F` is a limit cone if each cone on `F` admits a unique
cone morphism to `t`. -/
structure is_limit (t : cone F) :=
(lift : Π (s : cone F), s.X ⟶ t.X)
(fac' : ∀ (s : cone F) (j : J), lift s ≫ t.π.app j = s.π.app j . obviously)
(uniq' : ∀ (s : cone F) (m : s.X ⟶ t.X) (w : ∀ j : J, m ≫ t.π.app j = s.π.app j),
m = lift s . obviously)
restate_axiom is_limit.fac'
attribute [simp] is_limit.fac
restate_axiom is_limit.uniq'
namespace is_limit
instance subsingleton {t : cone F} : subsingleton (is_limit t) :=
⟨by intros P Q; cases P; cases Q; congr; ext; solve_by_elim⟩
/- Repackaging the definition in terms of cone morphisms. -/
def lift_cone_morphism {t : cone F} (h : is_limit t) (s : cone F) : s ⟶ t :=
{ hom := h.lift s }
lemma uniq_cone_morphism {s t : cone F} (h : is_limit t) {f f' : s ⟶ t} :
f = f' :=
have ∀ {g : s ⟶ t}, g = h.lift_cone_morphism s, by intro g; ext; exact h.uniq _ _ g.w,
this.trans this.symm
def mk_cone_morphism {t : cone F}
(lift : Π (s : cone F), s ⟶ t)
(uniq' : ∀ (s : cone F) (m : s ⟶ t), m = lift s) : is_limit t :=
{ lift := λ s, (lift s).hom,
uniq' := λ s m w,
have cone_morphism.mk m w = lift s, by apply uniq',
congr_arg cone_morphism.hom this }
/-- Limit cones on `F` are unique up to isomorphism. -/
def unique {s t : cone F} (P : is_limit s) (Q : is_limit t) : s ≅ t :=
{ hom := Q.lift_cone_morphism s,
inv := P.lift_cone_morphism t,
hom_inv_id' := P.uniq_cone_morphism,
inv_hom_id' := Q.uniq_cone_morphism }
def of_iso_limit {r t : cone F} (P : is_limit r) (i : r ≅ t) : is_limit t :=
is_limit.mk_cone_morphism
(λ s, P.lift_cone_morphism s ≫ i.hom)
(λ s m, by rw ←i.comp_inv_eq; apply P.uniq_cone_morphism)
variables {t : cone F}
lemma hom_lift (h : is_limit t) {W : C} (m : W ⟶ t.X) :
m = h.lift { X := W, π := { app := λ b, m ≫ t.π.app b } } :=
h.uniq { X := W, π := { app := λ b, m ≫ t.π.app b } } m (λ b, rfl)
/-- Two morphisms into a limit are equal if their compositions with
each cone morphism are equal. -/
lemma hom_ext (h : is_limit t) {W : C} {f f' : W ⟶ t.X}
(w : ∀ j, f ≫ t.π.app j = f' ≫ t.π.app j) : f = f' :=
by rw [h.hom_lift f, h.hom_lift f']; congr; exact funext w
/-- The universal property of a limit cone: a map `W ⟶ X` is the same as
a cone on `F` with vertex `W`. -/
def hom_iso (h : is_limit t) (W : C) : (W ⟶ t.X) ≅ ((const J).obj W ⟶ F) :=
{ hom := λ f, (t.extend f).π,
inv := λ π, h.lift { X := W, π := π },
hom_inv_id' := by ext f; apply h.hom_ext; intro j; simp; dsimp; refl }
@[simp] lemma hom_iso_hom (h : is_limit t) {W : C} (f : W ⟶ t.X) :
(is_limit.hom_iso h W).hom f = (t.extend f).π := rfl
/-- The limit of `F` represents the functor taking `W` to
the set of cones on `F` with vertex `W`. -/
def nat_iso (h : is_limit t) : yoneda.obj t.X ≅ F.cones :=
nat_iso.of_components (λ W, is_limit.hom_iso h (unop W)) (by tidy)
def hom_iso' (h : is_limit t) (W : C) :
((W ⟶ t.X) : Type v) ≅ { p : Π j, W ⟶ F.obj j // ∀ {j j'} (f : j ⟶ j'), p j ≫ F.map f = p j' } :=
h.hom_iso W ≪≫
{ hom := λ π,
⟨λ j, π.app j, λ j j' f,
by convert ←(π.naturality f).symm; apply id_comp⟩,
inv := λ p,
{ app := λ j, p.1 j,
naturality' := λ j j' f, begin dsimp, rw [id_comp], exact (p.2 f).symm end } }
/-- If G : C → D is a faithful functor which sends t to a limit cone,
then it suffices to check that the induced maps for the image of t
can be lifted to maps of C. -/
def of_faithful {t : cone F} {D : Sort u'} [category.{v+1} D] (G : C ⥤ D) [faithful G]
(ht : is_limit (G.map_cone t)) (lift : Π (s : cone F), s.X ⟶ t.X)
(h : ∀ s, G.map (lift s) = ht.lift (G.map_cone s)) : is_limit t :=
{ lift := lift,
fac' := λ s j, by apply G.injectivity; rw [G.map_comp, h]; apply ht.fac,
uniq' := λ s m w, begin
apply G.injectivity, rw h,
refine ht.uniq (G.map_cone s) _ (λ j, _),
convert ←congr_arg (λ f, G.map f) (w j),
apply G.map_comp
end }
end is_limit
def is_limit_iso_unique_cone_morphism {t : cone F} :
is_limit t ≅ Π s, unique (s ⟶ t) :=
{ hom := λ h s,
{ default := h.lift_cone_morphism s,
uniq := λ _, h.uniq_cone_morphism },
inv := λ h,
{ lift := λ s, (h s).default.hom,
uniq' := λ s f w, congr_arg cone_morphism.hom ((h s).uniq ⟨f, w⟩) } }
/-- A cocone `t` on `F` is a colimit cocone if each cocone on `F` admits a unique
cocone morphism from `t`. -/
structure is_colimit (t : cocone F) :=
(desc : Π (s : cocone F), t.X ⟶ s.X)
(fac' : ∀ (s : cocone F) (j : J), t.ι.app j ≫ desc s = s.ι.app j . obviously)
(uniq' : ∀ (s : cocone F) (m : t.X ⟶ s.X) (w : ∀ j : J, t.ι.app j ≫ m = s.ι.app j),
m = desc s . obviously)
restate_axiom is_colimit.fac'
attribute [simp] is_colimit.fac
restate_axiom is_colimit.uniq'
namespace is_colimit
instance subsingleton {t : cocone F} : subsingleton (is_colimit t) :=
⟨by intros P Q; cases P; cases Q; congr; ext; solve_by_elim⟩
/- Repackaging the definition in terms of cocone morphisms. -/
def desc_cocone_morphism {t : cocone F} (h : is_colimit t) (s : cocone F) : t ⟶ s :=
{ hom := h.desc s }
lemma uniq_cocone_morphism {s t : cocone F} (h : is_colimit t) {f f' : t ⟶ s} :
f = f' :=
have ∀ {g : t ⟶ s}, g = h.desc_cocone_morphism s, by intro g; ext; exact h.uniq _ _ g.w,
this.trans this.symm
def mk_cocone_morphism {t : cocone F}
(desc : Π (s : cocone F), t ⟶ s)
(uniq' : ∀ (s : cocone F) (m : t ⟶ s), m = desc s) : is_colimit t :=
{ desc := λ s, (desc s).hom,
uniq' := λ s m w,
have cocone_morphism.mk m w = desc s, by apply uniq',
congr_arg cocone_morphism.hom this }
/-- Colimit cocones on `F` are unique up to isomorphism. -/
def unique {s t : cocone F} (P : is_colimit s) (Q : is_colimit t) : s ≅ t :=
{ hom := P.desc_cocone_morphism t,
inv := Q.desc_cocone_morphism s,
hom_inv_id' := P.uniq_cocone_morphism,
inv_hom_id' := Q.uniq_cocone_morphism }
def of_iso_colimit {r t : cocone F} (P : is_colimit r) (i : r ≅ t) : is_colimit t :=
is_colimit.mk_cocone_morphism
(λ s, i.inv ≫ P.desc_cocone_morphism s)
(λ s m, by rw i.eq_inv_comp; apply P.uniq_cocone_morphism)
variables {t : cocone F}
lemma hom_desc (h : is_colimit t) {W : C} (m : t.X ⟶ W) :
m = h.desc { X := W, ι := { app := λ b, t.ι.app b ≫ m,
naturality' := by intros; erw [←assoc, t.ι.naturality, comp_id, comp_id] } } :=
h.uniq { X := W, ι := { app := λ b, t.ι.app b ≫ m, naturality' := _ } } m (λ b, rfl)
/-- Two morphisms out of a colimit are equal if their compositions with
each cocone morphism are equal. -/
lemma hom_ext (h : is_colimit t) {W : C} {f f' : t.X ⟶ W}
(w : ∀ j, t.ι.app j ≫ f = t.ι.app j ≫ f') : f = f' :=
by rw [h.hom_desc f, h.hom_desc f']; congr; exact funext w
/-- The universal property of a colimit cocone: a map `X ⟶ W` is the same as
a cocone on `F` with vertex `W`. -/
def hom_iso (h : is_colimit t) (W : C) : (t.X ⟶ W) ≅ (F ⟶ (const J).obj W) :=
{ hom := λ f, (t.extend f).ι,
inv := λ ι, h.desc { X := W, ι := ι },
hom_inv_id' := by ext f; apply h.hom_ext; intro j; simp; dsimp; refl }
@[simp] lemma hom_iso_hom (h : is_colimit t) {W : C} (f : t.X ⟶ W) :
(is_colimit.hom_iso h W).hom f = (t.extend f).ι := rfl
/-- The colimit of `F` represents the functor taking `W` to
the set of cocones on `F` with vertex `W`. -/
def nat_iso (h : is_colimit t) : coyoneda.obj (op t.X) ≅ F.cocones :=
nat_iso.of_components (is_colimit.hom_iso h) (by intros; ext; dsimp; rw ←assoc; refl)
def hom_iso' (h : is_colimit t) (W : C) :
((t.X ⟶ W) : Type v) ≅ { p : Π j, F.obj j ⟶ W // ∀ {j j' : J} (f : j ⟶ j'), F.map f ≫ p j' = p j } :=
h.hom_iso W ≪≫
{ hom := λ ι,
⟨λ j, ι.app j, λ j j' f,
by convert ←(ι.naturality f); apply comp_id⟩,
inv := λ p,
{ app := λ j, p.1 j,
naturality' := λ j j' f, begin dsimp, rw [comp_id], exact (p.2 f) end } }
/-- If G : C → D is a faithful functor which sends t to a colimit cocone,
then it suffices to check that the induced maps for the image of t
can be lifted to maps of C. -/
def of_faithful {t : cocone F} {D : Sort u'} [category.{v+1} D] (G : C ⥤ D) [faithful G]
(ht : is_colimit (G.map_cocone t)) (desc : Π (s : cocone F), t.X ⟶ s.X)
(h : ∀ s, G.map (desc s) = ht.desc (G.map_cocone s)) : is_colimit t :=
{ desc := desc,
fac' := λ s j, by apply G.injectivity; rw [G.map_comp, h]; apply ht.fac,
uniq' := λ s m w, begin
apply G.injectivity, rw h,
refine ht.uniq (G.map_cocone s) _ (λ j, _),
convert ←congr_arg (λ f, G.map f) (w j),
apply G.map_comp
end }
end is_colimit
def is_colimit_iso_unique_cocone_morphism {t : cocone F} :
is_colimit t ≅ Π s, unique (t ⟶ s) :=
{ hom := λ h s,
{ default := h.desc_cocone_morphism s,
uniq := λ _, h.uniq_cocone_morphism },
inv := λ h,
{ desc := λ s, (h s).default.hom,
uniq' := λ s f w, congr_arg cocone_morphism.hom ((h s).uniq ⟨f, w⟩) } }
section limit
/-- `has_limit F` represents a particular chosen limit of the diagram `F`. -/
class has_limit (F : J ⥤ C) :=
(cone : cone F)
(is_limit : is_limit cone)
variables (J C)
/-- `C` has limits of shape `J` if we have chosen a particular limit of
every functor `F : J ⥤ C`. -/
class has_limits_of_shape :=
(has_limit : Π F : J ⥤ C, has_limit F)
/-- `C` has all (small) limits if it has limits of every shape. -/
class has_limits :=
(has_limits_of_shape : Π (J : Type v) [𝒥 : small_category J], has_limits_of_shape J C)
variables {J C}
instance has_limit_of_has_limits_of_shape
{J : Type v} [small_category J] [H : has_limits_of_shape J C] (F : J ⥤ C) : has_limit F :=
has_limits_of_shape.has_limit F
instance has_limits_of_shape_of_has_limits
{J : Type v} [small_category J] [H : has_limits.{v} C] : has_limits_of_shape J C :=
has_limits.has_limits_of_shape C J
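-- Quick sanity check of the instance chain just set up (a sketch): in the presence
-- of `has_limits`, any diagram `G : J ⥤ C` has a chosen limit, found by typeclass
-- resolution through `has_limits_of_shape`.
example [has_limits.{v} C] (G : J ⥤ C) : has_limit G :=
by apply_instance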
/- Interface to the `has_limit` class. -/
def limit.cone (F : J ⥤ C) [has_limit F] : cone F := has_limit.cone F
def limit (F : J ⥤ C) [has_limit F] := (limit.cone F).X
def limit.π (F : J ⥤ C) [has_limit F] (j : J) : limit F ⟶ F.obj j :=
(limit.cone F).π.app j
@[simp] lemma limit.cone_π {F : J ⥤ C} [has_limit F] (j : J) :
(limit.cone F).π.app j = limit.π _ j := rfl
@[simp] lemma limit.w (F : J ⥤ C) [has_limit F] {j j' : J} (f : j ⟶ j') :
limit.π F j ≫ F.map f = limit.π F j' := (limit.cone F).w f
def limit.is_limit (F : J ⥤ C) [has_limit F] : is_limit (limit.cone F) :=
has_limit.is_limit.{v} F
def limit.lift (F : J ⥤ C) [has_limit F] (c : cone F) : c.X ⟶ limit F :=
(limit.is_limit F).lift c
@[simp] lemma limit.is_limit_lift {F : J ⥤ C} [has_limit F] (c : cone F) :
(limit.is_limit F).lift c = limit.lift F c := rfl
@[simp] lemma limit.lift_π {F : J ⥤ C} [has_limit F] (c : cone F) (j : J) :
limit.lift F c ≫ limit.π F j = c.π.app j :=
is_limit.fac _ c j
def limit.cone_morphism {F : J ⥤ C} [has_limit F] (c : cone F) :
cone_morphism c (limit.cone F) :=
(limit.is_limit F).lift_cone_morphism c
@[simp] lemma limit.cone_morphism_hom {F : J ⥤ C} [has_limit F] (c : cone F) :
(limit.cone_morphism c).hom = limit.lift F c := rfl
@[simp] lemma limit.cone_morphism_π {F : J ⥤ C} [has_limit F] (c : cone F) (j : J) :
(limit.cone_morphism c).hom ≫ limit.π F j = c.π.app j :=
by erw is_limit.fac
@[extensionality] lemma limit.hom_ext {F : J ⥤ C} [has_limit F] {X : C} {f f' : X ⟶ limit F}
(w : ∀ j, f ≫ limit.π F j = f' ≫ limit.π F j) : f = f' :=
(limit.is_limit F).hom_ext w
def limit.hom_iso (F : J ⥤ C) [has_limit F] (W : C) : (W ⟶ limit F) ≅ (F.cones.obj (op W)) :=
(limit.is_limit F).hom_iso W
@[simp] lemma limit.hom_iso_hom (F : J ⥤ C) [has_limit F] {W : C} (f : W ⟶ limit F):
(limit.hom_iso F W).hom f = (const J).map f ≫ (limit.cone F).π :=
(limit.is_limit F).hom_iso_hom f
def limit.hom_iso' (F : J ⥤ C) [has_limit F] (W : C) :
((W ⟶ limit F) : Type v) ≅ { p : Π j, W ⟶ F.obj j // ∀ {j j' : J} (f : j ⟶ j'), p j ≫ F.map f = p j' } :=
(limit.is_limit F).hom_iso' W
lemma limit.lift_extend {F : J ⥤ C} [has_limit F] (c : cone F) {X : C} (f : X ⟶ c.X) :
limit.lift F (c.extend f) = f ≫ limit.lift F c :=
by obviously
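-- Usage sketch of the interface above (a hypothetical statement): lifting the
-- chosen limit cone against itself is the identity, by `limit.hom_ext` and `simp`.
example (G : J ⥤ C) [has_limit G] : limit.lift G (limit.cone G) = 𝟙 (limit G) :=
by ext; simp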
section pre
variables {K : Type v} [small_category K]
variables (F) [has_limit F] (E : K ⥤ J) [has_limit (E ⋙ F)]
def limit.pre : limit F ⟶ limit (E ⋙ F) :=
limit.lift (E ⋙ F)
{ X := limit F,
π := { app := λ k, limit.π F (E.obj k) } }
@[simp] lemma limit.pre_π (k : K) : limit.pre F E ≫ limit.π (E ⋙ F) k = limit.π F (E.obj k) :=
by erw is_limit.fac
@[simp] lemma limit.lift_pre (c : cone F) :
limit.lift F c ≫ limit.pre F E = limit.lift (E ⋙ F) (c.whisker E) :=
by ext; simp
variables {L : Type v} [small_category L]
variables (D : L ⥤ K) [has_limit (D ⋙ E ⋙ F)]
@[simp] lemma limit.pre_pre : limit.pre F E ≫ limit.pre (E ⋙ F) D = limit.pre F (D ⋙ E) :=
by ext j; erw [assoc, limit.pre_π, limit.pre_π, limit.pre_π]; refl
end pre
section post
variables {D : Sort u'} [𝒟 : category.{v+1} D]
include 𝒟
variables (F) [has_limit F] (G : C ⥤ D) [has_limit (F ⋙ G)]
def limit.post : G.obj (limit F) ⟶ limit (F ⋙ G) :=
limit.lift (F ⋙ G)
{ X := G.obj (limit F),
π :=
{ app := λ j, G.map (limit.π F j),
naturality' :=
by intros j j' f; erw [←G.map_comp, limits.cone.w, id_comp]; refl } }
@[simp] lemma limit.post_π (j : J) : limit.post F G ≫ limit.π (F ⋙ G) j = G.map (limit.π F j) :=
by erw is_limit.fac
@[simp] lemma limit.lift_post (c : cone F) :
G.map (limit.lift F c) ≫ limit.post F G = limit.lift (F ⋙ G) (G.map_cone c) :=
by ext; rw [assoc, limit.post_π, ←G.map_comp, limit.lift_π, limit.lift_π]; refl
@[simp] lemma limit.post_post
{E : Sort u''} [category.{v+1} E] (H : D ⥤ E) [has_limit ((F ⋙ G) ⋙ H)] :
/- H G (limit F) ⟶ H (limit (F ⋙ G)) ⟶ limit ((F ⋙ G) ⋙ H) equals -/
/- H G (limit F) ⟶ limit (F ⋙ (G ⋙ H)) -/
H.map (limit.post F G) ≫ limit.post (F ⋙ G) H = limit.post F (G ⋙ H) :=
by ext; erw [assoc, limit.post_π, ←H.map_comp, limit.post_π, limit.post_π]; refl
end post
lemma limit.pre_post {K : Type v} [small_category K] {D : Sort u'} [category.{v+1} D]
(E : K ⥤ J) (F : J ⥤ C) (G : C ⥤ D)
[has_limit F] [has_limit (E ⋙ F)] [has_limit (F ⋙ G)] [has_limit ((E ⋙ F) ⋙ G)] :
/- G (limit F) ⟶ G (limit (E ⋙ F)) ⟶ limit ((E ⋙ F) ⋙ G) vs -/
/- G (limit F) ⟶ limit (F ⋙ G) ⟶ limit (E ⋙ (F ⋙ G)) or -/
G.map (limit.pre F E) ≫ limit.post (E ⋙ F) G = limit.post F G ≫ limit.pre (F ⋙ G) E :=
by ext; erw [assoc, limit.post_π, ←G.map_comp, limit.pre_π, assoc, limit.pre_π, limit.post_π]; refl
section lim_functor
variables [has_limits_of_shape J C]
/-- `limit F` is functorial in `F`, when `C` has all limits of shape `J`. -/
def lim : (J ⥤ C) ⥤ C :=
{ obj := λ F, limit F,
map := λ F G α, limit.lift G
{ X := limit F,
π :=
{ app := λ j, limit.π F j ≫ α.app j,
naturality' := λ j j' f,
by erw [id_comp, assoc, ←α.naturality, ←assoc, limit.w] } },
map_comp' := λ F G H α β,
by ext; erw [assoc, is_limit.fac, is_limit.fac, ←assoc, is_limit.fac, assoc]; refl }
variables {F} {G : J ⥤ C} (α : F ⟶ G)
@[simp] lemma lim.map_π (j : J) : lim.map α ≫ limit.π G j = limit.π F j ≫ α.app j :=
by apply is_limit.fac
@[simp] lemma limit.lift_map (c : cone F) :
limit.lift F c ≫ lim.map α = limit.lift G ((cones.postcompose α).obj c) :=
by ext; rw [assoc, lim.map_π, ←assoc, limit.lift_π, limit.lift_π]; refl
lemma limit.map_pre {K : Type v} [small_category K] [has_limits_of_shape K C] (E : K ⥤ J) :
lim.map α ≫ limit.pre G E = limit.pre F E ≫ lim.map (whisker_left E α) :=
by ext; rw [assoc, limit.pre_π, lim.map_π, assoc, lim.map_π, ←assoc, limit.pre_π]; refl
lemma limit.map_pre' {K : Type v} [small_category K] [has_limits_of_shape.{v} K C]
(F : J ⥤ C) {E₁ E₂ : K ⥤ J} (α : E₁ ⟶ E₂) :
limit.pre F E₂ = limit.pre F E₁ ≫ lim.map (whisker_right α F) :=
by ext1; simp [(category.assoc _ _ _ _).symm]
lemma limit.id_pre (F : J ⥤ C) :
limit.pre F (functor.id _) = lim.map (functor.left_unitor F).inv := by tidy
lemma limit.map_post {D : Sort u'} [category.{v+1} D] [has_limits_of_shape J D] (H : C ⥤ D) :
/- H (limit F) ⟶ H (limit G) ⟶ limit (G ⋙ H) vs
H (limit F) ⟶ limit (F ⋙ H) ⟶ limit (G ⋙ H) -/
H.map (lim.map α) ≫ limit.post G H = limit.post F H ≫ lim.map (whisker_right α H) :=
begin
ext,
rw [assoc, limit.post_π, ←H.map_comp, lim.map_π, H.map_comp],
rw [assoc, lim.map_π, ←assoc, limit.post_π],
refl
end
def lim_yoneda : lim ⋙ yoneda ≅ category_theory.cones J C :=
nat_iso.of_components (λ F, nat_iso.of_components (λ W, limit.hom_iso F (unop W)) (by tidy))
(by tidy)
end lim_functor
end limit
section colimit
/-- `has_colimit F` represents a particular chosen colimit of the diagram `F`. -/
class has_colimit (F : J ⥤ C) :=
(cocone : cocone F)
(is_colimit : is_colimit cocone)
variables (J C)
/-- `C` has colimits of shape `J` if we have chosen a particular colimit of
every functor `F : J ⥤ C`. -/
class has_colimits_of_shape :=
(has_colimit : Π F : J ⥤ C, has_colimit F)
/-- `C` has all (small) colimits if it has colimits of every shape. -/
class has_colimits :=
(has_colimits_of_shape : Π (J : Type v) [𝒥 : small_category J], has_colimits_of_shape J C)
variables {J C}
instance has_colimit_of_has_colimits_of_shape
{J : Type v} [small_category J] [H : has_colimits_of_shape J C] (F : J ⥤ C) : has_colimit F :=
has_colimits_of_shape.has_colimit F
instance has_colimits_of_shape_of_has_colimits
{J : Type v} [small_category J] [H : has_colimits.{v} C] : has_colimits_of_shape J C :=
has_colimits.has_colimits_of_shape C J
/- Interface to the `has_colimit` class. -/
def colimit.cocone (F : J ⥤ C) [has_colimit F] : cocone F := has_colimit.cocone F
def colimit (F : J ⥤ C) [has_colimit F] := (colimit.cocone F).X
def colimit.ι (F : J ⥤ C) [has_colimit F] (j : J) : F.obj j ⟶ colimit F :=
(colimit.cocone F).ι.app j
@[simp] lemma colimit.cocone_ι {F : J ⥤ C} [has_colimit F] (j : J) :
(colimit.cocone F).ι.app j = colimit.ι _ j := rfl
@[simp] lemma colimit.w (F : J ⥤ C) [has_colimit F] {j j' : J} (f : j ⟶ j') :
F.map f ≫ colimit.ι F j' = colimit.ι F j := (colimit.cocone F).w f
def colimit.is_colimit (F : J ⥤ C) [has_colimit F] : is_colimit (colimit.cocone F) :=
has_colimit.is_colimit.{v} F
def colimit.desc (F : J ⥤ C) [has_colimit F] (c : cocone F) : colimit F ⟶ c.X :=
(colimit.is_colimit F).desc c
@[simp] lemma colimit.is_colimit_desc {F : J ⥤ C} [has_colimit F] (c : cocone F) :
(colimit.is_colimit F).desc c = colimit.desc F c := rfl
@[simp] lemma colimit.ι_desc {F : J ⥤ C} [has_colimit F] (c : cocone F) (j : J) :
colimit.ι F j ≫ colimit.desc F c = c.ι.app j :=
is_colimit.fac _ c j
/--
We have lots of lemmas describing how to simplify `colimit.ι F j ≫ _`,
and combined with `colimit.hom_ext` we rely on these lemmas for many calculations.
However, since `category.assoc` is a `@[simp]` lemma, often expressions are
right associated, and it's hard to apply these lemmas about `colimit.ι`.
We thus define some additional `@[simp]` lemmas, with an arbitrary extra morphism.
-/
@[simp] lemma colimit.ι_desc_assoc {F : J ⥤ C} [has_colimit F] (c : cocone F) (j : J) {Y : C} (f : c.X ⟶ Y) :
colimit.ι F j ≫ colimit.desc F c ≫ f = c.ι.app j ≫ f :=
by rw [←category.assoc, colimit.ι_desc]
def colimit.cocone_morphism {F : J ⥤ C} [has_colimit F] (c : cocone F) :
cocone_morphism (colimit.cocone F) c :=
(colimit.is_colimit F).desc_cocone_morphism c
@[simp] lemma colimit.cocone_morphism_hom {F : J ⥤ C} [has_colimit F] (c : cocone F) :
(colimit.cocone_morphism c).hom = colimit.desc F c := rfl
@[simp] lemma colimit.ι_cocone_morphism {F : J ⥤ C} [has_colimit F] (c : cocone F) (j : J) :
colimit.ι F j ≫ (colimit.cocone_morphism c).hom = c.ι.app j :=
by erw is_colimit.fac
@[extensionality] lemma colimit.hom_ext {F : J ⥤ C} [has_colimit F] {X : C} {f f' : colimit F ⟶ X}
(w : ∀ j, colimit.ι F j ≫ f = colimit.ι F j ≫ f') : f = f' :=
(colimit.is_colimit F).hom_ext w
def colimit.hom_iso (F : J ⥤ C) [has_colimit F] (W : C) : (colimit F ⟶ W) ≅ (F.cocones.obj W) :=
(colimit.is_colimit F).hom_iso W
@[simp] lemma colimit.hom_iso_hom (F : J ⥤ C) [has_colimit F] {W : C} (f : colimit F ⟶ W):
(colimit.hom_iso F W).hom f = (colimit.cocone F).ι ≫ (const J).map f :=
(colimit.is_colimit F).hom_iso_hom f
def colimit.hom_iso' (F : J ⥤ C) [has_colimit F] (W : C) :
((colimit F ⟶ W) : Type v) ≅ { p : Π j, F.obj j ⟶ W // ∀ {j j'} (f : j ⟶ j'), F.map f ≫ p j' = p j } :=
(colimit.is_colimit F).hom_iso' W
lemma colimit.desc_extend (F : J ⥤ C) [has_colimit F] (c : cocone F) {X : C} (f : c.X ⟶ X) :
colimit.desc F (c.extend f) = colimit.desc F c ≫ f :=
begin
ext1, rw [←category.assoc], simp
end
section pre
variables {K : Type v} [small_category K]
variables (F) [has_colimit F] (E : K ⥤ J) [has_colimit (E ⋙ F)]
def colimit.pre : colimit (E ⋙ F) ⟶ colimit F :=
colimit.desc (E ⋙ F)
{ X := colimit F,
ι := { app := λ k, colimit.ι F (E.obj k) } }
@[simp] lemma colimit.ι_pre (k : K) : colimit.ι (E ⋙ F) k ≫ colimit.pre F E = colimit.ι F (E.obj k) :=
by erw is_colimit.fac
@[simp] lemma colimit.ι_pre_assoc (k : K) {Z : C} (f : colimit F ⟶ Z):
colimit.ι (E ⋙ F) k ≫ (colimit.pre F E) ≫ f = ((colimit.ι F (E.obj k)) : (E ⋙ F).obj k ⟶ colimit F) ≫ f :=
by rw [←category.assoc, colimit.ι_pre]
@[simp] lemma colimit.pre_desc (c : cocone F) :
colimit.pre F E ≫ colimit.desc F c = colimit.desc (E ⋙ F) (c.whisker E) :=
by ext; rw [←assoc, colimit.ι_pre]; simp
variables {L : Type v} [small_category L]
variables (D : L ⥤ K) [has_colimit (D ⋙ E ⋙ F)]
@[simp] lemma colimit.pre_pre : colimit.pre (E ⋙ F) D ≫ colimit.pre F E = colimit.pre F (D ⋙ E) :=
begin
ext j,
rw [←assoc, colimit.ι_pre, colimit.ι_pre],
letI : has_colimit ((D ⋙ E) ⋙ F) := show has_colimit (D ⋙ E ⋙ F), by apply_instance,
exact (colimit.ι_pre F (D ⋙ E) j).symm
end
end pre
section post
variables {D : Sort u'} [𝒟 : category.{v+1} D]
include 𝒟
variables (F) [has_colimit F] (G : C ⥤ D) [has_colimit (F ⋙ G)]
def colimit.post : colimit (F ⋙ G) ⟶ G.obj (colimit F) :=
colimit.desc (F ⋙ G)
{ X := G.obj (colimit F),
ι :=
{ app := λ j, G.map (colimit.ι F j),
naturality' :=
by intros j j' f; erw [←G.map_comp, limits.cocone.w, comp_id]; refl } }
@[simp] lemma colimit.ι_post (j : J) : colimit.ι (F ⋙ G) j ≫ colimit.post F G = G.map (colimit.ι F j) :=
by erw is_colimit.fac
@[simp] lemma colimit.ι_post_assoc (j : J) {Y : D} (f : G.obj (colimit F) ⟶ Y):
colimit.ι (F ⋙ G) j ≫ colimit.post F G ≫ f = G.map (colimit.ι F j) ≫ f :=
by rw [←category.assoc, colimit.ι_post]
@[simp] lemma colimit.post_desc (c : cocone F) :
colimit.post F G ≫ G.map (colimit.desc F c) = colimit.desc (F ⋙ G) (G.map_cocone c) :=
by ext; rw [←assoc, colimit.ι_post, ←G.map_comp, colimit.ι_desc, colimit.ι_desc]; refl
@[simp] lemma colimit.post_post
{E : Sort u''} [category.{v+1} E] (H : D ⥤ E) [has_colimit ((F ⋙ G) ⋙ H)] :
/- H G (colimit F) ⟶ H (colimit (F ⋙ G)) ⟶ colimit ((F ⋙ G) ⋙ H) equals -/
/- H G (colimit F) ⟶ colimit (F ⋙ (G ⋙ H)) -/
colimit.post (F ⋙ G) H ≫ H.map (colimit.post F G) = colimit.post F (G ⋙ H) :=
begin
ext,
rw [←assoc, colimit.ι_post, ←H.map_comp, colimit.ι_post],
exact (colimit.ι_post F (G ⋙ H) j).symm
end
end post
lemma colimit.pre_post {K : Type v} [small_category K] {D : Sort u'} [category.{v+1} D]
(E : K ⥤ J) (F : J ⥤ C) (G : C ⥤ D)
[has_colimit F] [has_colimit (E ⋙ F)] [has_colimit (F ⋙ G)] [has_colimit ((E ⋙ F) ⋙ G)] :
/- G (colimit F) ⟶ G (colimit (E ⋙ F)) ⟶ colimit ((E ⋙ F) ⋙ G) vs -/
/- G (colimit F) ⟶ colimit (F ⋙ G) ⟶ colimit (E ⋙ (F ⋙ G)) or -/
colimit.post (E ⋙ F) G ≫ G.map (colimit.pre F E) = colimit.pre (F ⋙ G) E ≫ colimit.post F G :=
begin
ext,
rw [←assoc, colimit.ι_post, ←G.map_comp, colimit.ι_pre, ←assoc],
letI : has_colimit (E ⋙ F ⋙ G) := show has_colimit ((E ⋙ F) ⋙ G), by apply_instance,
erw [colimit.ι_pre (F ⋙ G) E j, colimit.ι_post]
end
section colim_functor
variables [has_colimits_of_shape J C]
/-- `colimit F` is functorial in `F`, when `C` has all colimits of shape `J`. -/
def colim : (J ⥤ C) ⥤ C :=
{ obj := λ F, colimit F,
map := λ F G α, colimit.desc F
{ X := colimit G,
ι :=
{ app := λ j, α.app j ≫ colimit.ι G j,
naturality' := λ j j' f,
by erw [comp_id, ←assoc, α.naturality, assoc, colimit.w] } },
map_comp' := λ F G H α β,
by ext; erw [←assoc, is_colimit.fac, is_colimit.fac, assoc, is_colimit.fac, ←assoc]; refl }
variables {F} {G : J ⥤ C} (α : F ⟶ G)
@[simp] lemma colim.ι_map (j : J) : colimit.ι F j ≫ colim.map α = α.app j ≫ colimit.ι G j :=
by apply is_colimit.fac
@[simp] lemma colim.ι_map_assoc (j : J) {Y : C} (f : colimit G ⟶ Y):
colimit.ι F j ≫ colim.map α ≫ f = α.app j ≫ colimit.ι G j ≫ f :=
by rw [←category.assoc, colim.ι_map, category.assoc]
@[simp] lemma colimit.map_desc (c : cocone G) :
colim.map α ≫ colimit.desc G c = colimit.desc F ((cocones.precompose α).obj c) :=
by ext; rw [←assoc, colim.ι_map, assoc, colimit.ι_desc, colimit.ι_desc]; refl
lemma colimit.pre_map {K : Type v} [small_category K] [has_colimits_of_shape K C] (E : K ⥤ J) :
colimit.pre F E ≫ colim.map α = colim.map (whisker_left E α) ≫ colimit.pre G E :=
by ext; rw [←assoc, colimit.ι_pre, colim.ι_map, ←assoc, colim.ι_map, assoc, colimit.ι_pre]; refl
lemma colimit.pre_map' {K : Type v} [small_category K] [has_colimits_of_shape.{v} K C]
(F : J ⥤ C) {E₁ E₂ : K ⥤ J} (α : E₁ ⟶ E₂) :
colimit.pre F E₁ = colim.map (whisker_right α F) ≫ colimit.pre F E₂ :=
by ext1; simp [(category.assoc _ _ _ _).symm]
lemma colimit.pre_id (F : J ⥤ C) :
colimit.pre F (functor.id _) = colim.map (functor.left_unitor F).hom := by tidy
lemma colimit.map_post {D : Sort u'} [category.{v+1} D] [has_colimits_of_shape J D] (H : C ⥤ D) :
/- H (colimit F) ⟶ H (colimit G) ⟶ colimit (G ⋙ H) vs
H (colimit F) ⟶ colimit (F ⋙ H) ⟶ colimit (G ⋙ H) -/
colimit.post F H ≫ H.map (colim.map α) = colim.map (whisker_right α H) ≫ colimit.post G H:=
begin
ext,
rw [←assoc, colimit.ι_post, ←H.map_comp, colim.ι_map, H.map_comp],
rw [←assoc, colim.ι_map, assoc, colimit.ι_post],
refl
end
def colim_coyoneda : colim.op ⋙ coyoneda ≅ category_theory.cocones J C :=
nat_iso.of_components (λ F, nat_iso.of_components (colimit.hom_iso (unop F)) (by tidy))
(by tidy)
end colim_functor
end colimit
end category_theory.limits
|
9663efe98c9ecf6c4706480e9be193ca00a3311c
|
92b50235facfbc08dfe7f334827d47281471333b
|
/library/data/nat/div.lean
|
37c1a0342e059d8f2a1c9e7e09522c876be3cb0c
|
[
"Apache-2.0"
] |
permissive
|
htzh/lean
|
24f6ed7510ab637379ec31af406d12584d31792c
|
d70c79f4e30aafecdfc4a60b5d3512199200ab6e
|
refs/heads/master
| 1,607,677,731,270
| 1,437,089,952,000
| 1,437,089,952,000
| 37,078,816
| 0
| 0
| null | 1,433,780,956,000
| 1,433,780,955,000
| null |
UTF-8
|
Lean
| false
| false
| 24,410
|
lean
|
/-
Copyright (c) 2014 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Leonardo de Moura
Definitions and properties of div and mod. Much of the development follows Isabelle's library.
-/
import data.nat.sub
open eq.ops well_founded decidable prod
namespace nat
/- div -/
-- auxiliary lemma used to justify div
private definition div_rec_lemma {x y : nat} (H : 0 < y ∧ y ≤ x) : x - y < x :=
and.rec_on H (λ ypos ylex, sub_lt (lt_of_lt_of_le ypos ylex) ypos)
private definition div.F (x : nat) (f : Π x₁, x₁ < x → nat → nat) (y : nat) : nat :=
if H : 0 < y ∧ y ≤ x then f (x - y) (div_rec_lemma H) y + 1 else zero
definition divide (x y : nat) := fix div.F x y
notation a div b := divide a b
theorem divide_def (x y : nat) : divide x y = if 0 < y ∧ y ≤ x then divide (x - y) y + 1 else 0 :=
congr_fun (fix_eq div.F x) y
theorem div_zero (a : ℕ) : a div 0 = 0 :=
divide_def a 0 ⬝ if_neg (!not_and_of_not_left (lt.irrefl 0))
theorem div_eq_zero_of_lt {a b : ℕ} (h : a < b) : a div b = 0 :=
divide_def a b ⬝ if_neg (!not_and_of_not_right (not_le_of_gt h))
theorem zero_div (b : ℕ) : 0 div b = 0 :=
divide_def 0 b ⬝ if_neg (λ h, and.rec_on h (λ l r, absurd (lt_of_lt_of_le l r) (lt.irrefl 0)))
theorem div_eq_succ_sub_div {a b : ℕ} (h₁ : b > 0) (h₂ : a ≥ b) : a div b = succ ((a - b) div b) :=
divide_def a b ⬝ if_pos (and.intro h₁ h₂)
theorem add_div_self (x : ℕ) {z : ℕ} (H : z > 0) : (x + z) div z = succ (x div z) :=
calc
(x + z) div z = if 0 < z ∧ z ≤ x + z then (x + z - z) div z + 1 else 0 : !divide_def
... = (x + z - z) div z + 1 : if_pos (and.intro H (le_add_left z x))
... = succ (x div z) : {!add_sub_cancel}
theorem add_div_self_left {x : ℕ} (z : ℕ) (H : x > 0) : (x + z) div x = succ (z div x) :=
!add.comm ▸ !add_div_self H
theorem add_mul_div_self {x y z : ℕ} (H : z > 0) : (x + y * z) div z = x div z + y :=
nat.induction_on y
(calc (x + zero * z) div z = (x + zero) div z : zero_mul
... = x div z : add_zero
... = x div z + zero : add_zero)
(take y,
assume IH : (x + y * z) div z = x div z + y, calc
(x + succ y * z) div z = (x + y * z + z) div z : by simp
... = succ ((x + y * z) div z) : !add_div_self H
... = x div z + succ y : by simp)
theorem add_mul_div_self_left (x z : ℕ) {y : ℕ} (H : y > 0) : (x + y * z) div y = x div y + z :=
!mul.comm ▸ add_mul_div_self H
theorem mul_div_cancel (m : ℕ) {n : ℕ} (H : n > 0) : m * n div n = m :=
calc
m * n div n = (0 + m * n) div n : zero_add
... = 0 div n + m : add_mul_div_self H
... = 0 + m : zero_div
... = m : zero_add
theorem mul_div_cancel_left {m : ℕ} (n : ℕ) (H : m > 0) : m * n div m = n :=
!mul.comm ▸ !mul_div_cancel H
/- mod -/
private definition mod.F (x : nat) (f : Π x₁, x₁ < x → nat → nat) (y : nat) : nat :=
if H : 0 < y ∧ y ≤ x then f (x - y) (div_rec_lemma H) y else x
definition modulo (x y : nat) := fix mod.F x y
notation a mod b := modulo a b
notation a `≡` b `[mod`:100 c `]`:0 := a mod c = b mod c
theorem modulo_def (x y : nat) : modulo x y = if 0 < y ∧ y ≤ x then modulo (x - y) y else x :=
congr_fun (fix_eq mod.F x) y
theorem mod_zero (a : ℕ) : a mod 0 = a :=
modulo_def a 0 ⬝ if_neg (!not_and_of_not_left (lt.irrefl 0))
theorem mod_eq_of_lt {a b : ℕ} (h : a < b) : a mod b = a :=
modulo_def a b ⬝ if_neg (!not_and_of_not_right (not_le_of_gt h))
theorem zero_mod (b : ℕ) : 0 mod b = 0 :=
modulo_def 0 b ⬝ if_neg (λ h, and.rec_on h (λ l r, absurd (lt_of_lt_of_le l r) (lt.irrefl 0)))
theorem mod_eq_sub_mod {a b : ℕ} (h₁ : b > 0) (h₂ : a ≥ b) : a mod b = (a - b) mod b :=
modulo_def a b ⬝ if_pos (and.intro h₁ h₂)
theorem add_mod_self (x z : ℕ) : (x + z) mod z = x mod z :=
by_cases_zero_pos z
(by rewrite add_zero)
(take z, assume H : z > 0,
calc
(x + z) mod z = if 0 < z ∧ z ≤ x + z then (x + z - z) mod z else _ : modulo_def
... = (x + z - z) mod z : if_pos (and.intro H (le_add_left z x))
... = x mod z : add_sub_cancel)
theorem add_mod_self_left (x z : ℕ) : (x + z) mod x = z mod x :=
!add.comm ▸ !add_mod_self
theorem add_mul_mod_self (x y z : ℕ) : (x + y * z) mod z = x mod z :=
nat.induction_on y
(calc (x + zero * z) mod z = (x + zero) mod z : zero_mul
... = x mod z : add_zero)
(take y,
assume IH : (x + y * z) mod z = x mod z,
calc
(x + succ y * z) mod z = (x + (y * z + z)) mod z : succ_mul
... = (x + y * z + z) mod z : add.assoc
... = (x + y * z) mod z : !add_mod_self
... = x mod z : IH)
theorem add_mul_mod_self_left (x y z : ℕ) : (x + y * z) mod y = x mod y :=
!mul.comm ▸ !add_mul_mod_self
theorem mul_mod_left (m n : ℕ) : (m * n) mod n = 0 :=
by rewrite [-zero_add (m * n), add_mul_mod_self, zero_mod]
theorem mul_mod_right (m n : ℕ) : (m * n) mod m = 0 :=
!mul.comm ▸ !mul_mod_left
theorem mod_lt (x : ℕ) {y : ℕ} (H : y > 0) : x mod y < y :=
nat.case_strong_induction_on x
(show 0 mod y < y, from !zero_mod⁻¹ ▸ H)
(take x,
assume IH : ∀x', x' ≤ x → x' mod y < y,
show succ x mod y < y, from
by_cases -- (succ x < y)
(assume H1 : succ x < y,
have H2 : succ x mod y = succ x, from mod_eq_of_lt H1,
show succ x mod y < y, from H2⁻¹ ▸ H1)
(assume H1 : ¬ succ x < y,
have H2 : y ≤ succ x, from le_of_not_gt H1,
have H3 : succ x mod y = (succ x - y) mod y, from mod_eq_sub_mod H H2,
have H4 : succ x - y < succ x, from sub_lt !succ_pos H,
have H5 : succ x - y ≤ x, from le_of_lt_succ H4,
show succ x mod y < y, from H3⁻¹ ▸ IH _ H5))
theorem mod_one (n : ℕ) : n mod 1 = 0 :=
have H1 : n mod 1 < 1, from !mod_lt !succ_pos,
eq_zero_of_le_zero (le_of_lt_succ H1)
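-- A small derived fact, as a usage sketch of `mod_lt` and `mod_eq_of_lt` above:
-- taking the remainder twice is the same as taking it once.
example (x : ℕ) {y : ℕ} (H : y > 0) : (x mod y) mod y = x mod y :=
mod_eq_of_lt (mod_lt x H)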
/- properties of div and mod -/
-- the quotient / remainder theorem
theorem eq_div_mul_add_mod (x y : ℕ) : x = x div y * y + x mod y :=
by_cases_zero_pos y
(show x = x div 0 * 0 + x mod 0, from
(calc
x div 0 * 0 + x mod 0 = 0 + x mod 0 : mul_zero
... = x mod 0 : zero_add
... = x : mod_zero)⁻¹)
(take y,
assume H : y > 0,
show x = x div y * y + x mod y, from
nat.case_strong_induction_on x
(show 0 = (0 div y) * y + 0 mod y, by simp)
(take x,
assume IH : ∀x', x' ≤ x → x' = x' div y * y + x' mod y,
show succ x = succ x div y * y + succ x mod y, from
by_cases -- (succ x < y)
(assume H1 : succ x < y,
have H2 : succ x div y = 0, from div_eq_zero_of_lt H1,
have H3 : succ x mod y = succ x, from mod_eq_of_lt H1,
by simp)
(assume H1 : ¬ succ x < y,
have H2 : y ≤ succ x, from le_of_not_gt H1,
have H3 : succ x div y = succ ((succ x - y) div y), from div_eq_succ_sub_div H H2,
have H4 : succ x mod y = (succ x - y) mod y, from mod_eq_sub_mod H H2,
have H5 : succ x - y < succ x, from sub_lt !succ_pos H,
have H6 : succ x - y ≤ x, from le_of_lt_succ H5,
(calc
succ x div y * y + succ x mod y =
succ ((succ x - y) div y) * y + succ x mod y : H3
... = ((succ x - y) div y) * y + y + succ x mod y : succ_mul
... = ((succ x - y) div y) * y + y + (succ x - y) mod y : H4
... = ((succ x - y) div y) * y + (succ x - y) mod y + y : add.right_comm
... = succ x - y + y : {!(IH _ H6)⁻¹}
... = succ x : sub_add_cancel H2)⁻¹)))
theorem mod_eq_sub_div_mul (x y : ℕ) : x mod y = x - x div y * y :=
eq_sub_of_add_eq (!add.comm ▸ !eq_div_mul_add_mod)⁻¹
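-- Usage sketch: the quotient/remainder identity above with the summands on the
-- left, obtained from `eq_div_mul_add_mod` by symmetry (`⁻¹` from `eq.ops`).
example (x y : ℕ) : x div y * y + x mod y = x :=
(eq_div_mul_add_mod x y)⁻¹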
theorem mod_add_mod (m n k : ℕ) : (m mod n + k) mod n = (m + k) mod n :=
by rewrite [eq_div_mul_add_mod m n at {2}, add.assoc, add.comm (m div n * n), add_mul_mod_self]
theorem add_mod_mod (m n k : ℕ) : (m + n mod k) mod k = (m + n) mod k :=
by rewrite [add.comm, mod_add_mod, add.comm]
theorem add_mod_eq_add_mod_right {m n k : ℕ} (i : ℕ) (H : m mod n = k mod n) :
(m + i) mod n = (k + i) mod n :=
by rewrite [-mod_add_mod, -mod_add_mod k, H]
theorem add_mod_eq_add_mod_left {m n k : ℕ} (i : ℕ) (H : m mod n = k mod n) :
(i + m) mod n = (i + k) mod n :=
by rewrite [add.comm, add_mod_eq_add_mod_right _ H, add.comm]
theorem mod_eq_mod_of_add_mod_eq_add_mod_right {m n k i : ℕ} :
(m + i) mod n = (k + i) mod n → m mod n = k mod n :=
by_cases_zero_pos n
(by rewrite [*mod_zero]; apply eq_of_add_eq_add_right)
(take n,
assume npos : n > 0,
assume H1 : (m + i) mod n = (k + i) mod n,
have H2 : (m + i mod n) mod n = (k + i mod n) mod n, by rewrite [*add_mod_mod, H1],
assert H3 : (m + i mod n + (n - i mod n)) mod n = (k + i mod n + (n - i mod n)) mod n,
from add_mod_eq_add_mod_right _ H2,
begin
revert H3,
rewrite [*add.assoc, add_sub_of_le (le_of_lt (!mod_lt npos)), *add_mod_self],
intros, assumption
end)
theorem mod_eq_mod_of_add_mod_eq_add_mod_left {m n k i : ℕ} :
(i + m) mod n = (i + k) mod n → m mod n = k mod n :=
by rewrite [add.comm i m, add.comm i k]; apply mod_eq_mod_of_add_mod_eq_add_mod_right
theorem mod_le {x y : ℕ} : x mod y ≤ x :=
!eq_div_mul_add_mod⁻¹ ▸ !le_add_left
theorem eq_remainder {q1 r1 q2 r2 y : ℕ} (H1 : r1 < y) (H2 : r2 < y)
(H3 : q1 * y + r1 = q2 * y + r2) : r1 = r2 :=
calc
r1 = r1 mod y : mod_eq_of_lt H1
... = (r1 + q1 * y) mod y : !add_mul_mod_self⁻¹
... = (q1 * y + r1) mod y : add.comm
... = (r2 + q2 * y) mod y : by rewrite [H3, add.comm]
... = r2 mod y : !add_mul_mod_self
... = r2 : mod_eq_of_lt H2
theorem eq_quotient {q1 r1 q2 r2 y : ℕ} (H1 : r1 < y) (H2 : r2 < y)
(H3 : q1 * y + r1 = q2 * y + r2) : q1 = q2 :=
have H4 : q1 * y + r2 = q2 * y + r2, from (eq_remainder H1 H2 H3) ▸ H3,
have H5 : q1 * y = q2 * y, from add.cancel_right H4,
have H6 : y > 0, from lt_of_le_of_lt !zero_le H1,
show q1 = q2, from eq_of_mul_eq_mul_right H6 H5
theorem mul_div_mul_left {z : ℕ} (x y : ℕ) (zpos : z > 0) : (z * x) div (z * y) = x div y :=
by_cases -- (y = 0)
(assume H : y = 0, by simp)
(assume H : y ≠ 0,
have ypos : y > 0, from pos_of_ne_zero H,
have zypos : z * y > 0, from mul_pos zpos ypos,
have H1 : (z * x) mod (z * y) < z * y, from !mod_lt zypos,
have H2 : z * (x mod y) < z * y, from mul_lt_mul_of_pos_left (!mod_lt ypos) zpos,
eq_quotient H1 H2
(calc
((z * x) div (z * y)) * (z * y) + (z * x) mod (z * y) = z * x : eq_div_mul_add_mod
... = z * (x div y * y + x mod y) : eq_div_mul_add_mod
... = z * (x div y * y) + z * (x mod y) : mul.left_distrib
... = (x div y) * (z * y) + z * (x mod y) : mul.left_comm))
theorem mul_div_mul_right {x z y : ℕ} (zpos : z > 0) : (x * z) div (y * z) = x div y :=
!mul.comm ▸ !mul.comm ▸ !mul_div_mul_left zpos
theorem mul_mod_mul_left (z x y : ℕ) : (z * x) mod (z * y) = z * (x mod y) :=
or.elim (eq_zero_or_pos z)
(assume H : z = 0,
calc
(z * x) mod (z * y) = (0 * x) mod (z * y) : by subst z
... = 0 mod (z * y) : zero_mul
... = 0 : zero_mod
... = 0 * (x mod y) : zero_mul
... = z * (x mod y) : by subst z)
(assume zpos : z > 0,
or.elim (eq_zero_or_pos y)
(assume H : y = 0, by rewrite [H, mul_zero, *mod_zero])
(assume ypos : y > 0,
have zypos : z * y > 0, from mul_pos zpos ypos,
have H1 : (z * x) mod (z * y) < z * y, from !mod_lt zypos,
have H2 : z * (x mod y) < z * y, from mul_lt_mul_of_pos_left (!mod_lt ypos) zpos,
eq_remainder H1 H2
(calc
((z * x) div (z * y)) * (z * y) + (z * x) mod (z * y) = z * x : eq_div_mul_add_mod
... = z * (x div y * y + x mod y) : eq_div_mul_add_mod
... = z * (x div y * y) + z * (x mod y) : mul.left_distrib
... = (x div y) * (z * y) + z * (x mod y) : mul.left_comm)))
theorem mul_mod_mul_right (x z y : ℕ) : (x * z) mod (y * z) = (x mod y) * z :=
mul.comm z x ▸ mul.comm z y ▸ !mul.comm ▸ !mul_mod_mul_left
theorem mod_self (n : ℕ) : n mod n = 0 :=
nat.cases_on n (by simp)
(take n,
have H : (succ n * 1) mod (succ n * 1) = succ n * (1 mod 1),
from !mul_mod_mul_left,
(by simp) ▸ H)
theorem mul_mod_eq_mod_mul_mod (m n k : nat) : (m * n) mod k = ((m mod k) * n) mod k :=
calc
(m * n) mod k = (((m div k) * k + m mod k) * n) mod k : eq_div_mul_add_mod
... = ((m mod k) * n) mod k :
by rewrite [mul.right_distrib, mul.right_comm, add.comm, add_mul_mod_self]
theorem mul_mod_eq_mul_mod_mod (m n k : nat) : (m * n) mod k = (m * (n mod k)) mod k :=
!mul.comm ▸ !mul.comm ▸ !mul_mod_eq_mod_mul_mod
theorem div_one (n : ℕ) : n div 1 = n :=
have H : n div 1 * 1 + n mod 1 = n, from !eq_div_mul_add_mod⁻¹,
(by simp) ▸ H
theorem div_self {n : ℕ} (H : n > 0) : n div n = 1 :=
have H1 : (n * 1) div (n * 1) = 1 div 1, from !mul_div_mul_left H,
(by simp) ▸ H1
theorem div_mul_cancel_of_mod_eq_zero {m n : ℕ} (H : m mod n = 0) : m div n * n = m :=
by rewrite [eq_div_mul_add_mod m n at {2}, H, add_zero]
theorem mul_div_cancel_of_mod_eq_zero {m n : ℕ} (H : m mod n = 0) : n * (m div n) = m :=
!mul.comm ▸ div_mul_cancel_of_mod_eq_zero H
/- dvd -/
theorem dvd_of_mod_eq_zero {m n : ℕ} (H : n mod m = 0) : m ∣ n :=
dvd.intro (!mul.comm ▸ div_mul_cancel_of_mod_eq_zero H)
theorem mod_eq_zero_of_dvd {m n : ℕ} (H : m ∣ n) : n mod m = 0 :=
dvd.elim H (take z, assume H1 : n = m * z, H1⁻¹ ▸ !mul_mod_right)
theorem dvd_iff_mod_eq_zero (m n : ℕ) : m ∣ n ↔ n mod m = 0 :=
iff.intro mod_eq_zero_of_dvd dvd_of_mod_eq_zero
definition dvd.decidable_rel [instance] : decidable_rel dvd :=
take m n, decidable_of_decidable_of_iff _ (iff.symm !dvd_iff_mod_eq_zero)
theorem div_mul_cancel {m n : ℕ} (H : n ∣ m) : m div n * n = m :=
div_mul_cancel_of_mod_eq_zero (mod_eq_zero_of_dvd H)
theorem mul_div_cancel' {m n : ℕ} (H : n ∣ m) : n * (m div n) = m :=
!mul.comm ▸ div_mul_cancel H
theorem dvd_of_dvd_add_left {m n₁ n₂ : ℕ} (H₁ : m ∣ n₁ + n₂) (H₂ : m ∣ n₁) : m ∣ n₂ :=
obtain (c₁ : nat) (Hc₁ : n₁ + n₂ = m * c₁), from H₁,
obtain (c₂ : nat) (Hc₂ : n₁ = m * c₂), from H₂,
have aux : m * (c₁ - c₂) = n₂, from calc
m * (c₁ - c₂) = m * c₁ - m * c₂ : mul_sub_left_distrib
... = n₁ + n₂ - m * c₂ : Hc₁
... = n₁ + n₂ - n₁ : Hc₂
... = n₂ : add_sub_cancel_left,
dvd.intro aux
theorem dvd_of_dvd_add_right {m n₁ n₂ : ℕ} (H : m ∣ n₁ + n₂) : m ∣ n₂ → m ∣ n₁ :=
dvd_of_dvd_add_left (!add.comm ▸ H)
theorem dvd_sub {m n₁ n₂ : ℕ} (H1 : m ∣ n₁) (H2 : m ∣ n₂) : m ∣ n₁ - n₂ :=
by_cases
(assume H3 : n₁ ≥ n₂,
have H4 : n₁ = n₁ - n₂ + n₂, from (sub_add_cancel H3)⁻¹,
show m ∣ n₁ - n₂, from dvd_of_dvd_add_right (H4 ▸ H1) H2)
(assume H3 : ¬ (n₁ ≥ n₂),
have H4 : n₁ - n₂ = 0, from sub_eq_zero_of_le (le_of_lt (lt_of_not_ge H3)),
show m ∣ n₁ - n₂, from H4⁻¹ ▸ dvd_zero _)
theorem dvd.antisymm {m n : ℕ} : m ∣ n → n ∣ m → m = n :=
by_cases_zero_pos n
(assume H1, assume H2 : 0 ∣ m, eq_zero_of_zero_dvd H2)
(take n,
assume Hpos : n > 0,
assume H1 : m ∣ n,
assume H2 : n ∣ m,
obtain k (Hk : n = m * k), from exists_eq_mul_right_of_dvd H1,
obtain l (Hl : m = n * l), from exists_eq_mul_right_of_dvd H2,
have H3 : n * (l * k) = n, from !mul.assoc ▸ Hl ▸ Hk⁻¹,
have H4 : l * k = 1, from eq_one_of_mul_eq_self_right Hpos H3,
have H5 : k = 1, from eq_one_of_mul_eq_one_left H4,
show m = n, from (mul_one m)⁻¹ ⬝ (H5 ▸ Hk⁻¹))
theorem mul_div_assoc (m : ℕ) {n k : ℕ} (H : k ∣ n) : m * n div k = m * (n div k) :=
or.elim (eq_zero_or_pos k)
(assume H1 : k = 0,
calc
m * n div k = m * n div 0 : H1
... = 0 : div_zero
... = m * 0 : mul_zero m
... = m * (n div 0) : div_zero
... = m * (n div k) : H1)
(assume H1 : k > 0,
have H2 : n = n div k * k, from (div_mul_cancel H)⁻¹,
calc
m * n div k = m * (n div k * k) div k : H2
... = m * (n div k) * k div k : mul.assoc
... = m * (n div k) : mul_div_cancel _ H1)
theorem dvd_of_mul_dvd_mul_left {m n k : ℕ} (kpos : k > 0) (H : k * m ∣ k * n) : m ∣ n :=
dvd.elim H
(take l,
assume H1 : k * n = k * m * l,
have H2 : n = m * l, from eq_of_mul_eq_mul_left kpos (H1 ⬝ !mul.assoc),
dvd.intro H2⁻¹)
theorem dvd_of_mul_dvd_mul_right {m n k : ℕ} (kpos : k > 0) (H : m * k ∣ n * k) : m ∣ n :=
dvd_of_mul_dvd_mul_left kpos (!mul.comm ▸ !mul.comm ▸ H)
lemma dvd_of_eq_mul (i j n : nat) : n = j*i → j ∣ n :=
begin intros, subst n, apply dvd_mul_right end
theorem div_dvd_div {k m n : ℕ} (H1 : k ∣ m) (H2 : m ∣ n) : m div k ∣ n div k :=
have H3 : m = m div k * k, from (div_mul_cancel H1)⁻¹,
have H4 : n = n div k * k, from (div_mul_cancel (dvd.trans H1 H2))⁻¹,
or.elim (eq_zero_or_pos k)
(assume H5 : k = 0,
have H6: n div k = 0, from (congr_arg _ H5 ⬝ !div_zero),
H6⁻¹ ▸ !dvd_zero)
(assume H5 : k > 0,
dvd_of_mul_dvd_mul_right H5 (H3 ▸ H4 ▸ H2))
theorem div_eq_iff_eq_mul_right {m n : ℕ} (k : ℕ) (H : n > 0) (H' : n ∣ m) :
m div n = k ↔ m = n * k :=
iff.intro
(assume H1, by rewrite [-H1, mul_div_cancel' H'])
(assume H1, by rewrite [H1, !mul_div_cancel_left H])
theorem div_eq_iff_eq_mul_left {m n : ℕ} (k : ℕ) (H : n > 0) (H' : n ∣ m) :
m div n = k ↔ m = k * n :=
!mul.comm ▸ !div_eq_iff_eq_mul_right H H'
theorem eq_mul_of_div_eq_right {m n k : ℕ} (H1 : n ∣ m) (H2 : m div n = k) :
m = n * k :=
calc
m = n * (m div n) : mul_div_cancel' H1
... = n * k : H2
theorem div_eq_of_eq_mul_right {m n k : ℕ} (H1 : n > 0) (H2 : m = n * k) :
m div n = k :=
calc
m div n = n * k div n : H2
... = k : !mul_div_cancel_left H1
theorem eq_mul_of_div_eq_left {m n k : ℕ} (H1 : n ∣ m) (H2 : m div n = k) :
m = k * n :=
!mul.comm ▸ !eq_mul_of_div_eq_right H1 H2
theorem div_eq_of_eq_mul_left {m n k : ℕ} (H1 : n > 0) (H2 : m = k * n) :
m div n = k :=
!div_eq_of_eq_mul_right H1 (!mul.comm ▸ H2)
lemma add_mod_eq_of_dvd (i j n : nat) : n ∣ j → (i + j) mod n = i mod n :=
assume h,
obtain k (hk : j = n * k), from exists_eq_mul_right_of_dvd h,
begin
subst j, rewrite mul.comm,
apply add_mul_mod_self
end
/- div and ordering -/
lemma le_of_dvd {m n} : n > 0 → m ∣ n → m ≤ n :=
assume (h₁ : n > 0) (h₂ : m ∣ n),
assert h₃ : n mod m = 0, from mod_eq_zero_of_dvd h₂,
by_contradiction
(λ nle : ¬ m ≤ n,
have h₄ : m > n, from lt_of_not_ge nle,
assert h₅ : n mod m = n, from mod_eq_of_lt h₄,
begin
rewrite h₃ at h₅, subst n,
exact absurd h₁ (lt.irrefl 0)
end)
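-- Usage sketch combining `le_of_dvd` with `mod_eq_zero_of_dvd`: a divisor of a
-- positive number is at most that number and leaves remainder zero.
example {m n : ℕ} (h₁ : n > 0) (h₂ : m ∣ n) : m ≤ n ∧ n mod m = 0 :=
and.intro (le_of_dvd h₁ h₂) (mod_eq_zero_of_dvd h₂)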
theorem div_mul_le (m n : ℕ) : m div n * n ≤ m :=
calc
m = m div n * n + m mod n : eq_div_mul_add_mod
... ≥ m div n * n : le_add_right
theorem div_le_of_le_mul {m n k : ℕ} (H : m ≤ n * k) : m div k ≤ n :=
or.elim (eq_zero_or_pos k)
(assume H1 : k = 0,
calc
m div k = m div 0 : H1
... = 0 : div_zero
... ≤ n : zero_le)
(assume H1 : k > 0,
le_of_mul_le_mul_right (calc
m div k * k ≤ m div k * k + m mod k : le_add_right
... = m : eq_div_mul_add_mod
... ≤ n * k : H) H1)
theorem div_le_self (m n : ℕ) : m div n ≤ m :=
nat.cases_on n (!div_zero⁻¹ ▸ !zero_le)
take n,
have H : m ≤ m * succ n, from calc
m = m * 1 : mul_one
... ≤ m * succ n : !mul_le_mul_left (succ_le_succ !zero_le),
div_le_of_le_mul H
theorem mul_le_of_le_div {m n k : ℕ} (H : m ≤ n div k) : m * k ≤ n :=
calc
m * k ≤ n div k * k : !mul_le_mul_right H
... ≤ n : div_mul_le
theorem le_div_of_mul_le {m n k : ℕ} (H1 : k > 0) (H2 : m * k ≤ n) : m ≤ n div k :=
have H3 : m * k < (succ (n div k)) * k, from
calc
m * k ≤ n : H2
... = n div k * k + n mod k : eq_div_mul_add_mod
... < n div k * k + k : add_lt_add_left (!mod_lt H1)
... = (succ (n div k)) * k : succ_mul,
le_of_lt_succ (lt_of_mul_lt_mul_right H3)
theorem le_div_iff_mul_le {m n k : ℕ} (H : k > 0) : m ≤ n div k ↔ m * k ≤ n :=
iff.intro !mul_le_of_le_div (!le_div_of_mul_le H)
theorem div_le_div {m n : ℕ} (k : ℕ) (H : m ≤ n) : m div k ≤ n div k :=
by_cases_zero_pos k
(by rewrite [*div_zero])
(take k, assume H1 : k > 0, le_div_of_mul_le H1 (le.trans !div_mul_le H))
theorem div_lt_of_lt_mul {m n k : ℕ} (H : m < n * k) : m div k < n :=
lt_of_mul_lt_mul_right (calc
m div k * k ≤ m div k * k + m mod k : le_add_right
... = m : eq_div_mul_add_mod
... < n * k : H)
theorem lt_mul_of_div_lt {m n k : ℕ} (H1 : k > 0) (H2 : m div k < n) : m < n * k :=
assert H3 : succ (m div k) * k ≤ n * k, from !mul_le_mul_right (succ_le_of_lt H2),
have H4 : m div k * k + k ≤ n * k, by rewrite [succ_mul at H3]; apply H3,
calc
m = m div k * k + m mod k : eq_div_mul_add_mod
... < m div k * k + k : add_lt_add_left (!mod_lt H1)
... ≤ n * k : H4
theorem div_lt_iff_lt_mul {m n k : ℕ} (H : k > 0) : m div k < n ↔ m < n * k :=
iff.intro (!lt_mul_of_div_lt H) !div_lt_of_lt_mul
theorem div_le_iff_le_mul_of_div {m n : ℕ} (k : ℕ) (H : n > 0) (H' : n ∣ m) :
m div n ≤ k ↔ m ≤ k * n :=
by rewrite [propext (!le_iff_mul_le_mul_right H), !div_mul_cancel H']
theorem le_mul_of_div_le_of_div {m n k : ℕ} (H1 : n > 0) (H2 : n ∣ m) (H3 : m div n ≤ k) :
m ≤ k * n :=
iff.mp (!div_le_iff_le_mul_of_div H1 H2) H3
-- needed for integer division
theorem mul_sub_div_of_lt {m n k : ℕ} (H : k < m * n) :
(m * n - (k + 1)) div m = n - k div m - 1 :=
have H1 : k div m < n, from div_lt_of_lt_mul (!mul.comm ▸ H),
have H2 : n - k div m ≥ 1, from
le_sub_of_add_le (calc
1 + k div m = succ (k div m) : add.comm
... ≤ n : succ_le_of_lt H1),
assert H3 : n - k div m = n - k div m - 1 + 1, from (sub_add_cancel H2)⁻¹,
assert H4 : m > 0, from pos_of_ne_zero (assume H': m = 0, not_lt_zero _ (!zero_mul ▸ H' ▸ H)),
have H5 : k mod m + 1 ≤ m, from succ_le_of_lt (!mod_lt H4),
assert H6 : m - (k mod m + 1) < m, from sub_lt_self H4 !succ_pos,
calc
(m * n - (k + 1)) div m = (m * n - (k div m * m + k mod m + 1)) div m : eq_div_mul_add_mod
... = (m * n - k div m * m - (k mod m + 1)) div m : by rewrite [*sub_sub]
... = ((n - k div m) * m - (k mod m + 1)) div m :
by rewrite [mul.comm m, mul_sub_right_distrib]
... = ((n - k div m - 1) * m + m - (k mod m + 1)) div m :
by rewrite [H3 at {1}, mul.right_distrib, nat.one_mul]
... = ((n - k div m - 1) * m + (m - (k mod m + 1))) div m : {add_sub_assoc H5 _}
... = (m - (k mod m + 1)) div m + (n - k div m - 1) :
by rewrite [add.comm, (add_mul_div_self H4)]
... = n - k div m - 1 :
by rewrite [div_eq_zero_of_lt H6, zero_add]
end nat
|
d68e8566b8a88fbd27dab19e054a07447fff38af
|
c45b34bfd44d8607a2e8762c926e3cfaa7436201
|
/uexp/src/uexp/TDP.lean
|
5f39c4f3468dc1bec0b4a558ae32e79c7c484b54
|
[
"BSD-2-Clause"
] |
permissive
|
Shamrock-Frost/Cosette
|
b477c442c07e45082348a145f19ebb35a7f29392
|
24cbc4adebf627f13f5eac878f04ffa20d1209af
|
refs/heads/master
| 1,619,721,304,969
| 1,526,082,841,000
| 1,526,082,841,000
| 121,695,605
| 1
| 0
| null | 1,518,737,210,000
| 1,518,737,210,000
| null |
UTF-8
|
Lean
| false
| false
| 10,631
|
lean
|
import .u_semiring
import .cosette_tactics
import .ucongr
import .extra_constants
import .sql
section TDP
open tactic
meta def swap_ith_sigma_forward (i : nat)
: usr_sigma_repr → tactic unit
| ⟨xs, body⟩ := do
swapped_schemas ← list.swap_ith_forward i xs,
-- We have to subtract because the de Bruijn indices are inside-out
let num_free_vars := list.length xs,
let swapped_body := expr.swap_free_vars (num_free_vars - 1 - i) (num_free_vars - 1 - nat.succ i) body,
let swapped_repr := usr_sigma_repr.mk swapped_schemas swapped_body,
normal_expr ← sigma_repr_to_sigma_expr ⟨xs, body⟩,
swapped_expr ← sigma_repr_to_sigma_expr swapped_repr,
equality_lemma ← to_expr ``(%%normal_expr = %%swapped_expr),
eq_lemma_name ← mk_fresh_name,
tactic.assert eq_lemma_name equality_lemma,
repeat_n i $ applyc `congr_arg >> funext,
applyc `sig_commute,
eq_lemma ← resolve_name eq_lemma_name >>= to_expr,
rewrite_target eq_lemma,
clear eq_lemma
meta def move_once (i: nat) : tactic unit := do
lr ← get_lhs_sigma_repr,
swap_ith_sigma_forward i lr
meta def move_to_front (i : nat) : tactic unit :=
let loop : ℕ → tactic unit → tactic unit :=
λ iter_num next_iter, do
lr ← get_lhs_sigma_repr,
swap_ith_sigma_forward iter_num lr,
next_iter
in nat.repeat loop i $ return ()
-- move the sigma at position i back to position j
meta def move_sig_back (i: nat) (j: nat) :=
let loop: nat → tactic unit → tactic unit :=
λ num next, do
next,
move_once (i+num)
in nat.repeat loop (j-i) $ return ()
meta def break_p : expr → tactic (list expr)
| `(%%a ≃ %%b) :=
match a, b with
| `((%%a1, %%a2)) , `((%%b1, %%b2)) := do
x1 ← to_expr ``((%%a1 ≃ %%b1)),
x2 ← to_expr ``((%%a2 ≃ %%b2)),
l ← break_p x1,
r ← break_p x2,
return (l ++ r)
| _, _ := do x ← to_expr ``((%%a ≃ %%b)), return [x]
end
| x := return [x]
meta def split_p (ex : expr) : tactic (list expr) :=
match ex with
| `(%%a ≃ %%b) :=
match a, b with
| `((%%a1, %%a2)) , `((%%b1, %%b2)) := do
x1 ← to_expr ``((%%a).1 ≃ (%%b).1),
x2 ← to_expr ``((%%a).2 ≃ (%%b).2),
return [x1, x2]
| _, _ := return [ex]
end
| x := return [x]
end
meta def split_l (ex : expr) : tactic (list expr) :=
match ex with
| `(%%a ≃ %%b) :=
match a, b with
| _ , `((%%c, %%d)) := do
/-
ty ← infer_type a,
let args := expr.get_app_args ty,
(r1, r2) ← match args.nth 0 with
| some `(%%r1 ++ %%r2) := return (r1, r2)
| _ := failure
end,
trace (r1, r2),
ty2 ← to_expr ``((@cast %%ty (Tuple %%r1 × Tuple %%r2) (by tactic.reflexivity) (%%a)).1),
-/
x1 ← to_expr ``((%%a).1 ≃ %%c),
x2 ← to_expr ``((%%a).2 ≃ %%d),
return [x1, x2]
| _, _ := return [ex]
end
| x := return [x]
end
meta def split_r (ex : expr) : tactic (list expr) :=
match ex with
| `(%%a ≃ %%b) :=
match a, b with
| `((%%c, %%d)), _ := do
ty ← infer_type b,
let args := expr.get_app_args ty,
r ← match args.nth 0 with
| some `(%%r ++ _) := return r
| _ := failure
end,
ty2 ← to_expr ``((@cast %%ty (Tuple %%r × Tuple %%r) (by tactic.reflexivity) (%%b)).1),
x1 ← to_expr ``(%%c ≃ (%%b).1),
x2 ← to_expr ``(%%d ≃ (%%b).2),
return [x1, x2]
| _, _ := return [ex]
end
| x := return [x]
end
meta def flatmap_in_repr (f: expr → tactic (list expr)): list expr → tactic (list expr)
| [x] := f x
| (h::t) := do h' ← f h,
t' ← flatmap_in_repr t,
return (h' ++ t')
| [] := return []
meta def split_pair_in_repr (r: list expr) : tactic (list expr) := do
r1 ← flatmap_in_repr split_p r,
s' ← flatmap_in_repr split_l r1,
r ← flatmap_in_repr split_r s',
return r
meta def normalize_step (n: nat) : tactic unit := do
repeat_n n $ tactic.applyc `congr_arg >> tactic.funext,
split_pairs
-- normalize body of a sigma
meta def normalize_sig_body : tactic unit := do
`[try {unfold pair}],
lr ← get_lhs_sigma_repr,
lr_body_closed ← sigma_repr_to_closed_body_expr lr,
match lr_body_closed with
| ⟨body, names⟩ := do
le ← product_to_repr body,
s1 ← split_pair_in_repr le,
body' ← repr_to_product s1,
origin ← get_lhs,
let abstracted := expr.abstract_locals body' (list.reverse names),
new ← sigma_repr_to_sigma_expr ⟨lr.var_schemas, abstracted⟩,
eq_lemma ← tactic.to_expr ``(%%origin = %%new),
lemma_name ← tactic.mk_fresh_name,
tactic.assert lemma_name eq_lemma,
tactic.focus1 $ normalize_step lr.var_schemas.length,
tactic.try tactic.ac_refl,
eq_lemma_name ← tactic.resolve_name lemma_name >>= tactic.to_expr,
tactic.rewrite_target eq_lemma_name,
tactic.clear eq_lemma_name
end
-- a single removal step: substitute away one sigma-bound variable constrained by an equality
meta def removal_step : tactic unit := do
lr ← get_lhs_sigma_repr,
match lr with
| ⟨xs, body⟩ := do
le ← ra_product_to_repr body,
match list.head le with
| `(%%a ≃ %%b) :=
match a, b with
| (expr.var n), e := do
let l := (list.length xs),
move_sig_back (l - 1 - n) (l - 1),
lr' ← get_lhs_sigma_repr,
match lr' with
| ⟨xs', body'⟩ := do
match body' with
| `((%%c ≃ %%d) * %%f) := do
let sub_expr := expr.subst_var' c d f,
let new_body := expr.lower_vars sub_expr 1 1,
old_expr ← sigma_repr_to_sigma_expr lr',
new_expr ← sigma_repr_to_sigma_expr ⟨list.take (l-1) xs', new_body⟩,
eq_lemma ← tactic.to_expr ``(%%old_expr = %%new_expr),
lemma_name ← tactic.mk_fresh_name,
tactic.assert lemma_name eq_lemma,
repeat_n (l-1) $ tactic.applyc `congr_arg >> tactic.funext,
tactic.applyc `sig_eq_subst_r,
eq_lemma_name ← tactic.resolve_name lemma_name >>= tactic.to_expr,
tactic.rewrite_target eq_lemma_name,
tactic.clear eq_lemma_name
| _ := tactic.fail "fail in removal step"
end
end
| e, (expr.var n) := do
let l := (list.length xs),
move_sig_back (l - 1 - n) (l - 1),
lr' ← get_lhs_sigma_repr,
match lr' with
| ⟨xs', body'⟩ := do
match body' with
| `((%%c ≃ %%d) * %%f) := do
let sub_expr := expr.subst_var' d c f,
let new_body := expr.lower_vars sub_expr 1 1,
old_expr ← sigma_repr_to_sigma_expr lr',
new_expr ← sigma_repr_to_sigma_expr ⟨list.take (l-1) xs', new_body⟩,
eq_lemma ← tactic.to_expr ``(%%old_expr = %%new_expr),
lemma_name ← tactic.mk_fresh_name,
tactic.assert lemma_name eq_lemma,
repeat_n (l-1) $ tactic.applyc `congr_arg >> tactic.funext,
tactic.applyc `sig_eq_subst_l,
eq_lemma_name ← tactic.resolve_name lemma_name >>= tactic.to_expr,
tactic.rewrite_target eq_lemma_name,
tactic.clear eq_lemma_name
| _ := tactic.fail "fail in removal step"
end
end
| _, _ := return ()
end
| _ := return ()
end
end
meta def remove_dup_sigs : tactic unit := do
-- this is a workaround: it unnests 3 levels of pairs
-- repeat_n 3 $ normalize_sig_body >> try dsimp_target,
repeat_n 3 normalize_sig_body,
lhs ← get_lhs,
s ← sig_body_size,
final ← let loop : ℕ → ℕ → (tactic ℕ) := λ s n, do
forward_i_to_j_in_sig n 0,
removal_step,
sig_body_size
in repeat_if_progress loop s s,
return ()
meta def is_sig (e: expr) : bool :=
match e with
| `(usr.sig _) := tt
| _ := ff
end
/- step 1: within the body, move the nested sigmas to the front of the product -/
meta def unify_sigs_step_1: tactic unit := do
lr ← get_lhs_sigma_repr,
lr_body_closed ← sigma_repr_to_closed_body_expr lr,
match lr_body_closed with
| ⟨body, names⟩ := do
le ← product_to_repr body,
sl ← return $ list.qsort
(λ e1 e2, if is_sig e1 then tt else if is_sig e2 then ff else tt)
le,
body' ← repr_to_product sl,
origin ← get_lhs,
let abstracted := expr.abstract_locals body' (list.reverse names),
new ← sigma_repr_to_sigma_expr ⟨lr.var_schemas, abstracted⟩,
eq_lemma ← tactic.to_expr ``(%%origin = %%new),
lemma_name ← tactic.mk_fresh_name,
tactic.assert lemma_name eq_lemma,
repeat_n lr.var_schemas.length $ tactic.applyc `congr_arg >> tactic.funext,
tactic.try tactic.ac_refl,
eq_lemma_name ← tactic.resolve_name lemma_name >>= tactic.to_expr,
tactic.rewrite_target eq_lemma_name,
tactic.clear eq_lemma_name
end
/- step 2: distribute summation -/
meta def unify_sigs_step_2: tactic unit := do
lr ← get_lhs_sigma_repr,
lr_body_closed ← sigma_repr_to_closed_body_expr lr,
match lr_body_closed with
| ⟨body, names⟩ := do
le ← product_to_repr body,
h ← return $ list.head le,
t ← return $ list.tail le,
if ¬ (is_sig h) then tactic.failed -- if there is no sig inside, terminate
else do
sr ← return $ sigma_expr_to_sigma_repr h,
(b, n) ← sigma_repr_to_closed_body_expr sr,
body' ← repr_to_product (b::t),
names' ← return $ (n ++ names),
let abstracted := expr.abstract_locals body' (list.reverse names'),
new ← sigma_repr_to_sigma_expr ⟨(lr.var_schemas) ++ (sr.var_schemas), abstracted⟩,
origin ← get_lhs,
eq_lemma ← tactic.to_expr ``(%%origin = %%new),
lemma_name ← tactic.mk_fresh_name,
tactic.assert lemma_name eq_lemma,
repeat_n lr.var_schemas.length $ tactic.applyc `congr_arg >> tactic.funext,
tactic.applyc `sig_distr_time_r,
eq_lemma_name ← tactic.resolve_name lemma_name >>= tactic.to_expr,
tactic.rewrite_target eq_lemma_name,
tactic.clear eq_lemma_name
end
meta def unify_sigs : tactic unit :=
tactic.repeat $ unify_sigs_step_1 >> unify_sigs_step_2
meta def TDP' (easy_case_solver : tactic unit) : tactic unit :=
let loop (iter_num : ℕ) (next_iter : tactic unit) : tactic unit :=
next_iter <|> do
move_to_front iter_num,
to_expr ``(congr_arg usr.sig) >>= apply,
funext,
easy_case_solver <|> TDP' easy_case_solver
in do
unify_sigs,
remove_dup_sigs,
applyc `ueq_symm,
remove_dup_sigs,
num_vars ← list.length <$> usr_sigma_repr.var_schemas <$> get_lhs_sigma_repr,
nat.repeat loop num_vars easy_case_solver
meta def TDP := TDP' $ ac_refl <|> ucongr
end TDP
|
b062dbf5c724289e329f0a2e464d4e21d2ac6f1b
|
94e33a31faa76775069b071adea97e86e218a8ee
|
/src/computability/tm_to_partrec.lean
|
0e6f2ea8725c6080372c3ace044e74038462cddb
|
[
"Apache-2.0"
] |
permissive
|
urkud/mathlib
|
eab80095e1b9f1513bfb7f25b4fa82fa4fd02989
|
6379d39e6b5b279df9715f8011369a301b634e41
|
refs/heads/master
| 1,658,425,342,662
| 1,658,078,703,000
| 1,658,078,703,000
| 186,910,338
| 0
| 0
|
Apache-2.0
| 1,568,512,083,000
| 1,557,958,709,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 80,700
|
lean
|
/-
Copyright (c) 2020 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
import computability.halting
import computability.turing_machine
import data.num.lemmas
import tactic.derive_fintype
/-!
# Modelling partial recursive functions using Turing machines
This file defines a simplified basis for partial recursive functions, and a `turing.TM2` model
Turing machine for evaluating these functions. This amounts to a constructive proof that every
`partrec` function can be evaluated by a Turing machine.
## Main definitions
* `to_partrec.code`: a simplified basis for partial recursive functions, valued in
`list ℕ →. list ℕ`.
* `to_partrec.code.eval`: semantics for a `to_partrec.code` program
* `partrec_to_TM2.tr`: A TM2 turing machine which can evaluate `code` programs
-/
open function (update)
open relation
namespace turing
/-!
## A simplified basis for partrec
This section constructs the type `code`, which is a data type of programs with `list ℕ` input and
output, with enough expressivity to write any partial recursive function. The primitives are:
* `zero'` appends a `0` to the input. That is, `zero' v = 0 :: v`.
* `succ` returns the successor of the head of the input, defaulting to zero if there is no head:
* `succ [] = [1]`
* `succ (n :: v) = [n + 1]`
* `tail` returns the tail of the input
* `tail [] = []`
* `tail (n :: v) = v`
* `cons f fs` calls `f` and `fs` on the input and conses the results:
* `cons f fs v = (f v).head :: fs v`
* `comp f g` calls `f` on the output of `g`:
* `comp f g v = f (g v)`
* `case f g` cases on the head of the input, calling `f` or `g` depending on whether it is zero or
a successor (similar to `nat.cases_on`).
* `case f g [] = f []`
* `case f g (0 :: v) = f v`
* `case f g (n+1 :: v) = g (n :: v)`
* `fix f` calls `f` repeatedly, using the head of the result of `f` to decide whether to call `f`
again or finish:
* `fix f v = []` if `f v = []`
* `fix f v = w` if `f v = 0 :: w`
* `fix f v = fix f w` if `f v = n+1 :: w` (the exact value of `n` is discarded)
This basis is convenient because it is closer to the Turing machine model - the key operations are
splitting and merging of lists of unknown length, while the messy `n`-ary composition operation
from the traditional basis for partial recursive functions is absent - but it retains a
compositional semantics. The first step in transitioning to Turing machines is to make a sequential
evaluator for this basis, which we take up in the next section.
-/
namespace to_partrec
/-- The type of codes for partial recursive functions. Unlike `nat.partrec.code`, this uses a set
of operations on `list ℕ`. See `code.eval` for a description of the behavior of the primitives. -/
@[derive [decidable_eq, inhabited]]
inductive code
| zero'
| succ
| tail
| cons : code → code → code
| comp : code → code → code
| case : code → code → code
| fix : code → code
/-- The semantics of the `code` primitives, as partial functions `list ℕ →. list ℕ`. By convention,
functions that return a single result return a singleton `[n]`, or in some cases `n :: v` where
`v` will be ignored by a subsequent function.
* `zero'` appends a `0` to the input. That is, `zero' v = 0 :: v`.
* `succ` returns the successor of the head of the input, defaulting to zero if there is no head:
* `succ [] = [1]`
* `succ (n :: v) = [n + 1]`
* `tail` returns the tail of the input
* `tail [] = []`
* `tail (n :: v) = v`
* `cons f fs` calls `f` and `fs` on the input and conses the results:
* `cons f fs v = (f v).head :: fs v`
* `comp f g` calls `f` on the output of `g`:
* `comp f g v = f (g v)`
* `case f g` cases on the head of the input, calling `f` or `g` depending on whether it is zero or
a successor (similar to `nat.cases_on`).
* `case f g [] = f []`
* `case f g (0 :: v) = f v`
* `case f g (n+1 :: v) = g (n :: v)`
* `fix f` calls `f` repeatedly, using the head of the result of `f` to decide whether to call `f`
again or finish:
* `fix f v = []` if `f v = []`
* `fix f v = w` if `f v = 0 :: w`
* `fix f v = fix f w` if `f v = n+1 :: w` (the exact value of `n` is discarded)
-/
@[simp] def code.eval : code → list ℕ →. list ℕ
| code.zero' := λ v, pure (0 :: v)
| code.succ := λ v, pure [v.head.succ]
| code.tail := λ v, pure v.tail
| (code.cons f fs) := λ v, do n ← code.eval f v, ns ← code.eval fs v, pure (n.head :: ns)
| (code.comp f g) := λ v, g.eval v >>= f.eval
| (code.case f g) := λ v, v.head.elim (f.eval v.tail) (λ y _, g.eval (y :: v.tail))
| (code.fix f) := pfun.fix $ λ v, (f.eval v).map $ λ v,
if v.head = 0 then sum.inl v.tail else sum.inr v.tail
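/- Two sanity checks of the semantics listed above (sketches, not used later):
these are the stated equations `zero' v = 0 :: v` and `tail (n :: v) = v`,
which `simp` can verify because `code.eval` is a `@[simp]` definition. -/
example (v : list ℕ) : code.zero'.eval v = pure (0 :: v) := by simp
example (n : ℕ) (v : list ℕ) : code.tail.eval (n :: v) = pure v :=
by simp [list.tail_cons]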
namespace code
/-- `nil` is the constant nil function: `nil v = []`. -/
def nil : code := tail.comp succ
@[simp] theorem nil_eval (v) : nil.eval v = pure [] := by simp [nil]
/-- `id` is the identity function: `id v = v`. -/
def id : code := tail.comp zero'
@[simp] theorem id_eval (v) : id.eval v = pure v := by simp [id]
/-- `head` gets the head of the input list: `head [] = [0]`, `head (n :: v) = [n]`. -/
def head : code := cons id nil
@[simp] theorem head_eval (v) : head.eval v = pure [v.head] := by simp [head]
/-- `zero` is the constant zero function: `zero v = [0]`. -/
def zero : code := cons zero' nil
@[simp] theorem zero_eval (v) : zero.eval v = pure [0] := by simp [zero]
/-- `pred` returns the predecessor of the head of the input:
`pred [] = [0]`, `pred (0 :: v) = [0]`, `pred (n+1 :: v) = [n]`. -/
def pred : code := case zero head
@[simp] theorem pred_eval (v) : pred.eval v = pure [v.head.pred] :=
by simp [pred]; cases v.head; simp
/-- `rfind f` performs the function of the `rfind` primitive of partial recursive functions.
`rfind f v` returns the smallest `n` such that `(f (n :: v)).head = 0`.
It is implemented as:
rfind f v = pred (fix (λ (n::v), f (n::v) :: n+1 :: v) (0 :: v))
The idea is that the initial state is `0 :: v`, and the `fix` keeps `n :: v` as its internal state;
it calls `f (n :: v)` as the exit test and `n+1 :: v` as the next state. At the end we get
`n+1 :: v` where `n` is the desired output, and `pred (n+1 :: v) = [n]` returns the result.
-/
def rfind (f : code) : code := comp pred $ comp (fix $ cons f $ cons succ tail) zero'
/-- `prec f g` implements the `prec` (primitive recursion) operation of partial recursive
functions. `prec f g` evaluates as:
* `prec f g [] = [f []]`
* `prec f g (0 :: v) = [f v]`
* `prec f g (n+1 :: v) = [g (n :: prec f g (n :: v) :: v)]`
It is implemented as:
G (a :: b :: IH :: v) = (b :: a+1 :: b-1 :: g (a :: IH :: v) :: v)
F (0 :: f_v :: v) = (f_v :: v)
F (n+1 :: f_v :: v) = (fix G (0 :: n :: f_v :: v)).tail.tail
prec f g (a :: v) = [(F (a :: f v :: v)).head]
Because `fix` always evaluates its body at least once, we must special case the `0` case to avoid
calling `g` more times than necessary (which could be bad if `g` diverges). If the input is
`0 :: v`, then `F (0 :: f v :: v) = (f v :: v)` so we return `[f v]`. If the input is `n+1 :: v`,
we evaluate the function from the bottom up, with initial state `0 :: n :: f v :: v`. The first
number counts up, providing arguments for the applications to `g`, while the second number counts
down, providing the exit condition (this is the initial `b` in the return value of `G`, which is
stripped by `fix`). After the `fix` is complete, the final state is `n+1 :: 0 :: res :: v` where
`res` is the desired result, and the rest reduces this to `[res]`. -/
def prec (f g : code) : code :=
let G := cons tail $ cons succ $ cons (comp pred tail) $
cons (comp g $ cons id $ comp tail tail) $ comp tail $ comp tail tail in
let F := case id $ comp (comp (comp tail tail) (fix G)) zero' in
cons (comp F (cons head $ cons (comp f tail) tail)) nil
local attribute [-simp] part.bind_eq_bind part.map_eq_map part.pure_eq_some
theorem exists_code.comp {m n} {f : vector ℕ n →. ℕ} {g : fin n → vector ℕ m →. ℕ}
(hf : ∃ c : code, ∀ v : vector ℕ n, c.eval v.1 = pure <$> f v)
(hg : ∀ i, ∃ c : code, ∀ v : vector ℕ m, c.eval v.1 = pure <$> g i v) :
∃ c : code, ∀ v : vector ℕ m, c.eval v.1 = pure <$> (vector.m_of_fn (λ i, g i v) >>= f) :=
begin
suffices : ∃ c : code, ∀ v : vector ℕ m,
c.eval v.1 = subtype.val <$> vector.m_of_fn (λ i, g i v),
{ obtain ⟨cf, hf⟩ := hf, obtain ⟨cg, hg⟩ := this,
exact ⟨cf.comp cg, λ v,
by { simp [hg, hf, map_bind, seq_bind_eq, (∘), -subtype.val_eq_coe], refl }⟩ },
clear hf f, induction n with n IH,
{ exact ⟨nil, λ v, by simp [vector.m_of_fn]; refl⟩ },
{ obtain ⟨cg, hg₁⟩ := hg 0, obtain ⟨cl, hl⟩ := IH (λ i, hg i.succ),
exact ⟨cons cg cl, λ v, by { simp [vector.m_of_fn, hg₁, map_bind,
seq_bind_eq, bind_assoc, (∘), hl, -subtype.val_eq_coe], refl }⟩ },
end
theorem exists_code {n} {f : vector ℕ n →. ℕ} (hf : nat.partrec' f) :
∃ c : code, ∀ v : vector ℕ n, c.eval v.1 = pure <$> f v :=
begin
induction hf with n f hf,
induction hf,
case prim zero { exact ⟨zero', λ ⟨[], _⟩, rfl⟩ },
case prim succ { exact ⟨succ, λ ⟨[v], _⟩, rfl⟩ },
case prim nth : n i
{ refine fin.succ_rec (λ n, _) (λ n i IH, _) i,
{ exact ⟨head, λ ⟨list.cons a as, _⟩, by simp; refl⟩ },
{ obtain ⟨c, h⟩ := IH,
exact ⟨c.comp tail, λ v, by simpa [← vector.nth_tail] using h v.tail⟩ } },
case prim comp : m n f g hf hg IHf IHg
{ simpa [part.bind_eq_bind] using exists_code.comp IHf IHg },
case prim prec : n f g hf hg IHf IHg
{ obtain ⟨cf, hf⟩ := IHf, obtain ⟨cg, hg⟩ := IHg,
simp only [part.map_eq_map, part.map_some, pfun.coe_val] at hf hg,
refine ⟨prec cf cg, λ v, _⟩, rw ← v.cons_head_tail,
specialize hf v.tail, replace hg := λ a b, hg (a ::ᵥ b ::ᵥ v.tail),
simp only [vector.cons_val, vector.tail_val] at hf hg,
simp only [part.map_eq_map, part.map_some, vector.cons_val,
vector.cons_tail, vector.cons_head, pfun.coe_val, vector.tail_val],
simp only [← part.pure_eq_some] at hf hg ⊢,
induction v.head with n IH; simp [prec, hf, bind_assoc, ← part.map_eq_map,
← bind_pure_comp_eq_map, show ∀ x, pure x = [x], from λ _, rfl, -subtype.val_eq_coe],
suffices : ∀ a b, a + b = n →
(n.succ :: 0 :: g
(n ::ᵥ (nat.elim (f v.tail) (λ y IH, g (y ::ᵥ IH ::ᵥ v.tail)) n) ::ᵥ v.tail)
:: v.val.tail : list ℕ) ∈
pfun.fix (λ v : list ℕ, do
x ← cg.eval (v.head :: v.tail.tail),
pure $ if v.tail.head = 0
then sum.inl (v.head.succ :: v.tail.head.pred :: x.head :: v.tail.tail.tail : list ℕ)
else sum.inr (v.head.succ :: v.tail.head.pred :: x.head :: v.tail.tail.tail))
(a :: b :: nat.elim (f v.tail)
(λ y IH, g (y ::ᵥ IH ::ᵥ v.tail)) a :: v.val.tail),
{ rw (_ : pfun.fix _ _ = pure _), swap, exact part.eq_some_iff.2 (this 0 n (zero_add n)),
simp only [list.head, pure_bind, list.tail_cons] },
intros a b e, induction b with b IH generalizing a e,
{ refine pfun.mem_fix_iff.2 (or.inl $ part.eq_some_iff.1 _),
simp only [hg, ← e, pure_bind, list.tail_cons], refl },
{ refine pfun.mem_fix_iff.2 (or.inr ⟨_, _, IH (a+1) (by rwa add_right_comm)⟩),
simp only [hg, eval, pure_bind, nat.elim_succ, list.tail],
exact part.mem_some_iff.2 rfl } },
case comp : m n f g hf hg IHf IHg { exact exists_code.comp IHf IHg },
case rfind : n f hf IHf
{ obtain ⟨cf, hf⟩ := IHf, refine ⟨rfind cf, λ v, _⟩,
replace hf := λ a, hf (a ::ᵥ v),
simp only [part.map_eq_map, part.map_some, vector.cons_val, pfun.coe_val,
show ∀ x, pure x = [x], from λ _, rfl] at hf ⊢,
refine part.ext (λ x, _),
simp only [rfind, part.bind_eq_bind, part.pure_eq_some, part.map_eq_map,
part.bind_some, exists_prop, eval, list.head, pred_eval, part.map_some,
bool.ff_eq_to_bool_iff, part.mem_bind_iff, list.length,
part.mem_map_iff, nat.mem_rfind, list.tail, bool.tt_eq_to_bool_iff,
part.mem_some_iff, part.map_bind],
split,
{ rintro ⟨v', h1, rfl⟩,
suffices : ∀ (v₁ : list ℕ), v' ∈ pfun.fix
(λ v, (cf.eval v).bind $ λ y, part.some $ if y.head = 0 then
sum.inl (v.head.succ :: v.tail) else sum.inr (v.head.succ :: v.tail)) v₁ →
∀ n, v₁ = n :: v.val → (∀ m < n, ¬f (m ::ᵥ v) = 0) →
(∃ (a : ℕ), (f (a ::ᵥ v) = 0 ∧ ∀ {m : ℕ}, m < a → ¬f (m ::ᵥ v) = 0) ∧
[a] = [v'.head.pred]),
{ exact this _ h1 0 rfl (by rintro _ ⟨⟩) },
clear h1, intros v₀ h1,
refine pfun.fix_induction h1 (λ v₁ h2 IH, _), clear h1,
rintro n rfl hm,
have := pfun.mem_fix_iff.1 h2,
simp only [hf, part.bind_some] at this,
split_ifs at this,
{ simp only [list.head, exists_false, or_false, part.mem_some_iff,
list.tail_cons, false_and] at this,
subst this, exact ⟨_, ⟨h, hm⟩, rfl⟩ },
{ refine IH (n.succ :: v.val) (by simp * at *) _ rfl (λ m h', _),
obtain h|rfl := nat.lt_succ_iff_lt_or_eq.1 h', exacts [hm _ h, h] } },
{ rintro ⟨n, ⟨hn, hm⟩, rfl⟩, refine ⟨n.succ :: v.1, _, rfl⟩,
have : (n.succ :: v.1 : list ℕ) ∈ pfun.fix
(λ v, (cf.eval v).bind $ λ y, part.some $ if y.head = 0 then
sum.inl (v.head.succ :: v.tail) else sum.inr (v.head.succ :: v.tail)) (n :: v.val) :=
pfun.mem_fix_iff.2 (or.inl (by simp [hf, hn, -subtype.val_eq_coe])),
generalize_hyp : (n.succ :: v.1 : list ℕ) = w at this ⊢, clear hn,
induction n with n IH, {exact this},
refine IH (λ m h', hm (nat.lt_succ_of_lt h')) (pfun.mem_fix_iff.2 (or.inr ⟨_, _, this⟩)),
simp only [hf, hm n.lt_succ_self, part.bind_some, list.head, eq_self_iff_true,
if_false, part.mem_some_iff, and_self, list.tail_cons] } }
end
end code
/-!
## From compositional semantics to sequential semantics
Our initial sequential model is designed to be as similar as possible to the compositional
semantics in terms of its primitives, but it is a sequential semantics, meaning that rather than
defining an `eval c : list ℕ →. list ℕ` function for each program, defined by recursion on
programs, we have a type `cfg` with a step function `step : cfg → option cfg` that provides a
deterministic evaluation order. In order to do this, we introduce the notion of a *continuation*,
which can be viewed as a `code` with a hole in it where evaluation is currently taking place.
Continuations can be assigned a `list ℕ →. list ℕ` semantics as well, with the interpretation
being that given a `list ℕ` result returned from the code in the hole, the remainder of the
program will evaluate to a `list ℕ` final value.
The continuations are:
* `halt`: the empty continuation: the hole is the whole program, whatever is returned is the
final result. In our notation this is just `_`.
* `cons₁ fs v k`: evaluating the first part of a `cons`, that is `k (_ :: fs v)`, where `k` is the
outer continuation.
* `cons₂ ns k`: evaluating the second part of a `cons`: `k (ns.head :: _)`. (Technically we don't
need to hold on to all of `ns` here since we are already committed to taking the head, but this
is more regular.)
* `comp f k`: evaluating the first part of a composition: `k (f _)`.
* `fix f k`: waiting for the result of `f` in a `fix f` expression:
`k (if _.head = 0 then _.tail else fix f (_.tail))`
The type `cfg` of evaluation states is:
* `ret k v`: we have received a result, and are now evaluating the continuation `k` with result
`v`; that is, `k v` where `k` is ready to evaluate.
* `halt v`: we are done and the result is `v`.
The main theorem of this section is that for each code `c`, the state `step_normal c halt v` steps
to the final state `halt v'` in finitely many steps if and only if `code.eval c v = some v'`.
-/
/-- The type of continuations, built up during evaluation of a `code` expression. -/
@[derive inhabited]
inductive cont
| halt
| cons₁ : code → list ℕ → cont → cont
| cons₂ : list ℕ → cont → cont
| comp : code → cont → cont
| fix : code → cont → cont
/-- The semantics of a continuation. -/
def cont.eval : cont → list ℕ →. list ℕ
| cont.halt := pure
| (cont.cons₁ fs as k) := λ v, do ns ← code.eval fs as, cont.eval k (v.head :: ns)
| (cont.cons₂ ns k) := λ v, cont.eval k (ns.head :: v)
| (cont.comp f k) := λ v, code.eval f v >>= cont.eval k
| (cont.fix f k) := λ v, if v.head = 0 then k.eval v.tail else f.fix.eval v.tail >>= k.eval
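/- A hand-computed example of continuation semantics (not kernel-checked):
    (cont.cons₂ [5, 6] cont.halt).eval [1, 2]
      = cont.halt.eval ([5, 6].head :: [1, 2])
      = pure [5, 1, 2]
For `fix`, `(cont.fix f k).eval (0 :: v) = k.eval v`, while on `n+1 :: v` it continues with
`f.fix.eval v >>= k.eval`. -/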
/-- The set of configurations of the machine:
* `halt v`: The machine is about to stop and `v : list ℕ` is the result.
* `ret k v`: The machine is about to pass `v : list ℕ` to continuation `k : cont`.
We don't have a state corresponding to normal evaluation because these are evaluated immediately
to a `ret` "in zero steps" using the `step_normal` function. -/
@[derive inhabited]
inductive cfg
| halt : list ℕ → cfg
| ret : cont → list ℕ → cfg
/-- Evaluating `c : code` in a continuation `k : cont` and input `v : list ℕ`. This goes by
recursion on `c`, building an augmented continuation and a value to pass to it.
* `zero' v = 0 :: v` evaluates immediately, so we return it to the parent continuation
* `succ v = [v.head.succ]` evaluates immediately, so we return it to the parent continuation
* `tail v = v.tail` evaluates immediately, so we return it to the parent continuation
* `cons f fs v = (f v).head :: fs v` requires two sub-evaluations, so we evaluate
`f v` in the continuation `k (_.head :: fs v)` (called `cont.cons₁ fs v k`)
* `comp f g v = f (g v)` requires two sub-evaluations, so we evaluate
`g v` in the continuation `k (f _)` (called `cont.comp f k`)
* `case f g v = v.head.cases_on (f v.tail) (λ n, g (n :: v.tail))` has the information needed to
evaluate the case statement, so we do that and transition to either `f v` or `g (n :: v.tail)`.
* `fix f v = let v' := f v in if v'.head = 0 then k v'.tail else fix f v'.tail`
needs to first evaluate `f v`, so we do that and leave the rest for the continuation (called
`cont.fix f k`)
-/
def step_normal : code → cont → list ℕ → cfg
| code.zero' k v := cfg.ret k (0 :: v)
| code.succ k v := cfg.ret k [v.head.succ]
| code.tail k v := cfg.ret k v.tail
| (code.cons f fs) k v := step_normal f (cont.cons₁ fs v k) v
| (code.comp f g) k v := step_normal g (cont.comp f k) v
| (code.case f g) k v :=
v.head.elim (step_normal f k v.tail) (λ y _, step_normal g k (y :: v.tail))
| (code.fix f) k v := step_normal f (cont.fix f k) v
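/- A sample unfolding of `step_normal` (hand trace, not kernel-checked):
    step_normal (code.cons code.succ code.tail) cont.halt v
      = step_normal code.succ (cont.cons₁ code.tail v cont.halt) v
      = cfg.ret (cont.cons₁ code.tail v cont.halt) [v.head.succ]
The first sub-evaluation (`succ`) runs to completion, and its result is handed to a continuation
that still remembers to evaluate `code.tail v`. -/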
/-- Evaluating a continuation `k : cont` on input `v : list ℕ`. This is the second part of
evaluation, when we receive results from continuations built by `step_normal`.
* `cont.halt v = v`, so we are done and transition to the `cfg.halt v` state
* `cont.cons₁ fs as k v = k (v.head :: fs as)`, so we evaluate `fs as` now with the continuation
`k (v.head :: _)` (called `cons₂ v k`).
* `cont.cons₂ ns k v = k (ns.head :: v)`, where we now have everything we need to evaluate
`ns.head :: v`, so we return it to `k`.
* `cont.comp f k v = k (f v)`, so we call `f v` with `k` as the continuation.
* `cont.fix f k v = if v.head = 0 then k v.tail else k (fix f v.tail)`, where `v` is a value,
so we evaluate the if statement and either call `k` with `v.tail`, or evaluate `fix f` on `v.tail`
with `k` as the continuation (which immediately calls `f` with `cont.fix f k` as the continuation).
-/
def step_ret : cont → list ℕ → cfg
| cont.halt v := cfg.halt v
| (cont.cons₁ fs as k) v := step_normal fs (cont.cons₂ v k) as
| (cont.cons₂ ns k) v := step_ret k (ns.head :: v)
| (cont.comp f k) v := step_normal f k v
| (cont.fix f k) v := if v.head = 0 then step_ret k v.tail else
step_normal f (cont.fix f k) v.tail
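/- Continuing the trace after `step_normal` above (hand trace, not kernel-checked):
    step_ret (cont.cons₁ code.tail v cont.halt) [v.head.succ]
      = step_normal code.tail (cont.cons₂ [v.head.succ] cont.halt) v
      = cfg.ret (cont.cons₂ [v.head.succ] cont.halt) v.tail
and one more `step_ret` yields `cfg.halt (v.head.succ :: v.tail)`, which agrees with
`(code.cons code.succ code.tail).eval v`. -/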
/-- If we are not done (in `cfg.halt` state), then we must be still stuck on a continuation, so
this main loop calls `step_ret` with the new continuation. The overall `step` function transitions
from one `cfg` to another, only halting at the `cfg.halt` state. -/
def step : cfg → option cfg
| (cfg.halt _) := none
| (cfg.ret k v) := some (step_ret k v)
/-- In order to extract a compositional semantics from the sequential execution behavior of
configurations, we observe that continuations have a monoid structure, with `cont.halt` as the unit
and `cont.then` as the multiplication. `cont.then k₁ k₂` runs `k₁` until it halts, and then takes
the result of `k₁` and passes it to `k₂`.
We will not prove it is associative (although it is), but we are instead interested in the
associativity law `k₂ (eval c k₁) = eval c (k₁.then k₂)`. This holds at both the sequential and
compositional levels, and allows us to express running a machine without the ambient continuation
and relate it to the original machine's evaluation steps. In the literature this is usually
where one uses Turing machines embedded inside other Turing machines, but this approach allows us
to avoid changing the ambient type `cfg` in the middle of the recursion.
-/
def cont.then : cont → cont → cont
| cont.halt k' := k'
| (cont.cons₁ fs as k) k' := cont.cons₁ fs as (k.then k')
| (cont.cons₂ ns k) k' := cont.cons₂ ns (k.then k')
| (cont.comp f k) k' := cont.comp f (k.then k')
| (cont.fix f k) k' := cont.fix f (k.then k')
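/- For instance, unfolding the definition gives
`(cont.cons₂ ns cont.halt).then k' = cont.cons₂ ns k'`: the hole stays where it is and only the
`halt` at the end of the chain is replaced by `k'`, which is exactly what makes `cont.then_eval`
below hold. -/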
theorem cont.then_eval {k k' : cont} {v} : (k.then k').eval v = k.eval v >>= k'.eval :=
begin
induction k generalizing v; simp only [cont.eval, cont.then, bind_assoc, pure_bind, *],
{ simp only [← k_ih] },
{ split_ifs; [refl, simp only [← k_ih, bind_assoc]] }
end
/-- The `then k` function is a "configuration homomorphism". Its operation on states is to append
`k` to the continuation of a `cfg.ret` state, and to run `k` on `v` if we are in the `cfg.halt v`
state. -/
def cfg.then : cfg → cont → cfg
| (cfg.halt v) k' := step_ret k' v
| (cfg.ret k v) k' := cfg.ret (k.then k') v
/-- The `step_normal` function respects the `then k'` homomorphism. Note that this is an exact
equality, not a simulation; the original and embedded machines move in lock-step until the
embedded machine reaches the halt state. -/
theorem step_normal_then (c) (k k' : cont) (v) :
step_normal c (k.then k') v = (step_normal c k v).then k' :=
begin
induction c with generalizing k v;
simp only [cont.then, step_normal, cfg.then, *] {constructor_eq := ff},
case turing.to_partrec.code.cons : c c' ih ih'
{ rw [← ih, cont.then] },
case turing.to_partrec.code.comp : c c' ih ih'
{ rw [← ih', cont.then] },
{ cases v.head; simp only [nat.elim] },
case turing.to_partrec.code.fix : c ih
{ rw [← ih, cont.then] },
end
/-- The `step_ret` function respects the `then k'` homomorphism. Note that this is an exact
equality, not a simulation; the original and embedded machines move in lock-step until the
embedded machine reaches the halt state. -/
theorem step_ret_then {k k' : cont} {v} :
step_ret (k.then k') v = (step_ret k v).then k' :=
begin
induction k generalizing v;
simp only [cont.then, step_ret, cfg.then, *],
{ rw ← step_normal_then, refl },
{ rw ← step_normal_then },
{ split_ifs, {rw ← k_ih}, {rw ← step_normal_then, refl} },
end
/-- This is a temporary definition, because we will prove in `code_is_ok` that it always holds.
It asserts that `c` is semantically correct; that is, for any `k` and `v`,
`eval (step_normal c k v) = eval (cfg.ret k (code.eval c v))`, as an equality of partial values
(so one diverges iff the other does).
In particular, we can let `k = cont.halt`, and then this asserts that `step_normal c cont.halt v`
evaluates to `cfg.halt (code.eval c v)`. -/
def code.ok (c : code) :=
∀ k v, eval step (step_normal c k v) = code.eval c v >>= λ v, eval step (cfg.ret k v)
theorem code.ok.zero {c} (h : code.ok c) {v} :
eval step (step_normal c cont.halt v) = cfg.halt <$> code.eval c v :=
begin
rw [h, ← bind_pure_comp_eq_map], congr, funext v,
exact part.eq_some_iff.2 (mem_eval.2 ⟨refl_trans_gen.single rfl, rfl⟩),
end
theorem step_normal.is_ret (c k v) : ∃ k' v', step_normal c k v = cfg.ret k' v' :=
begin
induction c generalizing k v,
iterate 3 { exact ⟨_, _, rfl⟩ },
case cons : f fs IHf IHfs { apply IHf },
case comp : f g IHf IHg { apply IHg },
case case : f g IHf IHg
{ rw step_normal, cases v.head; simp only [nat.elim]; [apply IHf, apply IHg] },
case fix : f IHf { apply IHf },
end
theorem cont_eval_fix {f k v} (fok : code.ok f) :
eval step (step_normal f (cont.fix f k) v) = f.fix.eval v >>= λ v, eval step (cfg.ret k v) :=
begin
refine part.ext (λ x, _),
simp only [part.bind_eq_bind, part.mem_bind_iff],
split,
{ suffices :
∀ c, x ∈ eval step c →
∀ v c', c = cfg.then c' (cont.fix f k) → reaches step (step_normal f cont.halt v) c' →
∃ v₁ ∈ f.eval v,
∃ v₂ ∈ (if list.head v₁ = 0 then pure v₁.tail else f.fix.eval v₁.tail),
x ∈ eval step (cfg.ret k v₂),
{ intro h,
obtain ⟨v₁, hv₁, v₂, hv₂, h₃⟩ :=
this _ h _ _ (step_normal_then _ cont.halt _ _) refl_trans_gen.refl,
refine ⟨v₂, pfun.mem_fix_iff.2 _, h₃⟩,
simp only [part.eq_some_iff.2 hv₁, part.map_some],
split_ifs at hv₂ ⊢,
{ rw part.mem_some_iff.1 hv₂, exact or.inl (part.mem_some _) },
{ exact or.inr ⟨_, part.mem_some _, hv₂⟩ } },
refine λ c he, eval_induction he (λ y h IH, _),
rintro v (⟨v'⟩ | ⟨k',v'⟩) rfl hr; rw cfg.then at h IH,
{ have := mem_eval.2 ⟨hr, rfl⟩,
rw [fok, part.bind_eq_bind, part.mem_bind_iff] at this,
obtain ⟨v'', h₁, h₂⟩ := this,
rw reaches_eval at h₂, swap, exact refl_trans_gen.single rfl,
cases part.mem_unique h₂ (mem_eval.2 ⟨refl_trans_gen.refl, rfl⟩),
refine ⟨v', h₁, _⟩, rw [step_ret] at h,
revert h, by_cases he : v'.head = 0; simp only [exists_prop, if_pos, if_false, he]; intro h,
{ refine ⟨_, part.mem_some _, _⟩,
rw reaches_eval, exact h, exact refl_trans_gen.single rfl },
{ obtain ⟨k₀, v₀, e₀⟩ := step_normal.is_ret f cont.halt v'.tail,
have e₁ := step_normal_then f cont.halt (cont.fix f k) v'.tail,
rw [e₀, cont.then, cfg.then] at e₁,
obtain ⟨v₁, hv₁, v₂, hv₂, h₃⟩ :=
IH (step_ret (k₀.then (cont.fix f k)) v₀) _ v'.tail _ step_ret_then _,
{ refine ⟨_, pfun.mem_fix_iff.2 _, h₃⟩,
simp only [part.eq_some_iff.2 hv₁, part.map_some, part.mem_some_iff],
split_ifs at hv₂ ⊢; [exact or.inl (part.mem_some_iff.1 hv₂),
exact or.inr ⟨_, rfl, hv₂⟩] },
{ rw [step_ret, if_neg he, e₁], refl },
{ apply refl_trans_gen.single, rw e₀, exact rfl } } },
{ exact IH _ rfl _ _ step_ret_then (refl_trans_gen.tail hr rfl) } },
{ rintro ⟨v', he, hr⟩,
rw reaches_eval at hr, swap, exact refl_trans_gen.single rfl,
refine pfun.fix_induction he (λ v (he : v' ∈ f.fix.eval v) IH, _),
rw [fok, part.bind_eq_bind, part.mem_bind_iff],
obtain he | ⟨v'', he₁', _⟩ := pfun.mem_fix_iff.1 he,
{ obtain ⟨v', he₁, he₂⟩ := (part.mem_map_iff _).1 he, split_ifs at he₂; cases he₂,
refine ⟨_, he₁, _⟩,
rw reaches_eval, swap, exact refl_trans_gen.single rfl,
rwa [step_ret, if_pos h] },
{ obtain ⟨v₁, he₁, he₂⟩ := (part.mem_map_iff _).1 he₁', split_ifs at he₂; cases he₂,
clear he₂ he₁',
refine ⟨_, he₁, _⟩,
rw reaches_eval, swap, exact refl_trans_gen.single rfl,
rwa [step_ret, if_neg h],
exact IH v₁.tail ((part.mem_map_iff _).2 ⟨_, he₁, if_neg h⟩) } }
end
theorem code_is_ok (c) : code.ok c :=
begin
induction c; intros k v; rw step_normal,
iterate 3 { simp only [code.eval, pure_bind] },
case cons : f fs IHf IHfs
{ rw [code.eval, IHf],
simp only [bind_assoc, cont.eval, pure_bind], congr, funext v,
rw [reaches_eval], swap, exact refl_trans_gen.single rfl,
rw [step_ret, IHfs], congr, funext v',
refine eq.trans _ (eq.symm _);
try {exact reaches_eval (refl_trans_gen.single rfl)} },
case comp : f g IHf IHg
{ rw [code.eval, IHg],
simp only [bind_assoc, cont.eval, pure_bind], congr, funext v,
rw [reaches_eval], swap, exact refl_trans_gen.single rfl,
rw [step_ret, IHf] },
case case : f g IHf IHg
{ simp only [code.eval], cases v.head; simp only [nat.elim, code.eval];
[apply IHf, apply IHg] },
case fix : f IHf { rw cont_eval_fix IHf },
end
theorem step_normal_eval (c v) : eval step (step_normal c cont.halt v) = cfg.halt <$> c.eval v :=
(code_is_ok c).zero
theorem step_ret_eval {k v} : eval step (step_ret k v) = cfg.halt <$> k.eval v :=
begin
induction k generalizing v,
case halt :
{ simp only [mem_eval, cont.eval, map_pure],
exact part.eq_some_iff.2 (mem_eval.2 ⟨refl_trans_gen.refl, rfl⟩) },
case cons₁ : fs as k IH
{ rw [cont.eval, step_ret, code_is_ok],
simp only [← bind_pure_comp_eq_map, bind_assoc], congr, funext v',
rw [reaches_eval], swap, exact refl_trans_gen.single rfl,
rw [step_ret, IH, bind_pure_comp_eq_map] },
case cons₂ : ns k IH { rw [cont.eval, step_ret], exact IH },
case comp : f k IH
{ rw [cont.eval, step_ret, code_is_ok],
simp only [← bind_pure_comp_eq_map, bind_assoc], congr, funext v',
rw [reaches_eval], swap, exact refl_trans_gen.single rfl,
rw [IH, bind_pure_comp_eq_map] },
case fix : f k IH
{ rw [cont.eval, step_ret], simp only [bind_pure_comp_eq_map],
split_ifs, { exact IH },
simp only [← bind_pure_comp_eq_map, bind_assoc, cont_eval_fix (code_is_ok _)],
congr, funext, rw [bind_pure_comp_eq_map, ← IH],
exact reaches_eval (refl_trans_gen.single rfl) },
end
end to_partrec
/-!
## Simulating sequentialized partial recursive functions in TM2
At this point we have a sequential model of partial recursive functions: the `cfg` type and
`step : cfg → option cfg` function from the previous section. The key feature of this model is that
it does a finite amount of computation (in fact, an amount which is statically bounded by the size
of the program) between each step, and no individual step can diverge (unlike the compositional
semantics, where every sub-part of the computation is potentially divergent). So we can utilize the
same techniques as in the other TM simulations in `computability.turing_machine` to prove that
each step corresponds to a finite number of steps in a lower level model. (We don't prove it here,
but in anticipation of the complexity class P, the simulation is actually polynomial-time as well.)
The target model is `turing.TM2`, which has a fixed finite set of stacks, a bit of local storage,
with programs selected from a potentially infinite (but finitely accessible) set of program
positions, or labels `Λ`, each of which executes a finite sequence of basic stack commands.
For this program we will need four stacks, each on an alphabet `Γ'` like so:
inductive Γ' | Cons | cons | bit0 | bit1
We represent a number as a bit sequence, lists of numbers by putting `cons` after each element, and
lists of lists of natural numbers by putting `Cons` after each list. For example:
0 ~> []
1 ~> [bit1]
6 ~> [bit0, bit1, bit1]
[1, 2] ~> [bit1, cons, bit0, bit1, cons]
[[], [1, 2]] ~> [Cons, bit1, cons, bit0, bit1, cons, Cons]
The four stacks are `main`, `rev`, `aux`, `stack`. In normal mode, `main` contains the input to the
current program (a `list ℕ`) and `stack` contains data (a `list (list ℕ)`) associated to the
current continuation, and in `ret` mode `main` contains the value that is being passed to the
continuation and `stack` contains the data for the continuation. The `rev` and `aux` stacks are
usually empty; `rev` is used to store reversed data when e.g. moving a value from one stack to
another, while `aux` is used as a temporary for a `main`/`stack` swap that happens during `cons₁`
evaluation.
The only local store we need is `option Γ'`, which stores the result of the last pop
operation. (Most of our working data are natural numbers, which are too large to fit in the local
store.)
The continuations from the previous section are data-carrying, containing all the values that have
been computed and are awaiting other arguments. In order to have only a finite number of
continuations appear in the program so that they can be used in machine states, we separate the
data part (anything with type `list ℕ`) from the `cont` type, producing a `cont'` type that lacks
this information. The data is kept on the `stack` stack.
Because we want to have subroutines for e.g. moving an entire stack to another place, we use an
infinite inductive type `Λ'` so that we can execute a program and then return to do something else
without having to define too many different kinds of intermediate states. (We must nevertheless
prove that only finitely many labels are accessible.) The labels are:
* `move p k₁ k₂ q`: pop elements off stack `k₁` and push them onto `k₂` as long as `p` fails on
the value being moved. The first element satisfying `p`, if any, is placed in neither stack but
left in the local store. At the end of the operation, `k₂` has the moved elements of `k₁` on top,
in reverse order. Then do `q`.
* `clear p k q`: delete elements from stack `k` until `p` is true. Like `move`, the last element is
left in the local storage. Then do `q`.
* `copy q`: Move all elements from `rev` to both `main` and `stack` (in reverse order),
then do `q`. That is, it takes `(a, b, c, d)` to `(b.reverse ++ a, [], c, b.reverse ++ d)`.
* `push k f q`: push `f s`, where `s` is the local store, to stack `k`, then do `q`. This is a
duplicate of the `push` instruction that is part of the TM2 model, but by having a subroutine
just for this purpose we can build up programs to execute inside a `goto` statement, where we
have the flexibility to be general recursive.
* `read (f : option Γ' → Λ')`: go to state `f s` where `s` is the local store. Again this is only
here for convenience.
* `succ q`: perform a successor operation. Assuming `[n]` is encoded on `main` before,
`[n+1]` will be on main after. This implements successor for binary natural numbers.
* `pred q₁ q₂`: perform a predecessor operation or `case` statement. If `[]` is encoded on
`main` before, then we transition to `q₁` with `[]` on main; if `(0 :: v)` is on `main` before
then `v` will be on `main` after and we transition to `q₁`; and if `(n+1 :: v)` is on `main`
before then `n :: v` will be on `main` after and we transition to `q₂`.
* `ret k`: call continuation `k`. Each continuation has its own interpretation of the data in
`stack` and sets up the data for the next continuation.
* `ret (cons₁ fs k)`: `v :: k_data` on `stack` and `ns` on `main`, and the next step expects
`v` on `main` and `ns :: k_data` on `stack`. So we have to do a little dance here with six
reverse-moves using the `aux` stack to perform a three-point swap, each of which involves two
reversals.
* `ret (cons₂ k)`: `ns :: k_data` is on `stack` and `v` is on `main`, and we have to put
`ns.head :: v` on `main` and `k_data` on `stack`. This is done using the `head` subroutine.
* `ret (fix f k)`: This stores no data, so we just check if `main` starts with `0` and
if so, remove it and call `k`, otherwise `clear` the first value and call `f`.
* `ret halt`: the stack is empty, and `main` has the output. Do nothing and halt.
In addition to these basic states, we define some additional subroutines that are used in the
above:
* `push'`, `peek'`, `pop'` are special versions of the builtins that use the local store to supply
inputs and outputs.
* `unrev`: special case `move ff rev main` to move everything from `rev` back to `main`. Used as a
cleanup operation in several functions.
* `move_excl p k₁ k₂ q`: same as `move` but pushes the last value read back onto the source stack.
* `move₂ p k₁ k₂ q`: double `move`, so that the result comes out in the right order at the target
stack. Implemented as `move_excl p k₁ rev; move ff rev k₂`. Assumes that neither `k₁` nor `k₂` is
`rev` and `rev` is initially empty.
* `head k q`: get the first natural number from stack `k` and reverse-move it to `rev`, then clear
the rest of the list at `k` and then `unrev` to reverse-move the head value to `main`. This is
used with `k = main` to implement regular `head`, i.e. if `v` is on `main` before then `[v.head]`
will be on `main` after; and also with `k = stack` for the `cons` operation, which has `v` on
`main` and `ns :: k_data` on `stack`, and results in `k_data` on `stack` and `ns.head :: v` on
`main`.
* `tr_normal` is the main entry point, defining states that perform a given `code` computation.
It mostly just dispatches to functions written above.
The main theorem of this section is `tr_eval`, which asserts that for each code `c`,
the state `init c v` steps to `halt v'` in finitely many steps if and only if
`code.eval c v = some v'`.
-/
namespace partrec_to_TM2
section
open to_partrec
/-- The alphabet for the stacks in the program. `bit0` and `bit1` are used to represent `ℕ` values
as lists of binary digits, `cons` is used to separate `list ℕ` values, and `Cons` is used to
separate `list (list ℕ)` values. See the section documentation. -/
@[derive [decidable_eq, inhabited, fintype]]
inductive Γ' | Cons | cons | bit0 | bit1
/-- The four stacks used by the program. `main` is used to store the input value in `tr_normal`
mode and the output value in `Λ'.ret` mode, while `stack` is used to keep all the data for the
continuations. `rev` is used to store reversed lists when transferring values between stacks, and
`aux` is only used once in `cons₁`. See the section documentation. -/
@[derive [decidable_eq, inhabited]]
inductive K' | main | rev | aux | stack
open K'
/-- Continuations as in `to_partrec.cont` but with the data removed. This is done because we want
the set of all continuations in the program to be finite (so that it can ultimately be encoded into
the finite state machine of a Turing machine), but a continuation can handle a potentially infinite
number of data values during execution. -/
@[derive [decidable_eq, inhabited]]
inductive cont'
| halt
| cons₁ : code → cont' → cont'
| cons₂ : cont' → cont'
| comp : code → cont' → cont'
| fix : code → cont' → cont'
/-- The set of program positions. We make extensive use of inductive types here to let us describe
"subroutines"; for example `clear p k q` is a program that clears stack `k`, then does `q` where
`q` is another label. In order to prevent this from resulting in an infinite number of distinct
accessible states, we are careful to be non-recursive (although loops are okay). See the section
documentation for a description of all the programs. -/
inductive Λ'
| move (p : Γ' → bool) (k₁ k₂ : K') (q : Λ')
| clear (p : Γ' → bool) (k : K') (q : Λ')
| copy (q : Λ')
| push (k : K') (s : option Γ' → option Γ') (q : Λ')
| read (f : option Γ' → Λ')
| succ (q : Λ')
| pred (q₁ q₂ : Λ')
| ret (k : cont')
instance : inhabited Λ' := ⟨Λ'.ret cont'.halt⟩
instance : decidable_eq Λ' :=
λ a b, begin
induction a generalizing b; cases b; try { apply decidable.is_false, rintro ⟨⟨⟩⟩, done },
all_goals { exactI decidable_of_iff' _ (by simp [function.funext_iff]) },
end
/-- The type of TM2 statements used by this machine. -/
@[derive inhabited]
def stmt' := TM2.stmt (λ _:K', Γ') Λ' (option Γ')
/-- The type of TM2 configurations used by this machine. -/
@[derive inhabited]
def cfg' := TM2.cfg (λ _:K', Γ') Λ' (option Γ')
open TM2.stmt
/-- A predicate that detects the end of a natural number, either `Γ'.cons` or `Γ'.Cons` (or
implicitly the end of the list), for use in predicate-taking functions like `move` and `clear`. -/
@[simp]
def nat_end : Γ' → bool
| Γ'.Cons := tt
| Γ'.cons := tt
| _ := ff
/-- Pop a value from the stack and place the result in local store. -/
@[simp] def pop' (k : K') : stmt' → stmt' := pop k (λ x v, v)
/-- Peek a value from the stack and place the result in local store. -/
@[simp] def peek' (k : K') : stmt' → stmt' := peek k (λ x v, v)
/-- Push the value in the local store to the given stack. -/
@[simp] def push' (k : K') : stmt' → stmt' := push k (λ x, x.iget)
/-- Move everything from the `rev` stack to the `main` stack (reversed). -/
def unrev := Λ'.move (λ _, ff) rev main
/-- Move elements from `k₁` to `k₂` while `p` holds, with the last element being left on `k₁`. -/
def move_excl (p k₁ k₂ q) :=
Λ'.move p k₁ k₂ $ Λ'.push k₁ id q
/-- Move elements from `k₁` to `k₂` without reversion, by performing a double move via the `rev`
stack. -/
def move₂ (p k₁ k₂ q) := move_excl p k₁ rev $ Λ'.move (λ _, ff) rev k₂ q
/-- Assuming `tr_list v` is on the front of stack `k`, remove it, and push `v.head` onto `main`.
See the section documentation. -/
def head (k : K') (q : Λ') : Λ' :=
Λ'.move nat_end k rev $
Λ'.push rev (λ _, some Γ'.cons) $
Λ'.read $ λ s,
(if s = some Γ'.Cons then id else Λ'.clear (λ x, x = Γ'.Cons) k) $
unrev q
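/- An informal trace of `head main q` (not kernel-checked): starting with
`tr_list [3, 2] = [bit1, bit1, cons, bit0, bit1, cons]` on `main`, the `move` sends the two
`bit1`s to `rev` and stops at the first `cons`; a `cons` marker is pushed onto `rev`; since the
stopping symbol was not `Cons`, the rest of `main` (the encoding of `[2]`) is cleared; finally
`unrev` leaves `[bit1, bit1, cons] = tr_list [3]` on `main`, as `head_main_ok` below asserts. -/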
/-- The program that evaluates code `c` with continuation `k`. This expects an initial state where
`tr_list v` is on `main`, `tr_cont_stack k` is on `stack`, and `aux` and `rev` are empty.
See the section documentation for details. -/
@[simp] def tr_normal : code → cont' → Λ'
| code.zero' k := Λ'.push main (λ _, some Γ'.cons) $ Λ'.ret k
| code.succ k := head main $ Λ'.succ $ Λ'.ret k
| code.tail k := Λ'.clear nat_end main $ Λ'.ret k
| (code.cons f fs) k :=
Λ'.push stack (λ _, some Γ'.Cons) $
Λ'.move (λ _, ff) main rev $ Λ'.copy $
tr_normal f (cont'.cons₁ fs k)
| (code.comp f g) k := tr_normal g (cont'.comp f k)
| (code.case f g) k := Λ'.pred (tr_normal f k) (tr_normal g k)
| (code.fix f) k := tr_normal f (cont'.fix f k)
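/- For example (informal unfolding): `tr_normal code.zero' k` pushes a single `cons` symbol onto
`main`, which prepends an encoded `0` to the list currently stored there, and then returns to `k`;
and `tr_normal (code.comp f g) k` is just `tr_normal g (cont'.comp f k)`, mirroring `step_normal`
above: composition produces no machine code of its own, it only records `f` in the
continuation. -/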
/-- The main program. See the section documentation for details. -/
@[simp] def tr : Λ' → stmt'
| (Λ'.move p k₁ k₂ q) :=
pop' k₁ $ branch (λ s, s.elim tt p)
( goto $ λ _, q )
( push' k₂ $ goto $ λ _, Λ'.move p k₁ k₂ q )
| (Λ'.push k f q) :=
branch (λ s, (f s).is_some)
( push k (λ s, (f s).iget) $ goto $ λ _, q )
( goto $ λ _, q )
| (Λ'.read q) := goto q
| (Λ'.clear p k q) :=
pop' k $ branch (λ s, s.elim tt p)
( goto $ λ _, q )
( goto $ λ _, Λ'.clear p k q )
| (Λ'.copy q) :=
pop' rev $ branch option.is_some
( push' main $ push' stack $ goto $ λ _, Λ'.copy q )
( goto $ λ _, q )
| (Λ'.succ q) :=
pop' main $ branch (λ s, s = some Γ'.bit1)
( push rev (λ _, Γ'.bit0) $
goto $ λ _, Λ'.succ q ) $
branch (λ s, s = some Γ'.cons)
( push main (λ _, Γ'.cons) $
push main (λ _, Γ'.bit1) $
goto $ λ _, unrev q )
( push main (λ _, Γ'.bit1) $
goto $ λ _, unrev q )
| (Λ'.pred q₁ q₂) :=
pop' main $ branch (λ s, s = some Γ'.bit0)
( push rev (λ _, Γ'.bit1) $
goto $ λ _, Λ'.pred q₁ q₂ ) $
branch (λ s, nat_end s.iget)
( goto $ λ _, q₁ )
( peek' main $ branch (λ s, nat_end s.iget)
( goto $ λ _, unrev q₂ )
( push rev (λ _, Γ'.bit0) $
goto $ λ _, unrev q₂ ) )
| (Λ'.ret (cont'.cons₁ fs k)) := goto $ λ _,
move₂ (λ _, ff) main aux $
move₂ (λ s, s = Γ'.Cons) stack main $
move₂ (λ _, ff) aux stack $
tr_normal fs (cont'.cons₂ k)
| (Λ'.ret (cont'.cons₂ k)) := goto $ λ _, head stack $ Λ'.ret k
| (Λ'.ret (cont'.comp f k)) := goto $ λ _, tr_normal f k
| (Λ'.ret (cont'.fix f k)) :=
pop' main $ goto $ λ s,
cond (nat_end s.iget) (Λ'.ret k) $
Λ'.clear nat_end main $ tr_normal f (cont'.fix f k)
| (Λ'.ret cont'.halt) := load (λ _, none) $ halt
/-- Translating a `cont` continuation to a `cont'` continuation simply entails dropping all the
data. This data is instead encoded in `tr_cont_stack` in the configuration. -/
def tr_cont : cont → cont'
| cont.halt := cont'.halt
| (cont.cons₁ c _ k) := cont'.cons₁ c (tr_cont k)
| (cont.cons₂ _ k) := cont'.cons₂ (tr_cont k)
| (cont.comp c k) := cont'.comp c (tr_cont k)
| (cont.fix c k) := cont'.fix c (tr_cont k)
/-- We use `pos_num` to define the translation of binary natural numbers. A natural number is
represented as a little-endian list of `bit0` and `bit1` elements:
1 = [bit1]
2 = [bit0, bit1]
3 = [bit1, bit1]
4 = [bit0, bit0, bit1]
In particular, this representation guarantees no trailing `bit0`'s at the end of the list. -/
def tr_pos_num : pos_num → list Γ'
| pos_num.one := [Γ'.bit1]
| (pos_num.bit0 n) := Γ'.bit0 :: tr_pos_num n
| (pos_num.bit1 n) := Γ'.bit1 :: tr_pos_num n
/-- We use `num` to define the translation of binary natural numbers. Positive numbers are
translated using `tr_pos_num`, and `tr_num 0 = []`. So there are never any trailing `bit0`'s in
a translated `num`.
0 = []
1 = [bit1]
2 = [bit0, bit1]
3 = [bit1, bit1]
4 = [bit0, bit0, bit1]
-/
def tr_num : num → list Γ'
| num.zero := []
| (num.pos n) := tr_pos_num n
/-- Because we use binary encoding, we define `tr_nat` in terms of `tr_num`, using `num`, which are
binary natural numbers. (We could also use `nat.binary_rec_on`, but `num` and `pos_num` make for
easy inductions.) -/
def tr_nat (n : ℕ) : list Γ' := tr_num n
@[simp] theorem tr_nat_zero : tr_nat 0 = [] := by rw [tr_nat, nat.cast_zero]; refl
@[simp] theorem tr_nat_default : tr_nat default = [] := tr_nat_zero
/-- Lists are translated with a `cons` after each encoded number.
For example:
[] = []
[0] = [cons]
[1] = [bit1, cons]
[6, 0] = [bit0, bit1, bit1, cons, cons]
-/
@[simp] def tr_list : list ℕ → list Γ'
| [] := []
| (n :: ns) := tr_nat n ++ Γ'.cons :: tr_list ns
/-- Lists of lists are translated with a `Cons` after each encoded list.
For example:
[] = []
[[]] = [Cons]
[[], []] = [Cons, Cons]
[[0]] = [cons, Cons]
[[1, 2], [0]] = [bit1, cons, bit0, bit1, cons, Cons, cons, Cons]
-/
@[simp] def tr_llist : list (list ℕ) → list Γ'
| [] := []
| (l :: ls) := tr_list l ++ Γ'.Cons :: tr_llist ls
/-- The data part of a continuation is a list of lists, which is encoded on the `stack` stack
using `tr_llist`. -/
@[simp] def cont_stack : cont → list (list ℕ)
| cont.halt := []
| (cont.cons₁ _ ns k) := ns :: cont_stack k
| (cont.cons₂ ns k) := ns :: cont_stack k
| (cont.comp _ k) := cont_stack k
| (cont.fix _ k) := cont_stack k
/-- The data part of a continuation is a list of lists, which is encoded on the `stack` stack
using `tr_llist`. -/
def tr_cont_stack (k : cont) := tr_llist (cont_stack k)
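/- For instance, by unfolding the definitions, the continuation
`cont.cons₁ fs as (cont.cons₂ ns cont.halt)` carries the data `[as, ns]`, so its `tr_cont_stack`
is `tr_list as ++ Γ'.Cons :: tr_list ns ++ [Γ'.Cons]`. -/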
/-- This is the nondependent eliminator for `K'`, but we use it specifically here in order to
represent the stack data as four lists rather than as a function `K' → list Γ'`, because this makes
rewrites easier. The theorems `K'.elim_update_main` et al. show how such a function is updated
after an `update` to one of the components. -/
@[simp] def K'.elim (a b c d : list Γ') : K' → list Γ'
| K'.main := a
| K'.rev := b
| K'.aux := c
| K'.stack := d
@[simp] theorem K'.elim_update_main {a b c d a'} :
update (K'.elim a b c d) main a' = K'.elim a' b c d := by funext x; cases x; refl
@[simp] theorem K'.elim_update_rev {a b c d b'} :
update (K'.elim a b c d) rev b' = K'.elim a b' c d := by funext x; cases x; refl
@[simp] theorem K'.elim_update_aux {a b c d c'} :
update (K'.elim a b c d) aux c' = K'.elim a b c' d := by funext x; cases x; refl
@[simp] theorem K'.elim_update_stack {a b c d d'} :
update (K'.elim a b c d) stack d' = K'.elim a b c d' := by funext x; cases x; refl
/-- The halting state corresponding to a `list ℕ` output value. -/
def halt (v : list ℕ) : cfg' := ⟨none, none, K'.elim (tr_list v) [] [] []⟩
/-- The `cfg` states map to `cfg'` states almost one to one, except that in normal operation the
local store contains an arbitrary garbage value. To make the final theorem cleaner we explicitly
clear it in the halt state so that there is exactly one configuration corresponding to output `v`.
-/
def tr_cfg : cfg → cfg' → Prop
| (cfg.ret k v) c' := ∃ s, c' =
⟨some (Λ'.ret (tr_cont k)), s, K'.elim (tr_list v) [] [] (tr_cont_stack k)⟩
| (cfg.halt v) c' := c' = halt v
/-- This could be a general list definition, but it is also somewhat specialized to this
application. `split_at_pred p L` will search `L` for the first element satisfying `p`.
If it is found, say `L = l₁ ++ a :: l₂` where `a` satisfies `p` but `l₁` does not, then it returns
`(l₁, some a, l₂)`. Otherwise, if there is no such element, it returns `(L, none, [])`. -/
def split_at_pred {α} (p : α → bool) : list α → list α × option α × list α
| [] := ([], none, [])
| (a :: as) := cond (p a) ([], some a, as) $
let ⟨l₁, o, l₂⟩ := split_at_pred as in ⟨a :: l₁, o, l₂⟩
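/- For example, by direct computation:
    split_at_pred nat_end [Γ'.bit1, Γ'.bit1, Γ'.cons, Γ'.bit0]
      = ([Γ'.bit1, Γ'.bit1], some Γ'.cons, [Γ'.bit0])
which is exactly the decomposition used by `move_ok` and `clear_ok` below. -/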
theorem split_at_pred_eq {α} (p : α → bool) : ∀ L l₁ o l₂,
(∀ x ∈ l₁, p x = ff) →
option.elim (L = l₁ ∧ l₂ = []) (λ a, p a = tt ∧ L = l₁ ++ a :: l₂) o →
split_at_pred p L = (l₁, o, l₂)
| [] _ none _ _ ⟨rfl, rfl⟩ := rfl
| [] l₁ (some o) l₂ h₁ ⟨h₂, h₃⟩ := by simp at h₃; contradiction
| (a :: L) l₁ o l₂ h₁ h₂ := begin
rw [split_at_pred],
have IH := split_at_pred_eq L,
cases o,
{ cases l₁ with a' l₁; rcases h₂ with ⟨⟨⟩, rfl⟩,
rw [h₁ a (or.inl rfl), cond, IH L none [] _ ⟨rfl, rfl⟩], refl,
exact λ x h, h₁ x (or.inr h) },
{ cases l₁ with a' l₁; rcases h₂ with ⟨h₂, ⟨⟩⟩, {rw [h₂, cond]},
rw [h₁ a (or.inl rfl), cond, IH l₁ (some o) l₂ _ ⟨h₂, _⟩]; try {refl},
exact λ x h, h₁ x (or.inr h) },
end
theorem split_at_pred_ff {α} (L : list α) : split_at_pred (λ _, ff) L = (L, none, []) :=
split_at_pred_eq _ _ _ _ _ (λ _ _, rfl) ⟨rfl, rfl⟩
theorem move_ok {p k₁ k₂ q s L₁ o L₂} {S : K' → list Γ'}
(h₁ : k₁ ≠ k₂) (e : split_at_pred p (S k₁) = (L₁, o, L₂)) :
reaches₁ (TM2.step tr)
⟨some (Λ'.move p k₁ k₂ q), s, S⟩
⟨some q, o, update (update S k₁ L₂) k₂ (L₁.reverse_core (S k₂))⟩ :=
begin
induction L₁ with a L₁ IH generalizing S s,
{ rw [(_ : [].reverse_core _ = _), function.update_eq_self],
swap, { rw function.update_noteq h₁.symm, refl },
refine trans_gen.head' rfl _,
simp, cases S k₁ with a Sk, {cases e, refl},
simp [split_at_pred] at e ⊢,
cases p a; simp at e ⊢,
{ revert e, rcases split_at_pred p Sk with ⟨_, _, _⟩, rintro ⟨⟩ },
{ simp only [e] } },
{ refine trans_gen.head rfl _, simp,
cases e₁ : S k₁ with a' Sk; rw [e₁, split_at_pred] at e, {cases e},
cases e₂ : p a'; simp only [e₂, cond] at e, swap, {cases e},
rcases e₃ : split_at_pred p Sk with ⟨_, _, _⟩, rw [e₃, split_at_pred] at e, cases e,
simp [e₂],
convert @IH (update (update S k₁ Sk) k₂ (a :: S k₂)) _ _ using 2;
simp [function.update_noteq, h₁, h₁.symm, e₃, list.reverse_core],
simp [function.update_comm h₁.symm] }
end
theorem unrev_ok {q s} {S : K' → list Γ'} :
reaches₁ (TM2.step tr) ⟨some (unrev q), s, S⟩
⟨some q, none, update (update S rev []) main (list.reverse_core (S rev) (S main))⟩ :=
move_ok dec_trivial $ split_at_pred_ff _
theorem move₂_ok {p k₁ k₂ q s L₁ o L₂} {S : K' → list Γ'}
(h₁ : k₁ ≠ rev ∧ k₂ ≠ rev ∧ k₁ ≠ k₂) (h₂ : S rev = [])
(e : split_at_pred p (S k₁) = (L₁, o, L₂)) :
reaches₁ (TM2.step tr)
⟨some (move₂ p k₁ k₂ q), s, S⟩
⟨some q, none, update (update S k₁ (o.elim id list.cons L₂)) k₂ (L₁ ++ S k₂)⟩ :=
begin
refine (move_ok h₁.1 e).trans (trans_gen.head rfl _),
cases o; simp only [option.elim, tr, id.def],
{ convert move_ok h₁.2.1.symm (split_at_pred_ff _) using 2,
simp only [function.update_comm h₁.1, function.update_idem],
rw show update S rev [] = S, by rw [← h₂, function.update_eq_self],
simp only [function.update_noteq h₁.2.2.symm, function.update_noteq h₁.2.1,
function.update_noteq h₁.1.symm, list.reverse_core_eq, h₂,
function.update_same, list.append_nil, list.reverse_reverse] },
{ convert move_ok h₁.2.1.symm (split_at_pred_ff _) using 2,
simp only [h₂, function.update_comm h₁.1,
list.reverse_core_eq, function.update_same, list.append_nil, function.update_idem],
rw show update S rev [] = S, by rw [← h₂, function.update_eq_self],
simp only [function.update_noteq h₁.1.symm,
function.update_noteq h₁.2.2.symm, function.update_noteq h₁.2.1,
function.update_same, list.reverse_reverse] },
end
theorem clear_ok {p k q s L₁ o L₂} {S : K' → list Γ'}
(e : split_at_pred p (S k) = (L₁, o, L₂)) :
reaches₁ (TM2.step tr) ⟨some (Λ'.clear p k q), s, S⟩ ⟨some q, o, update S k L₂⟩ :=
begin
induction L₁ with a L₁ IH generalizing S s,
{ refine trans_gen.head' rfl _,
simp, cases S k with a Sk, {cases e, refl},
simp [split_at_pred] at e ⊢,
cases p a; simp at e ⊢,
{ revert e, rcases split_at_pred p Sk with ⟨_, _, _⟩, rintro ⟨⟩ },
{ simp only [e] } },
{ refine trans_gen.head rfl _, simp,
cases e₁ : S k with a' Sk; rw [e₁, split_at_pred] at e, {cases e},
cases e₂ : p a'; simp only [e₂, cond] at e, swap, {cases e},
rcases e₃ : split_at_pred p Sk with ⟨_, _, _⟩, rw [e₃, split_at_pred] at e, cases e,
simp [e₂],
convert @IH (update S k Sk) _ _ using 2; simp [e₃] }
end
theorem copy_ok (q s a b c d) :
reaches₁ (TM2.step tr)
⟨some (Λ'.copy q), s, K'.elim a b c d⟩
⟨some q, none, K'.elim (list.reverse_core b a) [] c (list.reverse_core b d)⟩ :=
begin
induction b with x b IH generalizing a d s,
{ refine trans_gen.single _, simp, refl },
refine trans_gen.head rfl _, simp, exact IH _ _ _,
end
theorem tr_pos_num_nat_end : ∀ n (x ∈ tr_pos_num n), nat_end x = ff
| pos_num.one _ (or.inl rfl) := rfl
| (pos_num.bit0 n) _ (or.inl rfl) := rfl
| (pos_num.bit0 n) _ (or.inr h) := tr_pos_num_nat_end n _ h
| (pos_num.bit1 n) _ (or.inl rfl) := rfl
| (pos_num.bit1 n) _ (or.inr h) := tr_pos_num_nat_end n _ h
theorem tr_num_nat_end : ∀ n (x ∈ tr_num n), nat_end x = ff
| (num.pos n) x h := tr_pos_num_nat_end n x h
theorem tr_nat_nat_end (n) : ∀ x ∈ tr_nat n, nat_end x = ff := tr_num_nat_end _
theorem tr_list_ne_Cons : ∀ l (x ∈ tr_list l), x ≠ Γ'.Cons
| (a :: l) x h := begin
simp [tr_list] at h,
obtain h | rfl | h := h,
{ rintro rfl, cases tr_nat_nat_end _ _ h },
{ rintro ⟨⟩ },
{ exact tr_list_ne_Cons l _ h }
end
theorem head_main_ok {q s L} {c d : list Γ'} :
reaches₁ (TM2.step tr)
⟨some (head main q), s, K'.elim (tr_list L) [] c d⟩
⟨some q, none, K'.elim (tr_list [L.head]) [] c d⟩ :=
begin
let o : option Γ' := list.cases_on L none (λ _ _, some Γ'.cons),
refine (move_ok dec_trivial
(split_at_pred_eq _ _ (tr_nat L.head) o (tr_list L.tail) (tr_nat_nat_end _) _)).trans
(trans_gen.head rfl (trans_gen.head rfl _)),
{ cases L; simp },
simp,
rw if_neg (show o ≠ some Γ'.Cons, by cases L; rintro ⟨⟩),
refine (clear_ok (split_at_pred_eq _ _ _ none [] _ ⟨rfl, rfl⟩)).trans _,
{ exact λ x h, (to_bool_ff (tr_list_ne_Cons _ _ h)) },
convert unrev_ok, simp [list.reverse_core_eq],
end
theorem head_stack_ok {q s L₁ L₂ L₃} :
reaches₁ (TM2.step tr)
⟨some (head stack q), s, K'.elim (tr_list L₁) [] [] (tr_list L₂ ++ Γ'.Cons :: L₃)⟩
⟨some q, none, K'.elim (tr_list (L₂.head :: L₁)) [] [] L₃⟩ :=
begin
cases L₂ with a L₂,
{ refine trans_gen.trans (move_ok dec_trivial
(split_at_pred_eq _ _ [] (some Γ'.Cons) L₃ (by rintro _ ⟨⟩) ⟨rfl, rfl⟩))
(trans_gen.head rfl (trans_gen.head rfl _)),
convert unrev_ok, simp, refl },
{ refine trans_gen.trans (move_ok dec_trivial
(split_at_pred_eq _ _ (tr_nat a) (some Γ'.cons)
(tr_list L₂ ++ Γ'.Cons :: L₃) (tr_nat_nat_end _) ⟨rfl, by simp⟩))
(trans_gen.head rfl (trans_gen.head rfl _)),
simp,
refine trans_gen.trans (clear_ok
(split_at_pred_eq _ _ (tr_list L₂) (some Γ'.Cons) L₃
(λ x h, (to_bool_ff (tr_list_ne_Cons _ _ h))) ⟨rfl, by simp⟩)) _,
convert unrev_ok, simp [list.reverse_core_eq] },
end
theorem succ_ok {q s n} {c d : list Γ'} :
reaches₁ (TM2.step tr)
⟨some (Λ'.succ q), s, K'.elim (tr_list [n]) [] c d⟩
⟨some q, none, K'.elim (tr_list [n.succ]) [] c d⟩ :=
begin
simp [tr_nat, num.add_one],
cases (n:num) with a,
{ refine trans_gen.head rfl _, simp,
rw if_neg, swap, rintro ⟨⟩, rw if_pos, swap, refl,
convert unrev_ok, simp, refl },
simp [num.succ, tr_num, num.succ'],
suffices : ∀ l₁,
∃ l₁' l₂' s', list.reverse_core l₁ (tr_pos_num a.succ) = list.reverse_core l₁' l₂' ∧
reaches₁ (TM2.step tr)
⟨some q.succ, s, K'.elim (tr_pos_num a ++ [Γ'.cons]) l₁ c d⟩
⟨some (unrev q), s', K'.elim (l₂' ++ [Γ'.cons]) l₁' c d⟩,
{ obtain ⟨l₁', l₂', s', e, h⟩ := this [], simp [list.reverse_core] at e,
refine h.trans _, convert unrev_ok using 2, simp [e, list.reverse_core_eq] },
induction a with m IH m IH generalizing s; intro l₁,
{ refine ⟨Γ'.bit0 :: l₁, [Γ'.bit1], some Γ'.cons, rfl, trans_gen.head rfl (trans_gen.single _)⟩,
simp [tr_pos_num] },
{ obtain ⟨l₁', l₂', s', e, h⟩ := IH (Γ'.bit0 :: l₁),
refine ⟨l₁', l₂', s', e, trans_gen.head _ h⟩, swap,
simp [pos_num.succ, tr_pos_num] },
{ refine ⟨l₁, _, some Γ'.bit0, rfl, trans_gen.single _⟩, simp, refl },
end
theorem pred_ok (q₁ q₂ s v) (c d : list Γ') :
∃ s', reaches₁ (TM2.step tr)
⟨some (Λ'.pred q₁ q₂), s, K'.elim (tr_list v) [] c d⟩
(v.head.elim
⟨some q₁, s', K'.elim (tr_list v.tail) [] c d⟩
(λ n _, ⟨some q₂, s', K'.elim (tr_list (n :: v.tail)) [] c d⟩)) :=
begin
rcases v with _|⟨_|n, v⟩,
{ refine ⟨none, trans_gen.single _⟩, simp, refl },
{ refine ⟨some Γ'.cons, trans_gen.single _⟩, simp },
refine ⟨none, _⟩, simp [tr_nat, num.add_one, num.succ, tr_num],
cases (n:num) with a,
{ simp [tr_pos_num, tr_num, show num.zero.succ' = pos_num.one, from rfl],
refine trans_gen.head rfl _, convert unrev_ok, simp, refl },
simp [tr_num, num.succ'],
suffices : ∀ l₁,
∃ l₁' l₂' s', list.reverse_core l₁ (tr_pos_num a) = list.reverse_core l₁' l₂' ∧
reaches₁ (TM2.step tr)
⟨some (q₁.pred q₂), s, K'.elim (tr_pos_num a.succ ++ Γ'.cons :: tr_list v) l₁ c d⟩
⟨some (unrev q₂), s', K'.elim (l₂' ++ Γ'.cons :: tr_list v) l₁' c d⟩,
{ obtain ⟨l₁', l₂', s', e, h⟩ := this [], simp [list.reverse_core] at e,
refine h.trans _, convert unrev_ok using 2, simp [e, list.reverse_core_eq] },
induction a with m IH m IH generalizing s; intro l₁,
{ refine ⟨Γ'.bit1 :: l₁, [], some Γ'.cons, rfl, trans_gen.head rfl (trans_gen.single _)⟩,
simp [tr_pos_num, show pos_num.one.succ = pos_num.one.bit0, from rfl] },
{ obtain ⟨l₁', l₂', s', e, h⟩ := IH (some Γ'.bit0) (Γ'.bit1 :: l₁),
refine ⟨l₁', l₂', s', e, trans_gen.head _ h⟩, simp, refl },
{ obtain ⟨a, l, e, h⟩ : ∃ a l, tr_pos_num m = a :: l ∧ nat_end a = ff,
{ cases m; refine ⟨_, _, rfl, rfl⟩ },
refine ⟨Γ'.bit0 :: l₁, _, some a, rfl, trans_gen.single _⟩,
simp [tr_pos_num, pos_num.succ, e, h, nat_end,
show some Γ'.bit1 ≠ some Γ'.bit0, from dec_trivial] },
end
theorem tr_normal_respects (c k v s) : ∃ b₂, tr_cfg (step_normal c k v) b₂ ∧
reaches₁ (TM2.step tr)
⟨some (tr_normal c (tr_cont k)), s, K'.elim (tr_list v) [] [] (tr_cont_stack k)⟩ b₂ :=
begin
induction c generalizing k v s,
case zero' : { refine ⟨_, ⟨s, rfl⟩, trans_gen.single _⟩, simp },
case succ : { refine ⟨_, ⟨none, rfl⟩, head_main_ok.trans succ_ok⟩ },
case tail :
{ let o : option Γ' := list.cases_on v none (λ _ _, some Γ'.cons),
refine ⟨_, ⟨o, rfl⟩, _⟩, convert clear_ok _, simp, swap,
refine split_at_pred_eq _ _ (tr_nat v.head) _ _ (tr_nat_nat_end _) _,
cases v; simp },
case cons : f fs IHf IHfs
{ obtain ⟨c, h₁, h₂⟩ := IHf (cont.cons₁ fs v k) v none,
refine ⟨c, h₁, trans_gen.head rfl $ (move_ok dec_trivial (split_at_pred_ff _)).trans _⟩,
simp [step_normal],
refine (copy_ok _ none [] (tr_list v).reverse _ _).trans _,
convert h₂ using 2,
simp [list.reverse_core_eq, tr_cont_stack] },
case comp : f g IHf IHg { exact IHg (cont.comp f k) v s },
case case : f g IHf IHg
{ rw step_normal,
obtain ⟨s', h⟩ := pred_ok _ _ s v _ _,
cases v.head with n,
{ obtain ⟨c, h₁, h₂⟩ := IHf k _ s', exact ⟨_, h₁, h.trans h₂⟩ },
{ obtain ⟨c, h₁, h₂⟩ := IHg k _ s', exact ⟨_, h₁, h.trans h₂⟩ } },
case fix : f IH { apply IH }
end
theorem tr_ret_respects (k v s) : ∃ b₂, tr_cfg (step_ret k v) b₂ ∧
reaches₁ (TM2.step tr)
⟨some (Λ'.ret (tr_cont k)), s, K'.elim (tr_list v) [] [] (tr_cont_stack k)⟩ b₂ :=
begin
induction k generalizing v s,
case halt { exact ⟨_, rfl, trans_gen.single rfl⟩ },
case cons₁ : fs as k IH
{ obtain ⟨s', h₁, h₂⟩ := tr_normal_respects fs (cont.cons₂ v k) as none,
refine ⟨s', h₁, trans_gen.head rfl _⟩, simp,
refine (move₂_ok dec_trivial _ (split_at_pred_ff _)).trans _, {refl}, simp,
refine (move₂_ok dec_trivial _ _).trans _, swap 4, {refl},
swap 4, {exact (split_at_pred_eq _ _ _ (some Γ'.Cons) _
(λ x h, to_bool_ff (tr_list_ne_Cons _ _ h)) ⟨rfl, rfl⟩)},
refine (move₂_ok dec_trivial _ (split_at_pred_ff _)).trans _, {refl}, simp,
exact h₂ },
case cons₂ : ns k IH
{ obtain ⟨c, h₁, h₂⟩ := IH (ns.head :: v) none,
exact ⟨c, h₁, trans_gen.head rfl $ head_stack_ok.trans h₂⟩ },
case comp : f k IH
{ obtain ⟨s', h₁, h₂⟩ := tr_normal_respects f k v s,
exact ⟨_, h₁, trans_gen.head rfl h₂⟩ },
case fix : f k IH
{ rw [step_ret],
have : if v.head = 0
then nat_end (tr_list v).head'.iget = tt ∧ (tr_list v).tail = tr_list v.tail
else nat_end (tr_list v).head'.iget = ff ∧
(tr_list v).tail = (tr_nat v.head).tail ++ Γ'.cons :: tr_list v.tail,
{ cases v with n, {exact ⟨rfl, rfl⟩}, cases n, {simp},
rw [tr_list, list.head, tr_nat, nat.cast_succ, num.add_one, num.succ, list.tail],
cases (n:num).succ'; exact ⟨rfl, rfl⟩ },
by_cases v.head = 0; simp [h] at this ⊢,
{ obtain ⟨c, h₁, h₂⟩ := IH v.tail (tr_list v).head',
refine ⟨c, h₁, trans_gen.head rfl _⟩,
simp [tr_cont, tr_cont_stack, this], exact h₂ },
{ obtain ⟨s', h₁, h₂⟩ := tr_normal_respects f (cont.fix f k) v.tail (some Γ'.cons),
refine ⟨_, h₁, trans_gen.head rfl $ trans_gen.trans _ h₂⟩,
swap 3, simp [tr_cont, this.1],
convert clear_ok (split_at_pred_eq _ _ (tr_nat v.head).tail (some Γ'.cons) _ _ _) using 2,
{ simp },
{ exact λ x h, tr_nat_nat_end _ _ (list.tail_subset _ h) },
{ exact ⟨rfl, this.2⟩ } } },
end
theorem tr_respects : respects step (TM2.step tr) tr_cfg
| (cfg.ret k v) _ ⟨s, rfl⟩ := tr_ret_respects _ _ _
| (cfg.halt v) _ rfl := rfl
/-- The initial state, evaluating function `c` on input `v`. -/
def init (c : code) (v : list ℕ) : cfg' :=
⟨some (tr_normal c cont'.halt), none, K'.elim (tr_list v) [] [] []⟩
theorem tr_init (c v) : ∃ b, tr_cfg (step_normal c cont.halt v) b ∧
reaches₁ (TM2.step tr) (init c v) b := tr_normal_respects _ _ _ _
theorem tr_eval (c v) : eval (TM2.step tr) (init c v) = halt <$> code.eval c v :=
begin
obtain ⟨i, h₁, h₂⟩ := tr_init c v,
refine part.ext (λ x, _),
rw [reaches_eval h₂.to_refl], simp,
refine ⟨λ h, _, _⟩,
{ obtain ⟨c, hc₁, hc₂⟩ := tr_eval_rev tr_respects h₁ h,
simp [step_normal_eval] at hc₂,
obtain ⟨v', hv, rfl⟩ := hc₂,
exact ⟨_, hv, hc₁.symm⟩ },
{ rintro ⟨v', hv, rfl⟩,
have := tr_eval tr_respects h₁,
simp [step_normal_eval] at this,
obtain ⟨_, ⟨⟩, h⟩ := this _ hv rfl,
exact h }
end
/-- The set of machine states reachable via downward label jumps, discounting jumps via `ret`. -/
def tr_stmts₁ : Λ' → finset Λ'
| Q@(Λ'.move p k₁ k₂ q) := insert Q $ tr_stmts₁ q
| Q@(Λ'.push k f q) := insert Q $ tr_stmts₁ q
| Q@(Λ'.read q) := insert Q $ finset.univ.bUnion $ λ s, tr_stmts₁ (q s)
| Q@(Λ'.clear p k q) := insert Q $ tr_stmts₁ q
| Q@(Λ'.copy q) := insert Q $ tr_stmts₁ q
| Q@(Λ'.succ q) := insert Q $ insert (unrev q) $ tr_stmts₁ q
| Q@(Λ'.pred q₁ q₂) := insert Q $ tr_stmts₁ q₁ ∪ insert (unrev q₂) (tr_stmts₁ q₂)
| Q@(Λ'.ret k) := {Q}
theorem tr_stmts₁_trans {q q'} : q' ∈ tr_stmts₁ q → tr_stmts₁ q' ⊆ tr_stmts₁ q :=
begin
induction q;
simp only [tr_stmts₁, finset.mem_insert, finset.mem_union, or_imp_distrib,
finset.mem_singleton, finset.subset.refl, imp_true_iff, true_and] {contextual := tt},
iterate 4 { exact λ h, finset.subset.trans (q_ih h) (finset.subset_insert _ _) },
{ simp, intros s h x h', simp, exact or.inr ⟨_, q_ih s h h'⟩ },
{ split,
{ rintro rfl, apply finset.subset_insert },
{ intros h x h', simp, exact or.inr (or.inr $ q_ih h h') } },
{ refine ⟨λ h x h', _, λ h x h', _, λ h x h', _⟩; simp,
{ exact or.inr (or.inr $ or.inl $ q_ih_q₁ h h') },
{ cases finset.mem_insert.1 h' with h' h'; simp [h', unrev] },
{ exact or.inr (or.inr $ or.inr $ q_ih_q₂ h h') } },
end
theorem tr_stmts₁_self (q) : q ∈ tr_stmts₁ q :=
by induction q; { apply finset.mem_singleton_self <|> apply finset.mem_insert_self }
/-- The (finite!) set of machine states visited during the course of evaluation of `c`,
including the state `ret k` but not any states after that (that is, the states visited while
evaluating `k`). -/
def code_supp' : code → cont' → finset Λ'
| c@code.zero' k := tr_stmts₁ (tr_normal c k)
| c@code.succ k := tr_stmts₁ (tr_normal c k)
| c@code.tail k := tr_stmts₁ (tr_normal c k)
| c@(code.cons f fs) k :=
tr_stmts₁ (tr_normal c k) ∪ (code_supp' f (cont'.cons₁ fs k) ∪
(tr_stmts₁ (
move₂ (λ _, ff) main aux $
move₂ (λ s, s = Γ'.Cons) stack main $
move₂ (λ _, ff) aux stack $
tr_normal fs (cont'.cons₂ k)) ∪ (code_supp' fs (cont'.cons₂ k) ∪
tr_stmts₁ (head stack $ Λ'.ret k))))
| c@(code.comp f g) k :=
tr_stmts₁ (tr_normal c k) ∪ (code_supp' g (cont'.comp f k) ∪
(tr_stmts₁ (tr_normal f k) ∪ code_supp' f k))
| c@(code.case f g) k := tr_stmts₁ (tr_normal c k) ∪ (code_supp' f k ∪ code_supp' g k)
| c@(code.fix f) k :=
tr_stmts₁ (tr_normal c k) ∪ (code_supp' f (cont'.fix f k) ∪
(tr_stmts₁ (Λ'.clear nat_end main $ tr_normal f (cont'.fix f k)) ∪ {Λ'.ret k}))
@[simp] theorem code_supp'_self (c k) : tr_stmts₁ (tr_normal c k) ⊆ code_supp' c k :=
by cases c; refl <|> exact finset.subset_union_left _ _
/-- The (finite!) set of machine states visited during the course of evaluation of a continuation
`k`, not including the initial state `ret k`. -/
def cont_supp : cont' → finset Λ'
| (cont'.cons₁ fs k) :=
tr_stmts₁ (
move₂ (λ _, ff) main aux $
move₂ (λ s, s = Γ'.Cons) stack main $
move₂ (λ _, ff) aux stack $
tr_normal fs (cont'.cons₂ k)) ∪ (code_supp' fs (cont'.cons₂ k) ∪
(tr_stmts₁ (head stack $ Λ'.ret k) ∪ cont_supp k))
| (cont'.cons₂ k) := tr_stmts₁ (head stack $ Λ'.ret k) ∪ cont_supp k
| (cont'.comp f k) := code_supp' f k ∪ cont_supp k
| (cont'.fix f k) := code_supp' (code.fix f) k ∪ cont_supp k
| cont'.halt := ∅
/-- The (finite!) set of machine states visited during the course of evaluation of `c` in
continuation `k`. This is actually closed under forward simulation (see `tr_supports`), and the
existence of this set means that the machine constructed in this section is in fact a proper
Turing machine, with a finite set of states. -/
def code_supp (c : code) (k : cont') : finset Λ' := code_supp' c k ∪ cont_supp k
@[simp] theorem code_supp_self (c k) : tr_stmts₁ (tr_normal c k) ⊆ code_supp c k :=
finset.subset.trans (code_supp'_self _ _) (finset.subset_union_left _ _)
@[simp] theorem code_supp_zero (k) : code_supp code.zero' k =
tr_stmts₁ (tr_normal code.zero' k) ∪ cont_supp k := rfl
@[simp] theorem code_supp_succ (k) : code_supp code.succ k =
tr_stmts₁ (tr_normal code.succ k) ∪ cont_supp k := rfl
@[simp] theorem code_supp_tail (k) : code_supp code.tail k =
tr_stmts₁ (tr_normal code.tail k) ∪ cont_supp k := rfl
@[simp] theorem code_supp_cons (f fs k) : code_supp (code.cons f fs) k =
tr_stmts₁ (tr_normal (code.cons f fs) k) ∪ code_supp f (cont'.cons₁ fs k) :=
by simp [code_supp, code_supp', cont_supp, finset.union_assoc]
@[simp] theorem code_supp_comp (f g k) : code_supp (code.comp f g) k =
tr_stmts₁ (tr_normal (code.comp f g) k) ∪ code_supp g (cont'.comp f k) :=
begin
simp [code_supp, code_supp', cont_supp, finset.union_assoc],
rw [← finset.union_assoc _ _ (cont_supp k),
finset.union_eq_right_iff_subset.2 (code_supp'_self _ _)]
end
@[simp] theorem code_supp_case (f g k) : code_supp (code.case f g) k =
tr_stmts₁ (tr_normal (code.case f g) k) ∪ (code_supp f k ∪ code_supp g k) :=
by simp [code_supp, code_supp', cont_supp, finset.union_assoc, finset.union_left_comm]
@[simp] theorem code_supp_fix (f k) : code_supp (code.fix f) k =
tr_stmts₁ (tr_normal (code.fix f) k) ∪ code_supp f (cont'.fix f k) :=
by simp [code_supp, code_supp', cont_supp, finset.union_assoc,
finset.union_left_comm, finset.union_left_idem]
@[simp] theorem cont_supp_cons₁ (fs k) : cont_supp (cont'.cons₁ fs k) =
tr_stmts₁ (move₂ (λ _, ff) main aux $ move₂ (λ s, s = Γ'.Cons) stack main $
move₂ (λ _, ff) aux stack $ tr_normal fs (cont'.cons₂ k)) ∪ code_supp fs (cont'.cons₂ k) :=
by simp [code_supp, code_supp', cont_supp, finset.union_assoc]
@[simp] theorem cont_supp_cons₂ (k) : cont_supp (cont'.cons₂ k) =
tr_stmts₁ (head stack $ Λ'.ret k) ∪ cont_supp k := rfl
@[simp] theorem cont_supp_comp (f k) : cont_supp (cont'.comp f k) = code_supp f k := rfl
theorem cont_supp_fix (f k) : cont_supp (cont'.fix f k) = code_supp f (cont'.fix f k) :=
by simp [code_supp, code_supp', cont_supp, finset.union_assoc,
finset.subset_iff] {contextual := tt}
@[simp] theorem cont_supp_halt : cont_supp cont'.halt = ∅ := rfl
/-- The statement `Λ'.supports S q` means that `cont_supp k ⊆ S` for any `ret k`
reachable from `q`.
(This is a technical condition used in the proof that the machine is supported.) -/
def Λ'.supports (S : finset Λ') : Λ' → Prop
| Q@(Λ'.move p k₁ k₂ q) := Λ'.supports q
| Q@(Λ'.push k f q) := Λ'.supports q
| Q@(Λ'.read q) := ∀ s, Λ'.supports (q s)
| Q@(Λ'.clear p k q) := Λ'.supports q
| Q@(Λ'.copy q) := Λ'.supports q
| Q@(Λ'.succ q) := Λ'.supports q
| Q@(Λ'.pred q₁ q₂) := Λ'.supports q₁ ∧ Λ'.supports q₂
| Q@(Λ'.ret k) := cont_supp k ⊆ S
/-- A shorthand for the predicate that we are proving in the main theorems `tr_stmts₁_supports`,
`code_supp'_supports`, `cont_supp_supports`, `code_supp_supports`. The set `S` is fixed throughout
the proof, and denotes the full set of states in the machine, while `K` is a subset that we are
currently proving a property about. The predicate asserts that every state in `K` is closed in `S`
under forward simulation, i.e. stepping forward through evaluation starting from any state in `K`
stays entirely within `S`. -/
def supports (K S : finset Λ') := ∀ q ∈ K, TM2.supports_stmt S (tr q)
theorem supports_insert {K S q} :
supports (insert q K) S ↔ TM2.supports_stmt S (tr q) ∧ supports K S :=
by simp [supports]
theorem supports_singleton {S q} : supports {q} S ↔ TM2.supports_stmt S (tr q) :=
by simp [supports]
theorem supports_union {K₁ K₂ S} :
supports (K₁ ∪ K₂) S ↔ supports K₁ S ∧ supports K₂ S :=
by simp [supports, or_imp_distrib, forall_and_distrib]
theorem supports_bUnion {K : option Γ' → finset Λ'} {S} :
supports (finset.univ.bUnion K) S ↔ ∀ a, supports (K a) S :=
by simp [supports]; apply forall_swap
theorem head_supports {S k q} (H : (q:Λ').supports S) : (head k q).supports S :=
λ _, by dsimp only; split_ifs; exact H
theorem ret_supports {S k} (H₁ : cont_supp k ⊆ S) : TM2.supports_stmt S (tr (Λ'.ret k)) :=
begin
have W := λ {q}, tr_stmts₁_self q,
cases k,
case halt { trivial },
case cons₁ { rw [cont_supp_cons₁, finset.union_subset_iff] at H₁, exact λ _, H₁.1 W },
case cons₂ { rw [cont_supp_cons₂, finset.union_subset_iff] at H₁, exact λ _, H₁.1 W },
case comp { rw [cont_supp_comp] at H₁, exact λ _, H₁ (code_supp_self _ _ W) },
case fix
{ rw [cont_supp_fix] at H₁,
have L := @finset.mem_union_left, have R := @finset.mem_union_right,
intro s, dsimp only, cases nat_end s.iget,
{ refine H₁ (R _ $ L _ $ R _ $ R _ $ L _ W) },
{ exact H₁ (R _ $ L _ $ R _ $ R _ $ R _ $ finset.mem_singleton_self _) } }
end
theorem tr_stmts₁_supports {S q}
(H₁ : (q:Λ').supports S) (HS₁ : tr_stmts₁ q ⊆ S) : supports (tr_stmts₁ q) S :=
begin
have W := λ {q}, tr_stmts₁_self q,
induction q; simp [tr_stmts₁] at HS₁ ⊢,
any_goals
{ cases finset.insert_subset.1 HS₁ with h₁ h₂,
id { have h₃ := h₂ W } <|> try { simp [finset.subset_iff] at h₂ } },
{ exact supports_insert.2 ⟨⟨λ _, h₃, λ _, h₁⟩, q_ih H₁ h₂⟩ }, -- move
{ exact supports_insert.2 ⟨⟨λ _, h₃, λ _, h₁⟩, q_ih H₁ h₂⟩ }, -- clear
{ exact supports_insert.2 ⟨⟨λ _, h₁, λ _, h₃⟩, q_ih H₁ h₂⟩ }, -- copy
{ exact supports_insert.2 ⟨⟨λ _, h₃, λ _, h₃⟩, q_ih H₁ h₂⟩ }, -- push
-- read
{ refine supports_insert.2 ⟨λ _, h₂ _ W, _⟩,
exact supports_bUnion.2 (λ _, q_ih _ (H₁ _) (λ _ h, h₂ _ h)) },
-- succ
{ refine supports_insert.2 ⟨⟨λ _, h₁, λ _, h₂.1, λ _, h₂.1⟩, _⟩,
exact supports_insert.2 ⟨⟨λ _, h₂.2 _ W, λ _, h₂.1⟩, q_ih H₁ h₂.2⟩ },
-- pred
{ refine supports_insert.2 ⟨⟨λ _, h₁, λ _, h₂.2 _ (or.inl W), λ _, h₂.1, λ _, h₂.1⟩, _⟩,
refine supports_insert.2 ⟨⟨λ _, h₂.2 _ (or.inr W), λ _, h₂.1⟩, _⟩,
refine supports_union.2 ⟨_, _⟩,
{ exact q_ih_q₁ H₁.1 (λ _ h, h₂.2 _ (or.inl h)) },
{ exact q_ih_q₂ H₁.2 (λ _ h, h₂.2 _ (or.inr h)) } },
-- ret
{ exact supports_singleton.2 (ret_supports H₁) },
end
theorem tr_stmts₁_supports' {S q K} (H₁ : (q:Λ').supports S) (H₂ : tr_stmts₁ q ∪ K ⊆ S)
(H₃ : K ⊆ S → supports K S) : supports (tr_stmts₁ q ∪ K) S :=
begin
simp [finset.union_subset_iff] at H₂,
exact supports_union.2 ⟨tr_stmts₁_supports H₁ H₂.1, H₃ H₂.2⟩,
end
theorem tr_normal_supports {S c k} (Hk : code_supp c k ⊆ S) : (tr_normal c k).supports S :=
begin
induction c generalizing k; simp [Λ'.supports, head],
case zero' { exact finset.union_subset_right Hk },
case succ { intro, split_ifs; exact finset.union_subset_right Hk },
case tail { exact finset.union_subset_right Hk },
case cons : f fs IHf IHfs
{ apply IHf, rw code_supp_cons at Hk, exact finset.union_subset_right Hk },
case comp : f g IHf IHg
{ apply IHg, rw code_supp_comp at Hk, exact finset.union_subset_right Hk },
case case : f g IHf IHg
{ simp only [code_supp_case, finset.union_subset_iff] at Hk,
exact ⟨IHf Hk.2.1, IHg Hk.2.2⟩ },
case fix : f IHf { apply IHf, rw code_supp_fix at Hk, exact finset.union_subset_right Hk },
end
theorem code_supp'_supports {S c k}
(H : code_supp c k ⊆ S) : supports (code_supp' c k) S :=
begin
induction c generalizing k,
iterate 3
{ exact tr_stmts₁_supports (tr_normal_supports H)
(finset.subset.trans (code_supp_self _ _) H) },
case cons : f fs IHf IHfs
{ have H' := H, simp only [code_supp_cons, finset.union_subset_iff] at H',
refine tr_stmts₁_supports' (tr_normal_supports H) (finset.union_subset_left H) (λ h, _),
refine supports_union.2 ⟨IHf H'.2, _⟩,
refine tr_stmts₁_supports' (tr_normal_supports _) (finset.union_subset_right h) (λ h, _),
{ simp only [code_supp, finset.union_subset_iff, cont_supp] at h H ⊢,
exact ⟨h.2.2.1, h.2.2.2, H.2⟩ },
refine supports_union.2 ⟨IHfs _, _⟩,
{ rw [code_supp, cont_supp_cons₁] at H',
exact finset.union_subset_right (finset.union_subset_right H'.2) },
exact tr_stmts₁_supports (head_supports $ finset.union_subset_right H)
(finset.union_subset_right h) },
case comp : f g IHf IHg
{ have H' := H, rw [code_supp_comp] at H', have H' := finset.union_subset_right H',
refine tr_stmts₁_supports' (tr_normal_supports H) (finset.union_subset_left H) (λ h, _),
refine supports_union.2 ⟨IHg H', _⟩,
refine tr_stmts₁_supports' (tr_normal_supports _) (finset.union_subset_right h) (λ h, _),
{ simp only [code_supp', code_supp, finset.union_subset_iff, cont_supp] at h H ⊢,
exact ⟨h.2.2, H.2⟩ },
exact IHf (finset.union_subset_right H') },
case case : f g IHf IHg
{ have H' := H, simp only [code_supp_case, finset.union_subset_iff] at H',
refine tr_stmts₁_supports' (tr_normal_supports H) (finset.union_subset_left H) (λ h, _),
exact supports_union.2 ⟨IHf H'.2.1, IHg H'.2.2⟩ },
case fix : f IHf
{ have H' := H, simp only [code_supp_fix, finset.union_subset_iff] at H',
refine tr_stmts₁_supports' (tr_normal_supports H) (finset.union_subset_left H) (λ h, _),
refine supports_union.2 ⟨IHf H'.2, _⟩,
refine tr_stmts₁_supports' (tr_normal_supports _) (finset.union_subset_right h) (λ h, _),
{ simp only [code_supp', code_supp, finset.union_subset_iff, cont_supp,
tr_stmts₁, finset.insert_subset] at h H ⊢,
exact ⟨h.1, ⟨H.1.1, h⟩, H.2⟩ },
exact supports_singleton.2 (ret_supports $ finset.union_subset_right H) },
end
theorem cont_supp_supports {S k}
(H : cont_supp k ⊆ S) : supports (cont_supp k) S :=
begin
induction k,
{ simp [cont_supp_halt, supports] },
case cons₁ : f k IH
{ have H₁ := H, rw [cont_supp_cons₁] at H₁, have H₂ := finset.union_subset_right H₁,
refine tr_stmts₁_supports' (tr_normal_supports H₂) H₁ (λ h, _),
refine supports_union.2 ⟨code_supp'_supports H₂, _⟩,
simp only [code_supp, cont_supp_cons₂, finset.union_subset_iff] at H₂,
exact tr_stmts₁_supports' (head_supports H₂.2.2) (finset.union_subset_right h) IH },
case cons₂ : k IH
{ have H' := H, rw [cont_supp_cons₂] at H',
exact tr_stmts₁_supports' (head_supports $ finset.union_subset_right H') H' IH },
case comp : f k IH
{ have H' := H, rw [cont_supp_comp] at H', have H₂ := finset.union_subset_right H',
exact supports_union.2 ⟨code_supp'_supports H', IH H₂⟩ },
case fix : f k IH
{ rw cont_supp at H,
exact supports_union.2 ⟨code_supp'_supports H, IH (finset.union_subset_right H)⟩ }
end
theorem code_supp_supports {S c k}
(H : code_supp c k ⊆ S) : supports (code_supp c k) S :=
supports_union.2 ⟨code_supp'_supports H, cont_supp_supports (finset.union_subset_right H)⟩
/-- The set `code_supp c k` is a finite set that witnesses the effective finiteness of the `tr`
Turing machine. Starting from the initial state `tr_normal c k`, forward simulation uses only
states in `code_supp c k`, so this is a finite state machine. Even though the underlying type of
state labels `Λ'` is infinite, for a given partial recursive function `c` and continuation `k`,
only finitely many states are accessed, corresponding roughly to subterms of `c`. -/
theorem tr_supports (c k) : @TM2.supports _ _ _ _ _ ⟨tr_normal c k⟩ tr (code_supp c k) :=
⟨code_supp_self _ _ (tr_stmts₁_self _),
λ l', code_supp_supports (finset.subset.refl _) _⟩
end
end partrec_to_TM2
end turing
|
90c7d1897e56ffb30fa73078696f092fd46c61c0
|
8cae430f0a71442d02dbb1cbb14073b31048e4b0
|
/src/data/multiset/sum.lean
|
60d03036e59582a711273235d39578f6725fa92a
|
[
"Apache-2.0"
] |
permissive
|
leanprover-community/mathlib
|
56a2cadd17ac88caf4ece0a775932fa26327ba0e
|
442a83d738cb208d3600056c489be16900ba701d
|
refs/heads/master
| 1,693,584,102,358
| 1,693,471,902,000
| 1,693,471,902,000
| 97,922,418
| 1,595
| 352
|
Apache-2.0
| 1,694,693,445,000
| 1,500,624,130,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 3,194
|
lean
|
/-
Copyright (c) 2022 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import data.multiset.nodup
/-!
# Disjoint sum of multisets
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This file defines the disjoint sum of two multisets as `multiset (α ⊕ β)`. Beware not to confuse
with the `multiset.sum` operation which computes the additive sum.
## Main declarations
* `multiset.disj_sum`: `s.disj_sum t` is the disjoint sum of `s` and `t`.
-/
open sum
namespace multiset
variables {α β : Type*} (s : multiset α) (t : multiset β)
/-- Disjoint sum of multisets. -/
def disj_sum : multiset (α ⊕ β) := s.map inl + t.map inr
@[simp] lemma zero_disj_sum : (0 : multiset α).disj_sum t = t.map inr := zero_add _
@[simp] lemma disj_sum_zero : s.disj_sum (0 : multiset β) = s.map inl := add_zero _
@[simp] lemma card_disj_sum : (s.disj_sum t).card = s.card + t.card :=
by rw [disj_sum, card_add, card_map, card_map]
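-- A small illustrative check (a sketch relying on the `simp` lemmas above and the basic multiset
-- API): adjoining a single element on the right grows the disjoint sum by exactly one.
example (u : multiset ℕ) (b : bool) : (u.disj_sum {b}).card = u.card + 1 := by simp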
variables {s t} {s₁ s₂ : multiset α} {t₁ t₂ : multiset β} {a : α} {b : β} {x : α ⊕ β}
lemma mem_disj_sum : x ∈ s.disj_sum t ↔ (∃ a, a ∈ s ∧ inl a = x) ∨ ∃ b, b ∈ t ∧ inr b = x :=
by simp_rw [disj_sum, mem_add, mem_map]
@[simp] lemma inl_mem_disj_sum : inl a ∈ s.disj_sum t ↔ a ∈ s :=
begin
rw [mem_disj_sum, or_iff_left],
simp only [exists_eq_right],
rintro ⟨b, _, hb⟩,
exact inr_ne_inl hb,
end
@[simp] lemma inr_mem_disj_sum : inr b ∈ s.disj_sum t ↔ b ∈ t :=
begin
rw [mem_disj_sum, or_iff_right],
simp only [exists_eq_right],
rintro ⟨a, _, ha⟩,
exact inl_ne_inr ha,
end
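-- Illustrative sketch: membership in a disjoint sum is decided componentwise, via the lemmas
-- `inl_mem_disj_sum` and `inr_mem_disj_sum` above.
example {u : multiset ℕ} {v : multiset bool} {n : ℕ} (hn : n ∈ u) : inl n ∈ u.disj_sum v :=
inl_mem_disj_sum.2 hn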
lemma disj_sum_mono (hs : s₁ ≤ s₂) (ht : t₁ ≤ t₂) : s₁.disj_sum t₁ ≤ s₂.disj_sum t₂ :=
add_le_add (map_le_map hs) (map_le_map ht)
lemma disj_sum_mono_left (t : multiset β) : monotone (λ s : multiset α, s.disj_sum t) :=
λ s₁ s₂ hs, add_le_add_right (map_le_map hs) _
lemma disj_sum_mono_right (s : multiset α) :
monotone (s.disj_sum : multiset β → multiset (α ⊕ β)) :=
λ t₁ t₂ ht, add_le_add_left (map_le_map ht) _
lemma disj_sum_lt_disj_sum_of_lt_of_le (hs : s₁ < s₂) (ht : t₁ ≤ t₂) :
s₁.disj_sum t₁ < s₂.disj_sum t₂ :=
add_lt_add_of_lt_of_le (map_lt_map hs) (map_le_map ht)
lemma disj_sum_lt_disj_sum_of_le_of_lt (hs : s₁ ≤ s₂) (ht : t₁ < t₂) :
s₁.disj_sum t₁ < s₂.disj_sum t₂ :=
add_lt_add_of_le_of_lt (map_le_map hs) (map_lt_map ht)
lemma disj_sum_strict_mono_left (t : multiset β) : strict_mono (λ s : multiset α, s.disj_sum t) :=
λ s₁ s₂ hs, disj_sum_lt_disj_sum_of_lt_of_le hs le_rfl
lemma disj_sum_strict_mono_right (s : multiset α) :
strict_mono (s.disj_sum : multiset β → multiset (α ⊕ β)) :=
λ s₁ s₂, disj_sum_lt_disj_sum_of_le_of_lt le_rfl
protected lemma nodup.disj_sum (hs : s.nodup) (ht : t.nodup) : (s.disj_sum t).nodup :=
begin
refine ((hs.map inl_injective).add_iff $ ht.map inr_injective).2 (λ x hs ht, _),
rw multiset.mem_map at hs ht,
obtain ⟨a, _, rfl⟩ := hs,
obtain ⟨b, _, h⟩ := ht,
exact inr_ne_inl h,
end
end multiset
|
eb01178bd7023bfb83e8df48dac82c33b63f1c14
|
7850aae797be6c31052ce4633d86f5991178d3df
|
/src/Lean/Elab/Quotation.lean
|
44a7414abad97d90f292737bef934675c3d260d3
|
[
"Apache-2.0"
] |
permissive
|
miriamgoetze/lean4
|
4dc24d4dbd360cc969713647c2958c6691947d16
|
062cc5d5672250be456a168e9c7b9299a9c69bdb
|
refs/heads/master
| 1,685,865,971,011
| 1,624,107,703,000
| 1,624,107,703,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 24,854
|
lean
|
/-
Copyright (c) 2019 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sebastian Ullrich
Elaboration of syntax quotations as terms and patterns (in `match_syntax`). See also `./Hygiene.lean` for the basic
hygiene workings and data types.
-/
import Lean.Syntax
import Lean.ResolveName
import Lean.Elab.Term
import Lean.Elab.Quotation.Util
import Lean.Elab.Quotation.Precheck
import Lean.Parser.Term
namespace Lean.Elab.Term.Quotation
open Lean.Parser.Term
open Lean.Syntax
open Meta
/-- `C[$(e)]` ~> `let a := e; C[$a]`. Used in the implementation of antiquot splices. -/
private partial def floatOutAntiquotTerms : Syntax → StateT (Syntax → TermElabM Syntax) TermElabM Syntax
| stx@(Syntax.node k args) => do
if isAntiquot stx && !isEscapedAntiquot stx then
let e := getAntiquotTerm stx
if !e.isIdent || !e.getId.isAtomic then
return ← withFreshMacroScope do
let a ← `(a)
modify (fun cont stx => (`(let $a:ident := $e; $stx) : TermElabM _))
stx.setArg 2 a
Syntax.node k (← args.mapM floatOutAntiquotTerms)
| stx => pure stx
private def getSepFromSplice (splice : Syntax) : Syntax := do
let Syntax.atom _ sep ← getAntiquotSpliceSuffix splice | unreachable!
Syntax.mkStrLit (sep.dropRight 1)
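/-- Build syntax for a right-nested tuple of the given terms: `#[a, b, c]` becomes syntax for
`Prod.mk a (Prod.mk b c)`, a singleton array is returned as its sole element, and the empty array
becomes `Unit.unit`. -/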
partial def mkTuple : Array Syntax → TermElabM Syntax
| #[] => `(Unit.unit)
| #[e] => e
| es => do
let stx ← mkTuple (es.eraseIdx 0)
`(Prod.mk $(es[0]) $stx)
def resolveSectionVariable (sectionVars : NameMap Name) (id : Name) : List (Name × List String) :=
-- decode macro scopes from name before recursion
let extractionResult := extractMacroScopes id
let rec loop : Name → List String → List (Name × List String)
| id@(Name.str p s _), projs =>
-- NOTE: we assume that macro scopes always belong to the projected constant, not the projections
let id := { extractionResult with name := id }.review
match sectionVars.find? id with
| some newId => [(newId, projs)]
| none => loop p (s::projs)
| _, _ => []
loop extractionResult.name []
/-- Transform sequence of pushes and appends into acceptable code -/
def ArrayStxBuilder := Sum (Array Syntax) Syntax
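-- `Sum.inl elems` is used as long as all elements pushed so far are statically known, so the
-- final array can be quoted as a literal; once a splice forces an `append`, the builder switches
-- to `Sum.inr arr`, a term that constructs the array at run time.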
namespace ArrayStxBuilder
def empty : ArrayStxBuilder := Sum.inl #[]
def build : ArrayStxBuilder → Syntax
| Sum.inl elems => quote elems
| Sum.inr arr => arr
def push (b : ArrayStxBuilder) (elem : Syntax) : ArrayStxBuilder :=
match b with
| Sum.inl elems => Sum.inl <| elems.push elem
| Sum.inr arr => Sum.inr <| mkCApp ``Array.push #[arr, elem]
def append (b : ArrayStxBuilder) (arr : Syntax) (appendName := ``Array.append) : ArrayStxBuilder :=
Sum.inr <| mkCApp appendName #[b.build, arr]
end ArrayStxBuilder
-- Elaborate the content of a syntax quotation term
private partial def quoteSyntax : Syntax → TermElabM Syntax
| Syntax.ident info rawVal val preresolved => do
if !hygiene.get (← getOptions) then
return ← `(Syntax.ident info $(quote rawVal) $(quote val) $(quote preresolved))
-- Add global scopes at compilation time (now), add macro scope at runtime (in the quotation).
-- See the paper for details.
let r ← resolveGlobalName val
-- extension of the paper algorithm: also store unique section variable names as top-level scopes
-- so they can be captured and used inside the section, but not outside
let r' := resolveSectionVariable (← read).sectionVars val
let preresolved := r ++ r' ++ preresolved
let val := quote val
-- `scp` is bound in stxQuot.expand
`(Syntax.ident info $(quote rawVal) (addMacroScope mainModule $val scp) $(quote preresolved))
-- if antiquotation, insert contents as-is, else recurse
| stx@(Syntax.node k _) => do
if isAntiquot stx && !isEscapedAntiquot stx then
getAntiquotTerm stx
else if isTokenAntiquot stx && !isEscapedAntiquot stx then
match stx[0] with
| Syntax.atom _ val => `(Syntax.atom (Option.getD (getHeadInfo? $(getAntiquotTerm stx)) info) $(quote val))
| _ => throwErrorAt stx "expected token"
else if isAntiquotSuffixSplice stx && !isEscapedAntiquot stx then
-- splices must occur in a `many` node
throwErrorAt stx "unexpected antiquotation splice"
else if isAntiquotSplice stx && !isEscapedAntiquot stx then
throwErrorAt stx "unexpected antiquotation splice"
else
-- if escaped antiquotation, decrement by one escape level
let stx := unescapeAntiquot stx
let mut args := ArrayStxBuilder.empty
let appendName := if (← getEnv).contains ``Array.append then ``Array.append else ``Array.appendCore
for arg in stx.getArgs do
if k == nullKind && isAntiquotSuffixSplice arg then
let antiquot := getAntiquotSuffixSpliceInner arg
args := args.append (appendName := appendName) <| ←
match antiquotSuffixSplice? arg with
| `optional => `(match $(getAntiquotTerm antiquot):term with
| some x => Array.empty.push x
| none => Array.empty)
| `many => getAntiquotTerm antiquot
| `sepBy => `(@SepArray.elemsAndSeps $(getSepFromSplice arg) $(getAntiquotTerm antiquot))
| k => throwErrorAt arg "invalid antiquotation suffix splice kind '{k}'"
else if k == nullKind && isAntiquotSplice arg then
let k := antiquotSpliceKind? arg
let (arg, bindLets) ← floatOutAntiquotTerms arg |>.run pure
let inner ← (getAntiquotSpliceContents arg).mapM quoteSyntax
let ids ← getAntiquotationIds arg
if ids.isEmpty then
throwErrorAt stx "antiquotation splice must contain at least one antiquotation"
let arr ← match k with
| `optional => `(match $[$ids:ident],* with
| $[some $ids:ident],* => $(quote inner)
| none => Array.empty)
| _ =>
let arr ← ids[:ids.size-1].foldrM (fun id arr => `(Array.zip $id $arr)) ids.back
`(Array.map (fun $(← mkTuple ids) => $(inner[0])) $arr)
let arr ←
if k == `sepBy then
`(mkSepArray $arr (mkAtom $(getSepFromSplice arg)))
else arr
let arr ← bindLets arr
args := args.append arr
else do
let arg ← quoteSyntax arg
args := args.push arg
`(Syntax.node $(quote k) $(args.build))
| Syntax.atom _ val =>
`(Syntax.atom info $(quote val))
| Syntax.missing => throwUnsupportedSyntax
def stxQuot.expand (stx : Syntax) : TermElabM Syntax := do
/- Syntax quotations are monadic values depending on the current macro scope. For efficiency, we bind
the macro scope once for each quotation, then build the syntax tree in a completely pure computation
depending on this binding. Note that regular function calls do not introduce a new macro scope (i.e.
we preserve referential transparency), so we can refer to this same `scp` inside `quoteSyntax` by
including it literally in a syntax quotation. -/
-- TODO: simplify to `(do scp ← getCurrMacroScope; pure $(quoteSyntax quoted))
let stx ← quoteSyntax stx.getQuotContent;
`(Bind.bind MonadRef.mkInfoFromRefPos (fun info =>
Bind.bind getCurrMacroScope (fun scp =>
Bind.bind getMainModule (fun mainModule => Pure.pure $stx))))
/- NOTE: It may seem like the newly introduced binding `scp` may accidentally
capture identifiers in an antiquotation introduced by `quoteSyntax`. However,
note that the syntax quotation above enjoys the same hygiene guarantees as
anywhere else in Lean; that is, we implement hygienic quotations by making
use of the hygienic quotation support of the bootstrapped Lean compiler!
Aside: While this might sound "dangerous", it is in fact less reliant on a
"chain of trust" than other bootstrapping parts of Lean: because this
implementation itself never uses `scp` (or any other identifier) both inside
and outside quotations, it can actually correctly be compiled by an
unhygienic (but otherwise correct) implementation of syntax quotations. As
long as it is then compiled again with the resulting executable (i.e. up to
stage 2), the result is a correct hygienic implementation. In this sense the
implementation is "self-stabilizing". It was in fact originally compiled
by an unhygienic prototype implementation. -/
macro "elab_stx_quot" kind:ident : command =>
`(@[builtinTermElab $kind:ident] def elabQuot : TermElab := adaptExpander stxQuot.expand)
--
elab_stx_quot Parser.Level.quot
elab_stx_quot Parser.Term.quot
elab_stx_quot Parser.Term.funBinder.quot
elab_stx_quot Parser.Term.bracketedBinder.quot
elab_stx_quot Parser.Term.matchDiscr.quot
elab_stx_quot Parser.Tactic.quot
elab_stx_quot Parser.Tactic.quotSeq
elab_stx_quot Parser.Term.stx.quot
elab_stx_quot Parser.Term.prec.quot
elab_stx_quot Parser.Term.attr.quot
elab_stx_quot Parser.Term.prio.quot
elab_stx_quot Parser.Term.doElem.quot
elab_stx_quot Parser.Term.dynamicQuot
/- match -/
-- an "alternative" of patterns plus right-hand side
private abbrev Alt := List Syntax × Syntax
/--
In a single match step, we match the first discriminant against the "head" of the first pattern of the first
alternative. This datatype describes what kind of check this involves, which helps other patterns decide if
they are covered by the same check and don't have to be checked again (see also `MatchResult`). -/
inductive HeadCheck where
-- match step that always succeeds: _, x, `($x), ...
| unconditional
-- match step based on kind and, optionally, arity of discriminant
-- If `arity` is given, that number of new discriminants is introduced. `covered` patterns should then introduce the
-- same number of new patterns.
-- We actually check the arity at run time only in the case of `null` nodes since it should otherwise be implied by
-- the node kind.
-- without arity: `($x:k)
-- with arity: any quotation without an antiquotation head pattern
| shape (k : SyntaxNodeKind) (arity : Option Nat)
-- Match step that succeeds on `null` nodes of arity at least `numPrefix + numSuffix`, introducing discriminants
-- for the first `numPrefix` children, one `null` node for those in between, and for the `numSuffix` last children.
-- example: `([$x, $xs,*, $y]) is `slice 2 2`
| slice (numPrefix numSuffix : Nat)
-- other, complicated match step that will probably only cover identical patterns
-- example: antiquotation splices `($[...]*)
| other (pat : Syntax)
open HeadCheck
/-- Describe whether a pattern is covered by a head check (induced by the pattern itself or a different pattern). -/
inductive MatchResult where
-- Pattern agrees with head check, remove and transform remaining alternative.
-- If `exhaustive` is `false`, *also* include unchanged alternative in the "no" branch.
| covered (f : Alt → TermElabM Alt) (exhaustive : Bool)
-- Pattern disagrees with head check, include in "no" branch only
| uncovered
-- Pattern is not quite sure yet; include unchanged in both branches
| undecided
open MatchResult
/-- All necessary information on a pattern head. -/
structure HeadInfo where
-- check induced by the pattern
check : HeadCheck
-- compute compatibility of pattern with given head check
onMatch (taken : HeadCheck) : MatchResult
-- actually run the specified head check, with the discriminant bound to `discr`
doMatch (yes : (newDiscrs : List Syntax) → TermElabM Syntax) (no : TermElabM Syntax) : TermElabM Syntax
/-- Adapt alternatives that do not introduce new discriminants in `doMatch`, but are covered by those that do so. -/
private def noOpMatchAdaptPats : HeadCheck → Alt → Alt
| shape k (some sz), (pats, rhs) => (List.replicate sz (Unhygienic.run `(_)) ++ pats, rhs)
| slice p s, (pats, rhs) => (List.replicate (p + 1 + s) (Unhygienic.run `(_)) ++ pats, rhs)
| _, alt => alt
private def adaptRhs (fn : Syntax → TermElabM Syntax) : Alt → TermElabM Alt
| (pats, rhs) => do (pats, ← fn rhs)
private partial def getHeadInfo (alt : Alt) : TermElabM HeadInfo :=
let pat := alt.fst.head!
let unconditionally (rhsFn) := pure {
check := unconditional,
doMatch := fun yes no => yes [],
onMatch := fun taken => covered (adaptRhs rhsFn ∘ noOpMatchAdaptPats taken) (match taken with | unconditional => true | _ => false)
}
-- quotation pattern
if isQuot pat then
let quoted := getQuotContent pat
if quoted.isAtom then
-- We assume that atoms are uniquely determined by the node kind and never have to be checked
unconditionally pure
else if quoted.isTokenAntiquot then
unconditionally (`(let $(quoted.getAntiquotTerm) := discr; $(·)))
else if isAntiquot quoted && !isEscapedAntiquot quoted then
-- quotation contains a single antiquotation
let k := antiquotKind? quoted |>.get!
match getAntiquotTerm quoted with
| `(_) => unconditionally pure
| `($id:ident) =>
-- Antiquotation kinds like `$id:ident` influence the parser, but also need to be considered by
-- `match` (but not by quotation terms). For example, `($id:ident) and `($e) are not
-- distinguishable without checking the kind of the node to be captured. Note that some
-- antiquotations like the latter one for terms do not correspond to any actual node kind
-- (signified by `k == Name.anonymous`), so we would only check for `ident` here.
--
-- if stx.isOfKind `ident then
-- let id := stx; let e := stx; ...
-- else
-- let e := stx; ...
let rhsFn := (`(let $id := discr; $(·)))
if k == Name.anonymous then unconditionally rhsFn else pure {
check := shape k none,
onMatch := fun
| other _ => undecided
| taken@(shape k' sz) =>
if k' == k then
covered (adaptRhs rhsFn ∘ noOpMatchAdaptPats taken) (exhaustive := sz.isNone)
else uncovered
| _ => uncovered,
doMatch := fun yes no => do `(cond (Syntax.isOfKind discr $(quote k)) $(← yes []) $(← no)),
}
| anti => throwErrorAt anti "unsupported antiquotation kind in pattern"
else if isAntiquotSuffixSplice quoted then throwErrorAt quoted "unexpected antiquotation splice"
else if isAntiquotSplice quoted then throwErrorAt quoted "unexpected antiquotation splice"
else if quoted.getArgs.size == 1 && isAntiquotSuffixSplice quoted[0] then
let anti := getAntiquotTerm (getAntiquotSuffixSpliceInner quoted[0])
unconditionally fun rhs => match antiquotSuffixSplice? quoted[0] with
| `optional => `(let $anti := Syntax.getOptional? discr; $rhs)
| `many => `(let $anti := Syntax.getArgs discr; $rhs)
| `sepBy => `(let $anti := @SepArray.mk $(getSepFromSplice quoted[0]) (Syntax.getArgs discr); $rhs)
| k => throwErrorAt quoted "invalid antiquotation suffix splice kind '{k}'"
else if quoted.getArgs.size == 1 && isAntiquotSplice quoted[0] then pure {
check := other pat,
onMatch := fun
| other pat' => if pat' == pat then covered pure (exhaustive := true) else undecided
| _ => undecided,
doMatch := fun yes no => do
let splice := quoted[0]
let k := antiquotSpliceKind? splice
let contents := getAntiquotSpliceContents splice
let ids ← getAntiquotationIds splice
let yes ← yes []
let no ← no
match k with
| `optional =>
let nones := mkArray ids.size (← `(none))
`(let_delayed yes _ $ids* := $yes;
if discr.isNone then yes () $[ $nones]*
else match discr with
| `($(mkNullNode contents)) => yes () $[ (some $ids)]*
| _ => $no)
| _ =>
let mut discrs ← `(Syntax.getArgs discr)
if k == `sepBy then
discrs ← `(Array.getSepElems $discrs)
let tuple ← mkTuple ids
let mut yes := yes
let resId ← match ids with
| #[id] => id
| _ =>
for id in ids do
yes ← `(let $id := tuples.map (fun $tuple => $id); $yes)
`(tuples)
let contents := if contents.size == 1
then contents[0]
else mkNullNode contents
`(match OptionM.run ($(discrs).sequenceMap fun
| `($contents) => some $tuple
| _ => none) with
| some $resId => $yes
| none => $no)
}
else if let some idx := quoted.getArgs.findIdx? (fun arg => isAntiquotSuffixSplice arg || isAntiquotSplice arg) then do
/-
pattern of the form `match discr, ... with | `(pat_0 ... pat_(idx-1) $[...]* pat_(idx+1) ...), ...`
is transformed to
```
if discr.getNumArgs >= $quoted.getNumArgs - 1 then
match discr[0], ..., discr[idx-1], mkNullNode (discr.getArgs.extract idx (discr.getNumArgs - $numSuffix)), ..., discr[quoted.getNumArgs - 1] with
| `(pat_0), ... `(pat_(idx-1)), `($[...])*, `(pat_(idx+1)), ...
```
-/
let numSuffix := quoted.getNumArgs - 1 - idx
pure {
check := slice idx numSuffix
onMatch := fun
| other _ => undecided
| slice p s =>
if p == idx && s == numSuffix then
let argPats := quoted.getArgs.mapIdx fun i arg =>
let arg := if (i : Nat) == idx then mkNullNode #[arg] else arg
Unhygienic.run `(`($(arg)))
covered (fun (pats, rhs) => (argPats.toList ++ pats, rhs)) (exhaustive := true)
else uncovered
| _ => uncovered
doMatch := fun yes no => do
let prefixDiscrs ← (List.range idx).mapM (`(Syntax.getArg discr $(quote ·)))
let sliceDiscr ← `(mkNullNode (discr.getArgs.extract $(quote idx) (discr.getNumArgs - $(quote numSuffix))))
let suffixDiscrs ← (List.range numSuffix).mapM fun i =>
`(Syntax.getArg discr (discr.getNumArgs - $(quote (numSuffix - i))))
`(ite (GE.ge discr.getNumArgs $(quote (quoted.getNumArgs - 1)))
$(← yes (prefixDiscrs ++ sliceDiscr :: suffixDiscrs))
$(← no))
}
else
-- not an antiquotation, or an escaped antiquotation: match head shape
let quoted := unescapeAntiquot quoted
let kind := quoted.getKind
let argPats := quoted.getArgs.map fun arg => Unhygienic.run `(`($(arg)))
pure {
check :=
if quoted.isIdent then
-- identifiers only match identical identifiers
-- NOTE: We could make this case more precise by including the matched identifier,
-- if any, in the `shape` constructor, but matching on literal identifiers is quite
-- rare.
other quoted
else
shape kind argPats.size,
onMatch := fun
| other stx' =>
if quoted.isIdent && quoted == stx' then
covered pure (exhaustive := true)
else
uncovered
| shape k' sz =>
if k' == kind && sz == argPats.size then
covered (fun (pats, rhs) => (argPats.toList ++ pats, rhs)) (exhaustive := true)
else
uncovered
| _ => uncovered,
doMatch := fun yes no => do
let cond ← match kind with
| `null => `(Syntax.matchesNull discr $(quote argPats.size))
| `ident => `(Syntax.matchesIdent discr $(quote quoted.getId))
| _ => `(Syntax.isOfKind discr $(quote kind))
let newDiscrs ← (List.range argPats.size).mapM fun i => `(Syntax.getArg discr $(quote i))
`(ite (Eq $cond true) $(← yes newDiscrs) $(← no))
}
else match pat with
| `(_) => unconditionally pure
| `($id:ident) => unconditionally (`(let $id := discr; $(·)))
| `($id:ident@$pat) => do
let info ← getHeadInfo (pat::alt.1.tail!, alt.2)
{ info with onMatch := fun taken => match info.onMatch taken with
| covered f exh => covered (fun alt => f alt >>= adaptRhs (`(let $id := discr; $(·)))) exh
| r => r }
| _ => throwErrorAt pat "match (syntax) : unexpected pattern kind {pat}"
-- Bind right-hand side to new `let_delayed` decl in order to prevent code duplication
private def deduplicate (floatedLetDecls : Array Syntax) : Alt → TermElabM (Array Syntax × Alt)
-- NOTE: new macro scope so that introduced bindings do not collide
| (pats, rhs) => do
if let `($f:ident $[ $args:ident]*) := rhs then
-- looks simple enough/created by this function, skip
return (floatedLetDecls, (pats, rhs))
withFreshMacroScope do
match ← getPatternsVars pats.toArray with
| #[] =>
-- no antiquotations => introduce Unit parameter to preserve evaluation order
let rhs' ← `(rhs Unit.unit)
(floatedLetDecls.push (← `(letDecl|rhs _ := $rhs)), (pats, rhs'))
| vars =>
let rhs' ← `(rhs $vars*)
(floatedLetDecls.push (← `(letDecl|rhs $vars:ident* := $rhs)), (pats, rhs'))
private partial def compileStxMatch (discrs : List Syntax) (alts : List Alt) : TermElabM Syntax := do
trace[Elab.match_syntax] "match {discrs} with {alts}"
match discrs, alts with
| [], ([], rhs)::_ => pure rhs -- nothing left to match
| _, [] =>
logError "non-exhaustive 'match' (syntax)"
pure Syntax.missing
| discr::discrs, alt::alts => do
let info ← getHeadInfo alt
let pat := alt.1.head!
let alts ← (alt::alts).mapM fun alt => do ((← getHeadInfo alt).onMatch info.check, alt)
let mut yesAlts := #[]
let mut undecidedAlts := #[]
let mut nonExhaustiveAlts := #[]
let mut floatedLetDecls := #[]
for alt in alts do
let mut alt := alt
match alt with
| (covered f exh, alt') =>
-- we can only factor out a common check if there are no undecided patterns in between;
-- otherwise we would change the order of alternatives
if undecidedAlts.isEmpty then
yesAlts ← yesAlts.push <$> f (alt'.1.tail!, alt'.2)
if !exh then
nonExhaustiveAlts := nonExhaustiveAlts.push alt'
else
(floatedLetDecls, alt) ← deduplicate floatedLetDecls alt'
undecidedAlts := undecidedAlts.push alt
nonExhaustiveAlts := nonExhaustiveAlts.push alt
| (undecided, alt') =>
(floatedLetDecls, alt) ← deduplicate floatedLetDecls alt'
undecidedAlts := undecidedAlts.push alt
nonExhaustiveAlts := nonExhaustiveAlts.push alt
| (uncovered, alt') =>
nonExhaustiveAlts := nonExhaustiveAlts.push alt'
let mut stx ← info.doMatch
(yes := fun newDiscrs => do
let mut yesAlts := yesAlts
if !undecidedAlts.isEmpty then
-- group undecided alternatives in a new default case `| discr2, ... => match discr, discr2, ... with ...`
let vars ← discrs.mapM fun _ => withFreshMacroScope `(discr)
let pats := List.replicate newDiscrs.length (Unhygienic.run `(_)) ++ vars
let alts ← undecidedAlts.mapM fun alt => `(matchAltExpr| | $(alt.1.toArray),* => $(alt.2))
let rhs ← `(match discr, $[$(vars.toArray):term],* with $alts:matchAlt*)
yesAlts := yesAlts.push (pats, rhs)
withFreshMacroScope $ compileStxMatch (newDiscrs ++ discrs) yesAlts.toList)
(no := withFreshMacroScope $ compileStxMatch (discr::discrs) nonExhaustiveAlts.toList)
for d in floatedLetDecls do
stx ← `(let_delayed $d:letDecl; $stx)
`(let discr := $discr; $stx)
| _, _ => unreachable!
def match_syntax.expand (stx : Syntax) : TermElabM Syntax := do
match stx with
| `(match $[$discrs:term],* with $[| $[$patss],* => $rhss]*) => do
if !patss.any (·.any (fun
| `($id@$pat) => pat.isQuot
| pat => pat.isQuot)) then
-- no quotations => fall back to regular `match`
throwUnsupportedSyntax
let stx ← compileStxMatch discrs.toList (patss.map (·.toList) |>.zip rhss).toList
trace[Elab.match_syntax.result] "{stx}"
stx
| _ => throwUnsupportedSyntax
@[builtinTermElab «match»] def elabMatchSyntax : TermElab :=
adaptExpander match_syntax.expand
builtin_initialize
registerTraceClass `Elab.match_syntax
registerTraceClass `Elab.match_syntax.result
end Lean.Elab.Term.Quotation
|
2ac236639203f582cb989e8cfaa557209c65ab54
|
1437b3495ef9020d5413178aa33c0a625f15f15f
|
/data/prod.lean
|
60555cae8a92374d46919a58a7739ed6a3165416
|
[
"Apache-2.0"
] |
permissive
|
jean002/mathlib
|
c66bbb2d9fdc9c03ae07f869acac7ddbfce67a30
|
dc6c38a765799c99c4d9c8d5207d9e6c9e0e2cfd
|
refs/heads/master
| 1,587,027,806,375
| 1,547,306,358,000
| 1,547,306,358,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 2,773
|
lean
|
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl
Extends theory on products
-/
variables {α : Type*} {β : Type*} {γ : Type*} {δ : Type*}
@[simp] theorem prod.forall {p : α × β → Prop} : (∀ x, p x) ↔ (∀ a b, p (a, b)) :=
⟨assume h a b, h (a, b), assume h ⟨a, b⟩, h a b⟩
@[simp] theorem prod.exists {p : α × β → Prop} : (∃ x, p x) ↔ (∃ a b, p (a, b)) :=
⟨assume ⟨⟨a, b⟩, h⟩, ⟨a, b, h⟩, assume ⟨a, b, h⟩, ⟨⟨a, b⟩, h⟩⟩
namespace prod
attribute [simp] prod.map
@[simp] lemma map_fst (f : α → γ) (g : β → δ) : ∀(p : α × β), (map f g p).1 = f (p.1)
| ⟨a, b⟩ := rfl
@[simp] lemma map_snd (f : α → γ) (g : β → δ) : ∀(p : α × β), (map f g p).2 = g (p.2)
| ⟨a, b⟩ := rfl
@[simp] theorem mk.inj_iff {a₁ a₂ : α} {b₁ b₂ : β} : (a₁, b₁) = (a₂, b₂) ↔ (a₁ = a₂ ∧ b₁ = b₂) :=
⟨prod.mk.inj, by cc⟩
lemma ext_iff {p q : α × β} : p = q ↔ p.1 = q.1 ∧ p.2 = q.2 :=
by rw [← @mk.eta _ _ p, ← @mk.eta _ _ q, mk.inj_iff]
lemma ext {α β} {p q : α × β} (h₁ : p.1 = q.1) (h₂ : p.2 = q.2) : p = q :=
ext_iff.2 ⟨h₁, h₂⟩
lemma id_prod : (λ (p : α × α), (p.1, p.2)) = id :=
funext $ λ ⟨a, b⟩, rfl
/-- Swap the factors of a product. `swap (a, b) = (b, a)` -/
def swap : α × β → β × α := λp, (p.2, p.1)
@[simp] lemma swap_swap : ∀ x : α × β, swap (swap x) = x
| ⟨a, b⟩ := rfl
@[simp] lemma fst_swap {p : α × β} : (swap p).1 = p.2 := rfl
@[simp] lemma snd_swap {p : α × β} : (swap p).2 = p.1 := rfl
@[simp] lemma swap_prod_mk {a : α} {b : β} : swap (a, b) = (b, a) := rfl
@[simp] lemma swap_swap_eq : swap ∘ swap = @id (α × β) :=
funext swap_swap
@[simp] lemma swap_left_inverse : function.left_inverse (@swap α β) swap :=
swap_swap
@[simp] lemma swap_right_inverse : function.right_inverse (@swap α β) swap :=
swap_swap
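-- A concrete sanity check (illustrative): `swap` simply exchanges the two components.
example : swap ((1 : ℕ), tt) = (tt, 1) := rfl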
lemma eq_iff_fst_eq_snd_eq : ∀{p q : α × β}, p = q ↔ (p.1 = q.1 ∧ p.2 = q.2)
| ⟨p₁, p₂⟩ ⟨q₁, q₂⟩ := by simp
theorem lex_def (r : α → α → Prop) (s : β → β → Prop)
{p q : α × β} : prod.lex r s p q ↔ r p.1 q.1 ∨ p.1 = q.1 ∧ s p.2 q.2 :=
⟨λ h, by cases h; simp *,
λ h, match p, q, h with
| (a, b), (c, d), or.inl h := lex.left _ _ _ h
| (a, b), (c, d), or.inr ⟨e, h⟩ :=
by change a = c at e; subst e; exact lex.right _ _ h
end⟩
instance lex.decidable [decidable_eq α] [decidable_eq β]
(r : α → α → Prop) (s : β → β → Prop) [decidable_rel r] [decidable_rel s] :
decidable_rel (prod.lex r s) :=
λ p q, decidable_of_decidable_of_iff (by apply_instance) (lex_def r s).symm
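-- Illustrative check: the lexicographic order compares first components first, so the second
-- components are irrelevant when the first ones are strictly related.
example : prod.lex ((<) : ℕ → ℕ → Prop) ((<) : ℕ → ℕ → Prop) (1, 5) (2, 0) := dec_trivial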
end prod
|
43e7359c3e576a7584bd6650a10fc248d063ad88
|
bb31430994044506fa42fd667e2d556327e18dfe
|
/src/logic/encodable/basic.lean
|
9d47356f8757d026aaaae8b2c3a7e37aaa5c5946
|
[
"Apache-2.0"
] |
permissive
|
sgouezel/mathlib
|
0cb4e5335a2ba189fa7af96d83a377f83270e503
|
00638177efd1b2534fc5269363ebf42a7871df9a
|
refs/heads/master
| 1,674,527,483,042
| 1,673,665,568,000
| 1,673,665,568,000
| 119,598,202
| 0
| 0
| null | 1,517,348,647,000
| 1,517,348,646,000
| null |
UTF-8
|
Lean
| false
| false
| 19,841
|
lean
|
/-
Copyright (c) 2015 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura, Mario Carneiro
-/
import logic.equiv.nat
import data.pnat.basic
import order.directed
import data.countable.defs
import order.rel_iso.basic
import data.fin.basic
/-!
# Encodable types
This file defines encodable (constructively countable) types as a typeclass.
This is used to provide explicit encode/decode functions from and to `ℕ`, with the information that
those functions are inverses of each other.
The difference with `denumerable` is that finite types are encodable. For infinite types,
`encodable` and `denumerable` agree.
## Main declarations
* `encodable α`: States that there exists an explicit encoding function `encode : α → ℕ` with a
partial inverse `decode : ℕ → option α`.
* `decode₂`: Version of `decode` that is equal to `none` outside of the range of `encode`. Useful as
we do not require this in the definition of `decode`.
* `ulower α`: Any encodable type has an equivalent type living in the lowest universe, namely a
subtype of `ℕ`. `ulower α` finds it.
## Implementation notes
The point of asking for an explicit partial inverse `decode : ℕ → option α` to `encode : α → ℕ` is
to make the range of `encode` decidable even when the finiteness of `α` is not.
-/
open option list nat function
/-- Constructively countable type. Made from an explicit injection `encode : α → ℕ` and a partial
inverse `decode : ℕ → option α`. Note that finite types *are* countable. See `denumerable` if you
wish to enforce infiniteness. -/
class encodable (α : Type*) :=
(encode : α → ℕ)
(decode [] : ℕ → option α)
(encodek : ∀ a, decode (encode a) = some a)
attribute [simp] encodable.encodek
namespace encodable
variables {α : Type*} {β : Type*}
universe u
theorem encode_injective [encodable α] : function.injective (@encode α _)
| x y e := option.some.inj $ by rw [← encodek, e, encodek]
@[simp] lemma encode_inj [encodable α] {a b : α} : encode a = encode b ↔ a = b :=
encode_injective.eq_iff
-- The priority of the instance below is less than the priorities of `subtype.countable`
-- and `quotient.countable`
@[priority 400] instance [encodable α] : countable α := encode_injective.countable
lemma surjective_decode_iget (α : Type*) [encodable α] [inhabited α] :
surjective (λ n, (encodable.decode α n).iget) :=
λ x, ⟨encodable.encode x, by simp_rw [encodable.encodek]⟩
/-- An encodable type has decidable equality. Not set as an instance because this is usually not the
best way to infer decidability. -/
def decidable_eq_of_encodable (α) [encodable α] : decidable_eq α
| a b := decidable_of_iff _ encode_inj
/-- If `α` is encodable and there is an injection `f : β → α`, then `β` is encodable as well. -/
def of_left_injection [encodable α]
(f : β → α) (finv : α → option β) (linv : ∀ b, finv (f b) = some b) : encodable β :=
⟨λ b, encode (f b),
λ n, (decode α n).bind finv,
λ b, by simp [encodable.encodek, linv]⟩
/-- If `α` is encodable and `f : β → α` is invertible, then `β` is encodable as well. -/
def of_left_inverse [encodable α]
(f : β → α) (finv : α → β) (linv : ∀ b, finv (f b) = b) : encodable β :=
of_left_injection f (some ∘ finv) (λ b, congr_arg some (linv b))
/-- Encodability is preserved by equivalence. -/
def of_equiv (α) [encodable α] (e : β ≃ α) : encodable β :=
of_left_inverse e e.symm e.left_inv
@[simp] theorem encode_of_equiv {α β} [encodable α] (e : β ≃ α) (b : β) :
@encode _ (of_equiv _ e) b = encode (e b) := rfl
@[simp] theorem decode_of_equiv {α β} [encodable α] (e : β ≃ α) (n : ℕ) :
@decode _ (of_equiv _ e) n = (decode α n).map e.symm := rfl
instance _root_.nat.encodable : encodable ℕ :=
⟨id, some, λ a, rfl⟩
@[simp] theorem encode_nat (n : ℕ) : encode n = n := rfl
@[simp] theorem decode_nat (n : ℕ) : decode ℕ n = some n := rfl
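-- Two quick illustrations: on `ℕ` the encoding is literally the identity, and in general `decode`
-- undoes `encode`.
example : encode (5 : ℕ) = 5 := rfl
example [encodable α] (a : α) : decode α (encode a) = some a := encodek a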
@[priority 100] instance _root_.is_empty.to_encodable [is_empty α] : encodable α :=
⟨is_empty_elim, λ n, none, is_empty_elim⟩
instance _root_.punit.encodable : encodable punit :=
⟨λ_, 0, λ n, nat.cases_on n (some punit.star) (λ _, none), λ _, by simp⟩
@[simp] theorem encode_star : encode punit.star = 0 := rfl
@[simp] theorem decode_unit_zero : decode punit 0 = some punit.star := rfl
@[simp] theorem decode_unit_succ (n) : decode punit (succ n) = none := rfl
/-- If `α` is encodable, then so is `option α`. -/
instance _root_.option.encodable {α : Type*} [h : encodable α] : encodable (option α) :=
⟨λ o, option.cases_on o nat.zero (λ a, succ (encode a)),
λ n, nat.cases_on n (some none) (λ m, (decode α m).map some),
λ o, by cases o; dsimp; simp [encodek, nat.succ_ne_zero]⟩
@[simp] theorem encode_none [encodable α] : encode (@none α) = 0 := rfl
@[simp] theorem encode_some [encodable α] (a : α) :
encode (some a) = succ (encode a) := rfl
@[simp] theorem decode_option_zero [encodable α] : decode (option α) 0 = some none := rfl
@[simp] theorem decode_option_succ [encodable α] (n) :
decode (option α) (succ n) = (decode α n).map some := rfl
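-- Illustration: the `option` encoding reserves `0` for `none` and shifts everything else up by
-- one.
example : encode (some (3 : ℕ)) = 4 := rfl
example : decode (option ℕ) 0 = some none := rfl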
/-- Failsafe variant of `decode`. `decode₂ α n` returns the preimage of `n` under `encode` if it
exists, and returns `none` if it doesn't. This requirement could have been imposed directly on
`decode`, but it is not, in order to make that definition easier to use. -/
def decode₂ (α) [encodable α] (n : ℕ) : option α :=
(decode α n).bind (option.guard (λ a, encode a = n))
theorem mem_decode₂' [encodable α] {n : ℕ} {a : α} :
a ∈ decode₂ α n ↔ a ∈ decode α n ∧ encode a = n :=
by simp [decode₂]; exact
⟨λ ⟨_, h₁, rfl, h₂⟩, ⟨h₁, h₂⟩, λ ⟨h₁, h₂⟩, ⟨_, h₁, rfl, h₂⟩⟩
theorem mem_decode₂ [encodable α] {n : ℕ} {a : α} :
a ∈ decode₂ α n ↔ encode a = n :=
mem_decode₂'.trans (and_iff_right_of_imp $ λ e, e ▸ encodek _)
theorem decode₂_eq_some [encodable α] {n : ℕ} {a : α} :
decode₂ α n = some a ↔ encode a = n :=
mem_decode₂
@[simp] lemma decode₂_encode [encodable α] (a : α) :
decode₂ α (encode a) = some a :=
by { ext, simp [mem_decode₂, eq_comm] }
theorem decode₂_ne_none_iff [encodable α] {n : ℕ} :
decode₂ α n ≠ none ↔ n ∈ set.range (encode : α → ℕ) :=
by simp_rw [set.range, set.mem_set_of_eq, ne.def, option.eq_none_iff_forall_not_mem,
encodable.mem_decode₂, not_forall, not_not]
theorem decode₂_is_partial_inv [encodable α] : is_partial_inv encode (decode₂ α) :=
λ a n, mem_decode₂
theorem decode₂_inj [encodable α] {n : ℕ} {a₁ a₂ : α}
(h₁ : a₁ ∈ decode₂ α n) (h₂ : a₂ ∈ decode₂ α n) : a₁ = a₂ :=
encode_injective $ (mem_decode₂.1 h₁).trans (mem_decode₂.1 h₂).symm
theorem encodek₂ [encodable α] (a : α) : decode₂ α (encode a) = some a :=
mem_decode₂.2 rfl
/-- The encoding function has decidable range. -/
def decidable_range_encode (α : Type*) [encodable α] : decidable_pred (∈ set.range (@encode α _)) :=
λ x, decidable_of_iff (option.is_some (decode₂ α x))
⟨λ h, ⟨option.get h, by rw [← decode₂_is_partial_inv (option.get h), option.some_get]⟩,
λ ⟨n, hn⟩, by rw [← hn, encodek₂]; exact rfl⟩
/-- An encodable type is equivalent to the range of its encoding function. -/
def equiv_range_encode (α : Type*) [encodable α] : α ≃ set.range (@encode α _) :=
{ to_fun := λ a : α, ⟨encode a, set.mem_range_self _⟩,
inv_fun := λ n, option.get (show is_some (decode₂ α n.1),
by cases n.2 with x hx; rw [← hx, encodek₂]; exact rfl),
left_inv := λ a, by dsimp;
rw [← option.some_inj, option.some_get, encodek₂],
right_inv := λ ⟨n, x, hx⟩, begin
apply subtype.eq,
dsimp,
conv {to_rhs, rw ← hx},
rw [encode_injective.eq_iff, ← option.some_inj, option.some_get, ← hx, encodek₂],
end }
/-- A type with a unique element is encodable. This is not an instance, to avoid diamonds. -/
def _root_.unique.encodable [unique α] : encodable α :=
⟨λ _, 0, λ _, some default, unique.forall_iff.2 rfl⟩
section sum
variables [encodable α] [encodable β]
/-- Explicit encoding function for the sum of two encodable types. -/
def encode_sum : α ⊕ β → ℕ
| (sum.inl a) := bit0 $ encode a
| (sum.inr b) := bit1 $ encode b
/-- Explicit decoding function for the sum of two encodable types. -/
def decode_sum (n : ℕ) : option (α ⊕ β) :=
match bodd_div2 n with
| (ff, m) := (decode α m).map sum.inl
| (tt, m) := (decode β m).map sum.inr
end
/-- If `α` and `β` are encodable, then so is their sum. -/
instance _root_.sum.encodable : encodable (α ⊕ β) :=
⟨encode_sum, decode_sum, λ s,
by cases s; simp [encode_sum, decode_sum, encodek]; refl⟩
@[simp] theorem encode_inl (a : α) :
@encode (α ⊕ β) _ (sum.inl a) = bit0 (encode a) := rfl
@[simp] theorem encode_inr (b : β) :
@encode (α ⊕ β) _ (sum.inr b) = bit1 (encode b) := rfl
@[simp] theorem decode_sum_val (n : ℕ) :
decode (α ⊕ β) n = decode_sum n := rfl
end sum
instance _root_.bool.encodable : encodable bool :=
of_equiv (unit ⊕ unit) equiv.bool_equiv_punit_sum_punit
@[simp] theorem encode_tt : encode tt = 1 := rfl
@[simp] theorem encode_ff : encode ff = 0 := rfl
@[simp] theorem decode_zero : decode bool 0 = some ff := rfl
@[simp] theorem decode_one : decode bool 1 = some tt := rfl
theorem decode_ge_two (n) (h : 2 ≤ n) : decode bool n = none :=
begin
suffices : decode_sum n = none,
{ change (decode_sum n).map _ = none, rw this, refl },
have : 1 ≤ div2 n,
{ rw [div2_val, nat.le_div_iff_mul_le],
exacts [h, dec_trivial] },
cases exists_eq_succ_of_ne_zero (ne_of_gt this) with m e,
simp [decode_sum]; cases bodd n; simp [decode_sum]; rw e; refl
end
noncomputable instance _root_.Prop.encodable : encodable Prop :=
of_equiv bool equiv.Prop_equiv_bool
section sigma
variables {γ : α → Type*} [encodable α] [∀ a, encodable (γ a)]
/-- Explicit encoding function for `sigma γ` -/
def encode_sigma : sigma γ → ℕ
| ⟨a, b⟩ := mkpair (encode a) (encode b)
/-- Explicit decoding function for `sigma γ` -/
def decode_sigma (n : ℕ) : option (sigma γ) :=
let (n₁, n₂) := unpair n in
(decode α n₁).bind $ λ a, (decode (γ a) n₂).map $ sigma.mk a
instance _root_.sigma.encodable : encodable (sigma γ) :=
⟨encode_sigma, decode_sigma, λ ⟨a, b⟩,
by simp [encode_sigma, decode_sigma, unpair_mkpair, encodek]⟩
@[simp] theorem decode_sigma_val (n : ℕ) : decode (sigma γ) n =
(decode α n.unpair.1).bind (λ a, (decode (γ a) n.unpair.2).map $ sigma.mk a) :=
show decode_sigma._match_1 _ = _, by cases n.unpair; refl
@[simp] theorem encode_sigma_val (a b) : @encode (sigma γ) _ ⟨a, b⟩ =
mkpair (encode a) (encode b) := rfl
end sigma
section prod
variables [encodable α] [encodable β]
/-- If `α` and `β` are encodable, then so is their product. -/
instance _root_.prod.encodable : encodable (α × β) :=
of_equiv _ (equiv.sigma_equiv_prod α β).symm
@[simp] theorem decode_prod_val (n : ℕ) : decode (α × β) n =
(decode α n.unpair.1).bind (λ a, (decode β n.unpair.2).map $ prod.mk a) :=
show (decode (sigma (λ _, β)) n).map (equiv.sigma_equiv_prod α β) = _,
by simp; cases decode α n.unpair.1; simp;
cases decode β n.unpair.2; refl
@[simp] theorem encode_prod_val (a b) : @encode (α × β) _ (a, b) =
mkpair (encode a) (encode b) := rfl
end prod
section subtype
open subtype decidable
variables {P : α → Prop} [encA : encodable α] [decP : decidable_pred P]
include encA
/-- Explicit encoding function for a decidable subtype of an encodable type -/
def encode_subtype : {a : α // P a} → ℕ
| ⟨v, h⟩ := encode v
include decP
/-- Explicit decoding function for a decidable subtype of an encodable type -/
def decode_subtype (v : ℕ) : option {a : α // P a} :=
(decode α v).bind $ λ a,
if h : P a then some ⟨a, h⟩ else none
/-- A decidable subtype of an encodable type is encodable. -/
instance _root_.subtype.encodable : encodable {a : α // P a} :=
⟨encode_subtype, decode_subtype,
λ ⟨v, h⟩, by simp [encode_subtype, decode_subtype, encodek, h]⟩
lemma subtype.encode_eq (a : subtype P) : encode a = encode a.val :=
by cases a; refl
end subtype
instance _root_.fin.encodable (n) : encodable (fin n) :=
of_equiv _ fin.equiv_subtype
instance _root_.int.encodable : encodable ℤ :=
of_equiv _ equiv.int_equiv_nat
instance _root_.pnat.encodable : encodable ℕ+ :=
of_equiv _ equiv.pnat_equiv_nat
/-- The lift of an encodable type is encodable. -/
instance _root_.ulift.encodable [encodable α] : encodable (ulift α) :=
of_equiv _ equiv.ulift
/-- The lift of an encodable type is encodable. -/
instance _root_.plift.encodable [encodable α] : encodable (plift α) :=
of_equiv _ equiv.plift
/-- If `β` is encodable and there is an injection `f : α → β`, then `α` is encodable as well. -/
noncomputable def of_inj [encodable β] (f : α → β) (hf : injective f) : encodable α :=
of_left_injection f (partial_inv f) (λ x, (partial_inv_of_injective hf _ _).2 rfl)
/-- If `α` is countable, then it has a (non-canonical) `encodable` structure. -/
noncomputable def of_countable (α : Type*) [countable α] : encodable α :=
nonempty.some $ let ⟨f, hf⟩ := exists_injective_nat α in ⟨of_inj f hf⟩
@[simp] lemma nonempty_encodable : nonempty (encodable α) ↔ countable α :=
⟨λ ⟨h⟩, @encodable.countable α h, λ h, ⟨@of_countable _ h⟩⟩
end encodable
/-- See also `nonempty_fintype`, `nonempty_denumerable`. -/
lemma nonempty_encodable (α : Type*) [countable α] : nonempty (encodable α) :=
⟨encodable.of_countable _⟩
instance : countable ℕ+ := subtype.countable -- short-circuit instance search
section ulower
local attribute [instance, priority 100] encodable.decidable_range_encode
/-- `ulower α : Type` is an equivalent type in the lowest universe, given `encodable α`. -/
@[derive decidable_eq, derive encodable]
def ulower (α : Type*) [encodable α] : Type :=
set.range (encodable.encode : α → ℕ)
end ulower
namespace ulower
variables (α : Type*) [encodable α]
/-- The equivalence between the encodable type `α` and `ulower α : Type`. -/
def equiv : α ≃ ulower α :=
encodable.equiv_range_encode α
variables {α}
/-- Lowers an `a : α` into `ulower α`. -/
def down (a : α) : ulower α := equiv α a
instance [inhabited α] : inhabited (ulower α) := ⟨down default⟩
/-- Lifts an `a : ulower α` into `α`. -/
def up (a : ulower α) : α := (equiv α).symm a
@[simp] lemma down_up {a : ulower α} : down a.up = a := equiv.right_inv _ _
@[simp] lemma up_down {a : α} : (down a).up = a := equiv.left_inv _ _
@[simp] lemma up_eq_up {a b : ulower α} : a.up = b.up ↔ a = b :=
equiv.apply_eq_iff_eq _
@[simp] lemma down_eq_down {a b : α} : down a = down b ↔ a = b :=
equiv.apply_eq_iff_eq _
@[ext] protected lemma ext {a b : ulower α} : a.up = b.up → a = b :=
up_eq_up.1
end ulower
/-
Choice function for encodable types and decidable predicates.
We provide the following API
choose {α : Type*} {p : α → Prop} [c : encodable α] [d : decidable_pred p] : (∃ x, p x) → α :=
choose_spec {α : Type*} {p : α → Prop} [c : encodable α] [d : decidable_pred p] (ex : ∃ x, p x) :
p (choose ex) :=
-/
namespace encodable
section find_a
variables {α : Type*} (p : α → Prop) [encodable α] [decidable_pred p]
private def good : option α → Prop
| (some a) := p a
| none := false
private def decidable_good : decidable_pred (good p)
| n := by cases n; unfold good; apply_instance
local attribute [instance] decidable_good
open encodable
variable {p}
/-- Constructive choice function for a decidable subtype of an encodable type. -/
def choose_x (h : ∃ x, p x) : {a : α // p a} :=
have ∃ n, good p (decode α n), from
let ⟨w, pw⟩ := h in ⟨encode w, by simp [good, encodek, pw]⟩,
match _, nat.find_spec this : ∀ o, good p o → {a // p a} with
| some a, h := ⟨a, h⟩
end
/-- Constructive choice function for a decidable predicate over an encodable type. -/
def choose (h : ∃ x, p x) : α := (choose_x h).1
lemma choose_spec (h : ∃ x, p x) : p (choose h) := (choose_x h).2
end find_a
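-- Illustrative sketch: for a decidable predicate on an encodable type, `choose` computably
-- extracts a witness from a bare existence proof.
example (h : ∃ n : ℕ, 0 < n) : 0 < choose h := choose_spec h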
/-- A constructive version of `classical.axiom_of_choice` for `encodable` types. -/
theorem axiom_of_choice {α : Type*} {β : α → Type*} {R : Π x, β x → Prop}
[Π a, encodable (β a)] [∀ x y, decidable (R x y)]
(H : ∀ x, ∃ y, R x y) : ∃ f : Π a, β a, ∀ x, R x (f x) :=
⟨λ x, choose (H x), λ x, choose_spec (H x)⟩
/-- A constructive version of `classical.skolem` for `encodable` types. -/
theorem skolem {α : Type*} {β : α → Type*} {P : Π x, β x → Prop}
[c : Π a, encodable (β a)] [d : ∀ x y, decidable (P x y)] :
(∀ x, ∃ y, P x y) ↔ ∃ f : Π a, β a, (∀ x, P x (f x)) :=
⟨axiom_of_choice, λ ⟨f, H⟩ x, ⟨_, H x⟩⟩
/-
There is a total ordering on the elements of an encodable type, induced by the map to ℕ.
-/
/-- The `encode` function, viewed as an embedding. -/
def encode' (α) [encodable α] : α ↪ ℕ :=
⟨encodable.encode, encodable.encode_injective⟩
instance {α} [encodable α] : is_trans _ (encode' α ⁻¹'o (≤)) :=
(rel_embedding.preimage _ _).is_trans
instance {α} [encodable α] : is_antisymm _ (encodable.encode' α ⁻¹'o (≤)) :=
(rel_embedding.preimage _ _).is_antisymm
instance {α} [encodable α] : is_total _ (encodable.encode' α ⁻¹'o (≤)) :=
(rel_embedding.preimage _ _).is_total
end encodable
namespace directed
open encodable
variables {α : Type*} {β : Type*} [encodable α] [inhabited α]
/-- Given a `directed r` function `f : α → β` defined on an encodable inhabited type,
construct a noncomputable sequence such that `r (f (x n)) (f (x (n + 1)))`
and `r (f a) (f (x (encode a + 1)))`. -/
protected noncomputable def sequence {r : β → β → Prop} (f : α → β) (hf : directed r f) : ℕ → α
| 0 := default
| (n + 1) :=
let p := sequence n in
match decode α n with
| none := classical.some (hf p p)
| (some a) := classical.some (hf p a)
end
lemma sequence_mono_nat {r : β → β → Prop} {f : α → β} (hf : directed r f) (n : ℕ) :
r (f (hf.sequence f n)) (f (hf.sequence f (n+1))) :=
begin
dsimp [directed.sequence],
generalize eq : hf.sequence f n = p,
cases h : decode α n with a,
{ exact (classical.some_spec (hf p p)).1 },
{ exact (classical.some_spec (hf p a)).1 }
end
lemma rel_sequence {r : β → β → Prop} {f : α → β} (hf : directed r f) (a : α) :
r (f a) (f (hf.sequence f (encode a + 1))) :=
begin
simp only [directed.sequence, encodek],
exact (classical.some_spec (hf _ a)).2
end
variables [preorder β] {f : α → β} (hf : directed (≤) f)
lemma sequence_mono : monotone (f ∘ (hf.sequence f)) :=
monotone_nat_of_le_succ $ hf.sequence_mono_nat
lemma le_sequence (a : α) : f a ≤ f (hf.sequence f (encode a + 1)) :=
hf.rel_sequence a
end directed
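/- Illustrative sketch (not part of the original file): the `directed` hypothesis consumed by
`directed.sequence` above is easy to supply for `(≤)` on `ℕ` using `max`; this example is only a
sanity check of that hypothesis. -/
example : directed ((≤) : ℕ → ℕ → Prop) (id : ℕ → ℕ) :=
λ x y, ⟨max x y, le_max_left x y, le_max_right x y⟩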
section quotient
open encodable quotient
variables {α : Type*} {s : setoid α} [@decidable_rel α (≈)] [encodable α]
/-- Representative of an equivalence class. This is a computable version of `quot.out` for a setoid
on an encodable type. -/
def quotient.rep (q : quotient s) : α :=
choose (exists_rep q)
theorem quotient.rep_spec (q : quotient s) : ⟦q.rep⟧ = q :=
choose_spec (exists_rep q)
/-- The quotient of an encodable space by a decidable equivalence relation is encodable. -/
def encodable_quotient : encodable (quotient s) :=
⟨λ q, encode q.rep,
λ n, quotient.mk <$> decode α n,
by rintros ⟨l⟩; rw encodek; exact congr_arg some ⟦l⟧.rep_spec⟩
end quotient
|
28ee9636def76077d19e0e53362b087d9938081a
|
55c7fc2bf55d496ace18cd6f3376e12bb14c8cc5
|
/src/category_theory/limits/connected.lean
|
c36f9a1d65a79998adf092f0cf4cbfe76e6de536
|
[
"Apache-2.0"
] |
permissive
|
dupuisf/mathlib
|
62de4ec6544bf3b79086afd27b6529acfaf2c1bb
|
8582b06b0a5d06c33ee07d0bdf7c646cae22cf36
|
refs/heads/master
| 1,669,494,854,016
| 1,595,692,409,000
| 1,595,692,409,000
| 272,046,630
| 0
| 0
|
Apache-2.0
| 1,592,066,143,000
| 1,592,066,142,000
| null |
UTF-8
|
Lean
| false
| false
| 3,607
|
lean
|
/-
Copyright (c) 2020 Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Bhavik Mehta
-/
import category_theory.limits.shapes.pullbacks
import category_theory.limits.shapes.equalizers
import category_theory.limits.preserves
import category_theory.connected
/-!
# Connected limits
A connected limit is a limit whose shape is a connected category.
We give examples of connected categories, and prove that the functor given
by `(X × -)` preserves any connected limit. That is, any limit of shape `J`
where `J` is a connected category is preserved by the functor `(X × -)`.
-/
universes v₁ v₂ u₁ u₂
open category_theory category_theory.category category_theory.limits
namespace category_theory
section examples
instance wide_pullback_shape_connected (J : Type v₁) : connected (wide_pullback_shape J) :=
begin
apply connected.of_induct,
introv _ t,
cases j,
{ exact a },
{ rwa t (wide_pullback_shape.hom.term j) }
end
instance wide_pushout_shape_connected (J : Type v₁) : connected (wide_pushout_shape J) :=
begin
apply connected.of_induct,
introv _ t,
cases j,
{ exact a },
{ rwa ← t (wide_pushout_shape.hom.init j) }
end
instance parallel_pair_inhabited : inhabited walking_parallel_pair := ⟨walking_parallel_pair.one⟩
instance parallel_pair_connected : connected (walking_parallel_pair) :=
begin
apply connected.of_induct,
introv _ t,
cases j,
{ rwa t walking_parallel_pair_hom.left },
{ assumption }
end
end examples
local attribute [tidy] tactic.case_bash
variables {C : Type u₂} [category.{v₂} C]
variables [has_binary_products C]
variables {J : Type v₂} [small_category J]
namespace prod_preserves_connected_limits
/-- (Impl). The obvious natural transformation from (X × K -) to K. -/
@[simps]
def γ₂ {K : J ⥤ C} (X : C) : K ⋙ prod_functor.obj X ⟶ K :=
{ app := λ Y, limits.prod.snd }
/-- (Impl). The obvious natural transformation from (X × K -) to X. -/
@[simps]
def γ₁ {K : J ⥤ C} (X : C) : K ⋙ prod_functor.obj X ⟶ (functor.const J).obj X :=
{ app := λ Y, limits.prod.fst }
/-- (Impl). Given a cone for (X × K -), produce a cone for K using the natural transformation `γ₂`. -/
@[simps]
def forget_cone {X : C} {K : J ⥤ C} (s : cone (K ⋙ prod_functor.obj X)) : cone K :=
{ X := s.X,
π := s.π ≫ γ₂ X }
end prod_preserves_connected_limits
open prod_preserves_connected_limits
/--
The functor `(X × -)` preserves any connected limit.
Note that this functor does not preserve the two most obvious disconnected limits - that is,
`(X × -)` does not preserve products or the terminal object, e.g. `(X ⨯ A) ⨯ (X ⨯ B)` is not isomorphic to
`X ⨯ (A ⨯ B)` and `X ⨯ 1` is not isomorphic to `1`.
-/
def prod_preserves_connected_limits [connected J] (X : C) :
preserves_limits_of_shape J (prod_functor.obj X) :=
{ preserves_limit := λ K,
{ preserves := λ c l,
{ lift := λ s, prod.lift (s.π.app (default _) ≫ limits.prod.fst) (l.lift (forget_cone s)),
fac' := λ s j,
begin
apply prod.hom_ext,
{ erw [assoc, limit.map_π, comp_id, limit.lift_π],
exact (nat_trans_from_connected (s.π ≫ γ₁ X) j).symm },
{ simp [← l.fac (forget_cone s) j] }
end,
uniq' := λ s m L,
begin
apply prod.hom_ext,
{ erw [limit.lift_π, ← L (default J), assoc, limit.map_π, comp_id],
refl },
{ rw limit.lift_π,
apply l.uniq (forget_cone s),
intro j,
simp [← L j] }
end } } }
end category_theory
|
4c651d9a6db9aeaead5f470544ea2688268cc9b5
|
ce6917c5bacabee346655160b74a307b4a5ab620
|
/src/ch4/ex0206.lean
|
78f53d9a99973b16f87e92aa635d2f5ce670f053
|
[] |
no_license
|
Ailrun/Theorem_Proving_in_Lean
|
ae6a23f3c54d62d401314d6a771e8ff8b4132db2
|
2eb1b5caf93c6a5a555c79e9097cf2ba5a66cf68
|
refs/heads/master
| 1,609,838,270,467
| 1,586,846,743,000
| 1,586,846,743,000
| 240,967,761
| 1
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 176
|
lean
|
universe u
variables (α β : Type u)
example (f : α → β) (a : α) : (λ x, f x) a = f a := rfl
example (a : α) (b : α) : (a, b).1 = a := rfl
example : 2 + 3 = 5 := rfl
|
4894c6fc7e6619572669ed7834976e40cc3f8674
|
ce6917c5bacabee346655160b74a307b4a5ab620
|
/src/ch5/ex0210.lean
|
788598882680d6ca98fe29dd38d95e49e492c512
|
[] |
no_license
|
Ailrun/Theorem_Proving_in_Lean
|
ae6a23f3c54d62d401314d6a771e8ff8b4132db2
|
2eb1b5caf93c6a5a555c79e9097cf2ba5a66cf68
|
refs/heads/master
| 1,609,838,270,467
| 1,586,846,743,000
| 1,586,846,743,000
| 240,967,761
| 1
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 135
|
lean
|
example : ∀ a b c : ℕ, a = b → a = c → c = b :=
begin
intros,
apply eq.trans,
apply eq.symm,
repeat { assumption }
end
|
3ecfe854b4fc57634e6c2951b34e9fbc1d39b713
|
57fdc8de88f5ea3bfde4325e6ecd13f93a274ab5
|
/algebra/euclidean_domain.lean
|
5dcd00cfaadb9f11dcd61080e60640153e07ea6f
|
[
"Apache-2.0"
] |
permissive
|
louisanu/mathlib
|
11f56f2d40dc792bc05ee2f78ea37d73e98ecbfe
|
2bd5e2159d20a8f20d04fc4d382e65eea775ed39
|
refs/heads/master
| 1,617,706,993,439
| 1,523,163,654,000
| 1,523,163,654,000
| 124,519,997
| 0
| 0
|
Apache-2.0
| 1,520,588,283,000
| 1,520,588,283,000
| null |
UTF-8
|
Lean
| false
| false
| 7,212
|
lean
|
/-
Copyright (c) 2018 Louis Carlin. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Louis Carlin
Euclidean domains and the Euclidean algorithm (the extended Euclidean algorithm is still to come).
Much of this is based on pre-existing code in mathlib for natural number gcds.
-/
import tactic.ring
universe u
class euclidean_domain (α : Type u) [decidable_eq α] extends integral_domain α :=
(quotient : α → α → α)
(remainder : α → α → α)
(quotient_mul_add_remainder_eq : ∀ a b, (quotient a b) * b + (remainder a b) = a) -- This could be changed to the same order as int.mod_add_div. We normally write qb+r rather than r + qb though.
(valuation : α → ℕ)
(valuation_remainder_lt : ∀ a b, b ≠ 0 → valuation (remainder a b) < valuation b)
(le_valuation_mul : ∀ a b, b ≠ 0 → valuation a ≤ valuation (a*b))
/-
`le_valuation_mul` is often not required in definitions of a euclidean domain, since, given the
other properties, we can show there is a (noncomputable) euclidean domain structure on α satisfying
`le_valuation_mul`. So potentially this definition could be split into two different ones
(`euclidean_domain_weak` and `euclidean_domain_strong`) with a noncomputable function from weak to
strong. I've currently divided the lemmas into strong and weak, depending on whether they require
`le_valuation_mul` or not.
-/
namespace euclidean_domain
variable {α : Type u}
variables [decidable_eq α] [euclidean_domain α]
instance : has_div α := ⟨quotient⟩
instance : has_mod α := ⟨remainder⟩
instance : has_sizeof α := ⟨valuation⟩
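/- Illustrative sketch (not part of the original file): the defining identity
`quotient_mul_add_remainder_eq` can be rearranged to express the remainder directly; the same
rewriting step reappears in the proofs of `dvd_mod_self` and `dvd_mod` below. -/
example (a b : α) : remainder a b = a - quotient a b * b :=
calc remainder a b
      = quotient a b * b + remainder a b - quotient a b * b : by ring
  ... = a - quotient a b * b : by rw [quotient_mul_add_remainder_eq a b]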
lemma gcd_decreasing (a b : α) (w : a ≠ 0) : has_well_founded.r (b % a) a := valuation_remainder_lt b a w
def gcd : α → α → α
| a b := if a_zero : a = 0 then b
else have h : has_well_founded.r (b % a) a := gcd_decreasing a b a_zero,
gcd (b%a) a
/- weak lemmas -/
@[simp] lemma mod_zero (a : α) : a % 0 = a := by simpa using
quotient_mul_add_remainder_eq a 0
lemma dvd_mod_self (a : α) : a ∣ a % a :=
begin
let d := (a/a)*a, -- ring tactic can't solve things without this
have : a%a = a - (a/a)*a, from
calc
a%a = d + a%a - d : by ring
... = (a/a)*a + a%a - (a/a)*a : by dsimp [d]; refl
... = a - (a/a)*a : by simp [(%), (/), quotient_mul_add_remainder_eq a a],
rw this,
exact dvd_sub (dvd_refl a) (dvd_mul_left a (a/a)),
end
lemma mod_lt : ∀ (a : α) {b : α}, valuation b > valuation (0:α) → valuation (a%b) < valuation b :=
begin
intros a b h,
by_cases b_zero : (b=0),
{ rw b_zero at h,
have := lt_irrefl (valuation (0:α)),
contradiction },
{ exact valuation_remainder_lt a b b_zero }
end
lemma neq_zero_lt_mod_lt (a b : α) : b ≠ 0 → valuation (a%b) < valuation b
| hnb := valuation_remainder_lt a b hnb
lemma dvd_mod {a b c : α} : c ∣ a → c ∣ b → c ∣ a % b :=
begin
intros dvd_a dvd_b,
have : remainder a b = a - quotient a b * b, from
calc
a%b = quotient a b * b + a%b - quotient a b * b : by ring
... = a - quotient a b * b : by {dsimp[(%)]; rw (quotient_mul_add_remainder_eq a b)},
dsimp [(%)],
rw this,
exact dvd_sub dvd_a (dvd_mul_of_dvd_right dvd_b (a/b)),
end
/- strong lemmas -/
lemma val_lt_one (a : α) : valuation a < valuation (1:α) → a = 0 :=
begin
intro a_lt,
by_cases a = 0,
{ exact h },
{ have := le_valuation_mul (1:α) a h,
simp at this,
have := not_le_of_lt a_lt,
contradiction }
end
lemma val_dvd_le : ∀ a b : α, b ∣ a → a ≠ 0 → valuation b ≤ valuation a
| _ b ⟨d, rfl⟩ ha :=
begin
by_cases d = 0,
{ simp[h] at ha,
contradiction },
{ exact le_valuation_mul b d h }
end
@[simp] lemma mod_one (a : α) : a % 1 = 0 :=
val_lt_one _ (valuation_remainder_lt a 1 one_ne_zero)
@[simp] lemma zero_mod (b : α) : 0 % b = 0 :=
begin
have h : remainder 0 b = b * (-quotient 0 b ), from
calc
remainder 0 b = quotient 0 b * b + remainder 0 b + b * (-quotient 0 b ) : by ring
... = b * (-quotient 0 b ) : by rw [quotient_mul_add_remainder_eq 0 b, zero_add],
by_cases quotient_zero : (-quotient 0 b) = 0,
{ simp[quotient_zero] at h,
exact h },
{ by_cases h'' : b = 0,
{ rw h'',
simp },
{ have := not_le_of_lt (valuation_remainder_lt 0 b h''),
rw h at this,
have := le_valuation_mul b (-quotient 0 b) quotient_zero,
contradiction }}
end
@[simp] lemma zero_div (b : α) (hnb : b ≠ 0) : 0 / b = 0 :=
begin
have h1 : remainder 0 b = 0, from zero_mod b,
have h2 := quotient_mul_add_remainder_eq 0 b,
simp[h1] at h2,
cases eq_zero_or_eq_zero_of_mul_eq_zero h2,
{ exact h },
{ contradiction }
end
@[simp] lemma mod_self (a : α) : a % a = 0 :=
let ⟨m, a_mul⟩ := dvd_mod_self a in
begin
by_cases m = 0,
{ rw [h, mul_zero] at a_mul,
exact a_mul },
{ by_cases a_zero : a = 0,
{ rw a_zero,
simp },
{ have := le_valuation_mul a m h,
rw ←a_mul at this,
have := not_le_of_lt (valuation_remainder_lt a a a_zero),
contradiction}}
end
lemma div_self (a : α) : a ≠ 0 → a / a = (1:α) :=
begin
intro hna,
have wit_aa := quotient_mul_add_remainder_eq a a,
have a_mod_a := mod_self a,
dsimp [(%)] at a_mod_a,
simp [a_mod_a] at wit_aa,
have h1 : 1 * a = a, from one_mul a,
conv at wit_aa {for a [4] {rw ←h1}},
exact eq_of_mul_eq_mul_right hna wit_aa
end
/- weak gcd lemmas -/
@[simp] theorem gcd_zero_left (a : α) : gcd 0 a = a :=
begin
rw gcd,
simp,
end
@[elab_as_eliminator]
theorem gcd.induction {P : α → α → Prop}
(a b : α)
(H0 : ∀ x, P 0 x)
(H1 : ∀ a b, a ≠ 0 → P (b%a) a → P a b) :
P a b :=
@well_founded.induction _ _ (has_well_founded.wf α) (λa, ∀b, P a b) a
begin
intros c IH,
by_cases c = 0,
{ rw h,
exact H0 },
{ intro b,
exact H1 c b (h) (IH (b%c) (neq_zero_lt_mod_lt b c h) c) }
end
b
theorem gcd_dvd (a b : α) : (gcd a b ∣ a) ∧ (gcd a b ∣ b) :=
gcd.induction a b
(λ b, by simp)
begin
intros a b aneq h_dvd,
rw gcd,
simp [aneq],
induction h_dvd,
split,
{ exact h_dvd_right },
{ conv {for b [2] {rw ←(quotient_mul_add_remainder_eq b a)}},
have h_dvd_right_a:= dvd_mul_of_dvd_right h_dvd_right (b/a),
exact dvd_add h_dvd_right_a h_dvd_left }
end
theorem gcd_dvd_left (a b : α) : gcd a b ∣ a := (gcd_dvd a b).left
theorem gcd_dvd_right (a b : α) : gcd a b ∣ b := (gcd_dvd a b).right
theorem dvd_gcd {a b c : α} : c ∣ a → c ∣ b → c ∣ gcd a b :=
gcd.induction a b
begin
intros b dvd_0 dvd_b,
simp,
exact dvd_b
end
begin
intros a b hna d dvd_a dvd_b,
rw gcd,
simp [hna],
exact d (dvd_mod dvd_b dvd_a) dvd_a,
end
/- strong gcd lemmas -/
@[simp] theorem gcd_zero_right (a : α) : gcd a 0 = a :=
begin
by_cases (a=0),
{ simp[h] },
{ rw gcd,
simp [h] }
end
@[simp] theorem gcd_one_left (a : α) : gcd 1 a = 1 :=
begin
rw [gcd],
simp,
end
theorem gcd_next (a b : α) : gcd a b = gcd (b % a) a :=
begin
by_cases (a=0),
{ simp [h] },
{ rw gcd,
simp [h] }
end
@[simp] theorem gcd_self (a : α) : gcd a a = a :=
by rw [gcd_next a a, mod_self a, gcd_zero_left]
end euclidean_domain
|
12b202c838705920ddc5e757d213cc60aa702dcd
|
88892181780ff536a81e794003fe058062f06758
|
/src/100_theorems/t068.lean
|
94eb3ee35cb91b026b5a9151ae66c58d81e4e004
|
[] |
no_license
|
AtnNn/lean-sandbox
|
fe2c44280444e8bb8146ab8ac391c82b480c0a2e
|
8c68afbdc09213173aef1be195da7a9a86060a97
|
refs/heads/master
| 1,623,004,395,876
| 1,579,969,507,000
| 1,579,969,507,000
| 146,666,368
| 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 171
|
lean
|
import algebra.big_operators
-- Sum of an arithmetic series
open finset
theorem t068 : ∀ (n : ℕ), (finset.range n).sum (λi, i) = (n * (n - 1)) / 2
:= sum_range_id
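-- Illustrative instantiation (not part of the original file): specialising the theorem at `n = 5`
-- gives the familiar identity `0 + 1 + 2 + 3 + 4 = (5 * 4) / 2`.
example : (finset.range 5).sum (λ i, i) = (5 * (5 - 1)) / 2 := t068 5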
|
7402de42b20fb7147aaa6c07197b038ea7f28000
|
4727251e0cd73359b15b664c3170e5d754078599
|
/src/analysis/calculus/inverse.lean
|
4532272141c61fc8de8ed71414d85ced80d4cf41
|
[
"Apache-2.0"
] |
permissive
|
Vierkantor/mathlib
|
0ea59ac32a3a43c93c44d70f441c4ee810ccceca
|
83bc3b9ce9b13910b57bda6b56222495ebd31c2f
|
refs/heads/master
| 1,658,323,012,449
| 1,652,256,003,000
| 1,652,256,003,000
| 209,296,341
| 0
| 1
|
Apache-2.0
| 1,568,807,655,000
| 1,568,807,655,000
| null |
UTF-8
|
Lean
| false
| false
| 37,352
|
lean
|
/-
Copyright (c) 2020 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov, Heather Macbeth, Sébastien Gouëzel
-/
import analysis.calculus.cont_diff
import tactic.ring_exp
import analysis.normed_space.banach
import topology.local_homeomorph
/-!
# Inverse function theorem
In this file we prove the inverse function theorem. It says that if a map `f : E → F`
has an invertible strict derivative `f'` at `a`, then it is locally invertible,
and the inverse function has derivative `f' ⁻¹`.
We define `has_strict_deriv_at.to_local_homeomorph` that repacks a function `f`
with a `hf : has_strict_fderiv_at f f' a`, `f' : E ≃L[𝕜] F`, into a `local_homeomorph`.
The `to_fun` of this `local_homeomorph` is `defeq` to `f`, so one can apply theorems
about `local_homeomorph` to `hf.to_local_homeomorph f`, and get statements about `f`.
Then we define `has_strict_fderiv_at.local_inverse` to be the `inv_fun` of this `local_homeomorph`,
and prove two versions of the inverse function theorem:
* `has_strict_fderiv_at.to_local_inverse`: if `f` has an invertible derivative `f'` at `a` in the
strict sense (`hf`), then `hf.local_inverse f f' a` has derivative `f'.symm` at `f a` in the
strict sense;
* `has_strict_fderiv_at.to_local_left_inverse`: if `f` has an invertible derivative `f'` at `a` in
the strict sense and `g` is locally left inverse to `f` near `a`, then `g` has derivative
`f'.symm` at `f a` in the strict sense.
In the one-dimensional case we reformulate these theorems in terms of `has_strict_deriv_at` and
`f'⁻¹`.
We also reformulate the theorems in terms of `cont_diff`, to give that `C^k` (respectively,
smooth) inputs give `C^k` (smooth) inverses. These versions require that continuous
differentiability implies strict differentiability; this is false over a general field, true over
`ℝ` or `ℂ` and implemented here assuming `is_R_or_C 𝕂`.
Some related theorems, providing the derivative and higher regularity assuming that we already know
the inverse function, are formulated in `fderiv.lean`, `deriv.lean`, and `cont_diff.lean`.
## Notations
In the section about `approximates_linear_on` we introduce some `local notation` to make formulas
shorter:
* by `N` we denote `∥f'⁻¹∥`;
* by `g` we denote the auxiliary contracting map `x ↦ x + f'.symm (y - f x)` used to prove that
`{x | f x = y}` is nonempty.
## Tags
derivative, strictly differentiable, continuously differentiable, smooth, inverse function
-/
open function set filter metric
open_locale topological_space classical nnreal
noncomputable theory
variables {𝕜 : Type*} [nondiscrete_normed_field 𝕜]
variables {E : Type*} [normed_group E] [normed_space 𝕜 E]
variables {F : Type*} [normed_group F] [normed_space 𝕜 F]
variables {G : Type*} [normed_group G] [normed_space 𝕜 G]
variables {G' : Type*} [normed_group G'] [normed_space 𝕜 G']
variables {ε : ℝ}
open asymptotics filter metric set
open continuous_linear_map (id)
/-!
### Non-linear maps close to affine maps
In this section we study a map `f` such that `∥f x - f y - f' (x - y)∥ ≤ c * ∥x - y∥` on an open set
`s`, where `f' : E →L[𝕜] F` is a continuous linear map and `c` is suitably small. Maps of this type
behave like `f a + f' (x - a)` near each `a ∈ s`.
When `f'` is onto, we show that `f` is locally onto.
When `f'` is a continuous linear equiv, we show that `f` is a homeomorphism
between `s` and `f '' s`. More precisely, we define `approximates_linear_on.to_local_homeomorph` to
be a `local_homeomorph` with `to_fun = f`, `source = s`, and `target = f '' s`.
Maps of this type naturally appear in the proof of the inverse function theorem (see next section),
and `approximates_linear_on.to_local_homeomorph` will imply that the locally inverse function
exists.
We define this auxiliary notion to split the proof of the inverse function theorem into small
lemmas. This approach makes it possible
- to prove a lower estimate on the size of the domain of the inverse function;
- to reuse parts of the proofs in the case where a function is not strictly differentiable. E.g., for a
function `f : E × F → G` with estimates on `f x y₁ - f x y₂` but not on `f x₁ y - f x₂ y`.
-/
/-- We say that `f` approximates a continuous linear map `f'` on `s` with constant `c`,
if `∥f x - f y - f' (x - y)∥ ≤ c * ∥x - y∥` whenever `x, y ∈ s`.
This predicate is defined to facilitate the splitting of the inverse function theorem into small
lemmas. Some of these lemmas can be useful, e.g., to prove that the inverse function is defined
on a specific set. -/
def approximates_linear_on (f : E → F) (f' : E →L[𝕜] F) (s : set E) (c : ℝ≥0) : Prop :=
∀ (x ∈ s) (y ∈ s), ∥f x - f y - f' (x - y)∥ ≤ c * ∥x - y∥
@[simp] lemma approximates_linear_on_empty (f : E → F) (f' : E →L[𝕜] F) (c : ℝ≥0) :
approximates_linear_on f f' ∅ c :=
by simp [approximates_linear_on]
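/- Illustrative sketch (not part of the original file): a continuous linear map approximates
itself with constant `0`, since the difference in the definition vanishes identically. -/
example (f' : E →L[𝕜] F) (s : set E) : approximates_linear_on f' f' s 0 :=
λ x hx y hy, le_of_eq $ by simp [map_sub]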
namespace approximates_linear_on
variables [cs : complete_space E] {f : E → F}
/-! First we prove some properties of a function that `approximates_linear_on` a (not necessarily
invertible) continuous linear map. -/
section
variables {f' : E →L[𝕜] F} {s t : set E} {c c' : ℝ≥0}
theorem mono_num (hc : c ≤ c') (hf : approximates_linear_on f f' s c) :
approximates_linear_on f f' s c' :=
λ x hx y hy, le_trans (hf x hx y hy) (mul_le_mul_of_nonneg_right hc $ norm_nonneg _)
theorem mono_set (hst : s ⊆ t) (hf : approximates_linear_on f f' t c) :
approximates_linear_on f f' s c :=
λ x hx y hy, hf x (hst hx) y (hst hy)
lemma approximates_linear_on_iff_lipschitz_on_with
{f : E → F} {f' : E →L[𝕜] F} {s : set E} {c : ℝ≥0} :
approximates_linear_on f f' s c ↔ lipschitz_on_with c (f - f') s :=
begin
have : ∀ x y, f x - f y - f' (x - y) = (f - f') x - (f - f') y,
{ assume x y, simp only [map_sub, pi.sub_apply], abel },
simp only [this, lipschitz_on_with_iff_norm_sub_le, approximates_linear_on],
end
alias approximates_linear_on_iff_lipschitz_on_with ↔
approximates_linear_on.lipschitz_on_with lipschitz_on_with.approximates_linear_on
lemma lipschitz_sub (hf : approximates_linear_on f f' s c) :
lipschitz_with c (λ x : s, f x - f' x) :=
begin
refine lipschitz_with.of_dist_le_mul (λ x y, _),
rw [dist_eq_norm, subtype.dist_eq, dist_eq_norm],
convert hf x x.2 y y.2 using 2,
rw [f'.map_sub], abel
end
protected lemma lipschitz (hf : approximates_linear_on f f' s c) :
lipschitz_with (∥f'∥₊ + c) (s.restrict f) :=
by simpa only [restrict_apply, add_sub_cancel'_right]
using (f'.lipschitz.restrict s).add hf.lipschitz_sub
protected lemma continuous (hf : approximates_linear_on f f' s c) :
continuous (s.restrict f) :=
hf.lipschitz.continuous
protected lemma continuous_on (hf : approximates_linear_on f f' s c) :
continuous_on f s :=
continuous_on_iff_continuous_restrict.2 hf.continuous
end
section locally_onto
/-!
We prove that a function which is linearly approximated by a continuous linear map with a nonlinear
right inverse is locally onto. This will apply to the case where the approximating map is a linear
equivalence, for the local inverse theorem, but also whenever the approximating map is onto,
by Banach's open mapping theorem. -/
include cs
variables {s : set E} {c : ℝ≥0} {f' : E →L[𝕜] F}
/-- If a function is linearly approximated by a continuous linear map with a (possibly nonlinear)
right inverse, then it is locally onto: a ball of an explicit radius is included in the image
of the map. -/
theorem surj_on_closed_ball_of_nonlinear_right_inverse
(hf : approximates_linear_on f f' s c) (f'symm : f'.nonlinear_right_inverse)
{ε : ℝ} {b : E} (ε0 : 0 ≤ ε) (hε : closed_ball b ε ⊆ s) :
surj_on f (closed_ball b ε) (closed_ball (f b) (((f'symm.nnnorm : ℝ)⁻¹ - c) * ε)) :=
begin
assume y hy,
cases le_or_lt (f'symm.nnnorm : ℝ) ⁻¹ c with hc hc,
{ refine ⟨b, by simp [ε0], _⟩,
have : dist y (f b) ≤ 0 :=
(mem_closed_ball.1 hy).trans (mul_nonpos_of_nonpos_of_nonneg (by linarith) ε0),
simp only [dist_le_zero] at this,
rw this },
have If' : (0 : ℝ) < f'symm.nnnorm,
by { rw [← inv_pos], exact (nnreal.coe_nonneg _).trans_lt hc },
have Icf' : (c : ℝ) * f'symm.nnnorm < 1, by rwa [inv_eq_one_div, lt_div_iff If'] at hc,
have Jf' : (f'symm.nnnorm : ℝ) ≠ 0 := ne_of_gt If',
have Jcf' : (1 : ℝ) - c * f'symm.nnnorm ≠ 0, by { apply ne_of_gt, linarith },
/- We have to show that `y` can be written as `f x` for some `x ∈ closed_ball b ε`.
The idea of the proof is to apply the Banach contraction principle to the map
`g : x ↦ x + f'symm (y - f x)`, as a fixed point of this map satisfies `f x = y`.
When `f'symm` is a genuine linear inverse, `g` is a contracting map. In our case, since `f'symm`
is nonlinear, this map is not contracting (it is not even continuous), but the argument of
the contraction theorem still goes through: `uₙ = gⁿ b` is a Cauchy sequence, converging
exponentially fast to the desired point `x`. Instead of appealing to general results, we check
this by hand.
The main point is that `f (u n)` becomes exponentially close to `y`, and therefore
`dist (u (n+1)) (u n)` becomes exponentially small, making it possible to get an inductive
bound on `dist (u n) b`, from which one checks that `u n` stays in the ball on which one has
control. Therefore, the bound can be checked at the next step, and so on inductively.
-/
set g := λ x, x + f'symm (y - f x) with hg,
set u := λ (n : ℕ), g ^[n] b with hu,
have usucc : ∀ n, u (n + 1) = g (u n), by simp [hu, ← iterate_succ_apply' g _ b],
-- First bound: if `f z` is close to `y`, then `g z` is close to `z` (i.e., almost a fixed point).
have A : ∀ z, dist (g z) z ≤ f'symm.nnnorm * dist (f z) y,
{ assume z,
rw [dist_eq_norm, hg, add_sub_cancel', dist_eq_norm'],
exact f'symm.bound _ },
-- Second bound: if `z` and `g z` are in the set with good control, then `f (g z)` becomes closer
-- to `y` than `f z` was (this uses the linear approximation property, and is the reason for the
-- choice of the formula for `g`).
have B : ∀ z ∈ closed_ball b ε, g z ∈ closed_ball b ε →
dist (f (g z)) y ≤ c * f'symm.nnnorm * dist (f z) y,
{ assume z hz hgz,
set v := f'symm (y - f z) with hv,
calc dist (f (g z)) y = ∥f (z + v) - y∥ : by rw [dist_eq_norm]
... = ∥f (z + v) - f z - f' v + f' v - (y - f z)∥ : by { congr' 1, abel }
... = ∥f (z + v) - f z - f' ((z + v) - z)∥ :
by simp only [continuous_linear_map.nonlinear_right_inverse.right_inv,
add_sub_cancel', sub_add_cancel]
... ≤ c * ∥(z + v) - z∥ : hf _ (hε hgz) _ (hε hz)
... ≤ c * (f'symm.nnnorm * dist (f z) y) : begin
apply mul_le_mul_of_nonneg_left _ (nnreal.coe_nonneg c),
simpa [hv, dist_eq_norm'] using f'symm.bound (y - f z),
end
... = c * f'symm.nnnorm * dist (f z) y : by ring },
-- Third bound: a complicated bound on `dist w b` (that will show up in the induction) is enough
-- to check that `w` is in the ball on which one has controls. Will be used to check that `u n`
-- belongs to this ball for all `n`.
have C : ∀ (n : ℕ) (w : E),
dist w b ≤ f'symm.nnnorm * (1 - (c * f'symm.nnnorm)^n) / (1 - c * f'symm.nnnorm) * dist (f b) y
→ w ∈ closed_ball b ε,
{ assume n w hw,
apply hw.trans,
rw [div_mul_eq_mul_div, div_le_iff], swap, { linarith },
calc (f'symm.nnnorm : ℝ) * (1 - (c * f'symm.nnnorm) ^ n) * dist (f b) y
= f'symm.nnnorm * dist (f b) y * (1 - (c * f'symm.nnnorm) ^ n) : by ring
... ≤ f'symm.nnnorm * dist (f b) y * 1 :
begin
apply mul_le_mul_of_nonneg_left _ (mul_nonneg (nnreal.coe_nonneg _) dist_nonneg),
rw [sub_le_self_iff],
exact pow_nonneg (mul_nonneg (nnreal.coe_nonneg _) (nnreal.coe_nonneg _)) _,
end
... ≤ f'symm.nnnorm * (((f'symm.nnnorm : ℝ)⁻¹ - c) * ε) :
by { rw [mul_one],
exact mul_le_mul_of_nonneg_left (mem_closed_ball'.1 hy) (nnreal.coe_nonneg _) }
... = ε * (1 - c * f'symm.nnnorm) : by { field_simp, ring } },
/- Main inductive control: `f (u n)` becomes exponentially close to `y`, and therefore
`dist (u (n+1)) (u n)` becomes exponentially small, making it possible to get an inductive
bound on `dist (u n) b`, from which one checks that `u n` remains in the ball on which we
have estimates. -/
have D : ∀ (n : ℕ), dist (f (u n)) y ≤ (c * f'symm.nnnorm)^n * dist (f b) y
∧ dist (u n) b ≤ f'symm.nnnorm * (1 - (c * f'symm.nnnorm)^n) / (1 - c * f'symm.nnnorm)
* dist (f b) y,
{ assume n,
induction n with n IH, { simp [hu, le_refl] },
rw usucc,
have Ign : dist (g (u n)) b ≤
f'symm.nnnorm * (1 - (c * f'symm.nnnorm)^n.succ) / (1 - c * f'symm.nnnorm) * dist (f b) y :=
calc
dist (g (u n)) b ≤ dist (g (u n)) (u n) + dist (u n) b : dist_triangle _ _ _
... ≤ f'symm.nnnorm * dist (f (u n)) y + dist (u n) b : add_le_add (A _) le_rfl
... ≤ f'symm.nnnorm * ((c * f'symm.nnnorm)^n * dist (f b) y) +
f'symm.nnnorm * (1 - (c * f'symm.nnnorm)^n) / (1 - c * f'symm.nnnorm) * dist (f b) y :
add_le_add (mul_le_mul_of_nonneg_left IH.1 (nnreal.coe_nonneg _)) IH.2
... = f'symm.nnnorm * (1 - (c * f'symm.nnnorm)^n.succ) / (1 - c * f'symm.nnnorm)
* dist (f b) y : by { field_simp [Jcf'], ring_exp },
refine ⟨_, Ign⟩,
calc dist (f (g (u n))) y ≤ c * f'symm.nnnorm * dist (f (u n)) y :
B _ (C n _ IH.2) (C n.succ _ Ign)
... ≤ (c * f'symm.nnnorm) * ((c * f'symm.nnnorm)^n * dist (f b) y) :
mul_le_mul_of_nonneg_left IH.1 (mul_nonneg (nnreal.coe_nonneg _) (nnreal.coe_nonneg _))
... = (c * f'symm.nnnorm) ^ n.succ * dist (f b) y : by ring_exp },
-- Deduce from the inductive bound that `uₙ` is a Cauchy sequence, therefore converging.
have : cauchy_seq u,
{ have : ∀ (n : ℕ), dist (u n) (u (n+1)) ≤ f'symm.nnnorm * dist (f b) y * (c * f'symm.nnnorm)^n,
{ assume n,
calc dist (u n) (u (n+1)) = dist (g (u n)) (u n) : by rw [usucc, dist_comm]
... ≤ f'symm.nnnorm * dist (f (u n)) y : A _
... ≤ f'symm.nnnorm * ((c * f'symm.nnnorm)^n * dist (f b) y) :
mul_le_mul_of_nonneg_left (D n).1 (nnreal.coe_nonneg _)
... = f'symm.nnnorm * dist (f b) y * (c * f'symm.nnnorm)^n : by ring },
exact cauchy_seq_of_le_geometric _ _ Icf' this },
obtain ⟨x, hx⟩ : ∃ x, tendsto u at_top (𝓝 x) := cauchy_seq_tendsto_of_complete this,
-- As all the `uₙ` belong to the ball `closed_ball b ε`, so does their limit `x`.
have xmem : x ∈ closed_ball b ε :=
is_closed_ball.mem_of_tendsto hx (eventually_of_forall (λ n, C n _ (D n).2)),
refine ⟨x, xmem, _⟩,
-- It remains to check that `f x = y`. This follows from continuity of `f` on `closed_ball b ε`
-- and from the fact that `f uₙ` is converging to `y` by construction.
have hx' : tendsto u at_top (𝓝[closed_ball b ε] x),
{ simp only [nhds_within, tendsto_inf, hx, true_and, ge_iff_le, tendsto_principal],
exact eventually_of_forall (λ n, C n _ (D n).2) },
have T1 : tendsto (λ n, f (u n)) at_top (𝓝 (f x)) :=
(hf.continuous_on.mono hε x xmem).tendsto.comp hx',
have T2 : tendsto (λ n, f (u n)) at_top (𝓝 y),
{ rw tendsto_iff_dist_tendsto_zero,
refine squeeze_zero (λ n, dist_nonneg) (λ n, (D n).1) _,
simpa using (tendsto_pow_at_top_nhds_0_of_lt_1
(mul_nonneg (nnreal.coe_nonneg _) (nnreal.coe_nonneg _)) Icf').mul tendsto_const_nhds },
exact tendsto_nhds_unique T1 T2,
end
lemma open_image (hf : approximates_linear_on f f' s c) (f'symm : f'.nonlinear_right_inverse)
(hs : is_open s) (hc : subsingleton F ∨ c < f'symm.nnnorm⁻¹) : is_open (f '' s) :=
begin
cases hc with hE hc, { resetI, apply is_open_discrete },
simp only [is_open_iff_mem_nhds, nhds_basis_closed_ball.mem_iff, ball_image_iff] at hs ⊢,
intros x hx,
rcases hs x hx with ⟨ε, ε0, hε⟩,
refine ⟨(f'symm.nnnorm⁻¹ - c) * ε, mul_pos (sub_pos.2 hc) ε0, _⟩,
exact (hf.surj_on_closed_ball_of_nonlinear_right_inverse f'symm (le_of_lt ε0) hε).mono
hε (subset.refl _)
end
lemma image_mem_nhds (hf : approximates_linear_on f f' s c) (f'symm : f'.nonlinear_right_inverse)
{x : E} (hs : s ∈ 𝓝 x) (hc : subsingleton F ∨ c < f'symm.nnnorm⁻¹) :
f '' s ∈ 𝓝 (f x) :=
begin
obtain ⟨t, hts, ht, xt⟩ : ∃ t ⊆ s, is_open t ∧ x ∈ t := _root_.mem_nhds_iff.1 hs,
have := is_open.mem_nhds ((hf.mono_set hts).open_image f'symm ht hc) (mem_image_of_mem _ xt),
exact mem_of_superset this (image_subset _ hts),
end
lemma map_nhds_eq (hf : approximates_linear_on f f' s c) (f'symm : f'.nonlinear_right_inverse)
{x : E} (hs : s ∈ 𝓝 x) (hc : subsingleton F ∨ c < f'symm.nnnorm⁻¹) :
map f (𝓝 x) = 𝓝 (f x) :=
begin
refine le_antisymm ((hf.continuous_on x (mem_of_mem_nhds hs)).continuous_at hs)
(le_map (λ t ht, _)),
have : f '' (s ∩ t) ∈ 𝓝 (f x) := (hf.mono_set (inter_subset_left s t)).image_mem_nhds
f'symm (inter_mem hs ht) hc,
exact mem_of_superset this (image_subset _ (inter_subset_right _ _)),
end
end locally_onto
/-!
From now on we assume that `f` approximates an invertible continuous linear map `f : E ≃L[𝕜] F`.
We also assume that either `E = {0}`, or `c < ∥f'⁻¹∥⁻¹`. We use `N` as an abbreviation for `∥f'⁻¹∥`.
-/
variables {f' : E ≃L[𝕜] F} {s : set E} {c : ℝ≥0}
local notation `N` := ∥(f'.symm : F →L[𝕜] E)∥₊
protected lemma antilipschitz (hf : approximates_linear_on f (f' : E →L[𝕜] F) s c)
(hc : subsingleton E ∨ c < N⁻¹) :
antilipschitz_with (N⁻¹ - c)⁻¹ (s.restrict f) :=
begin
cases hc with hE hc,
{ haveI : subsingleton s := ⟨λ x y, subtype.eq $ @subsingleton.elim _ hE _ _⟩,
exact antilipschitz_with.of_subsingleton },
convert (f'.antilipschitz.restrict s).add_lipschitz_with hf.lipschitz_sub hc,
simp [restrict]
end
protected lemma injective (hf : approximates_linear_on f (f' : E →L[𝕜] F) s c)
(hc : subsingleton E ∨ c < N⁻¹) :
injective (s.restrict f) :=
(hf.antilipschitz hc).injective
protected lemma inj_on (hf : approximates_linear_on f (f' : E →L[𝕜] F) s c)
(hc : subsingleton E ∨ c < N⁻¹) :
inj_on f s :=
inj_on_iff_injective.2 $ hf.injective hc
protected lemma surjective [complete_space E]
(hf : approximates_linear_on f (f' : E →L[𝕜] F) univ c) (hc : subsingleton E ∨ c < N⁻¹) :
surjective f :=
begin
cases hc with hE hc,
{ haveI : subsingleton F := (equiv.subsingleton_congr f'.to_linear_equiv.to_equiv).1 hE,
exact surjective_to_subsingleton _ },
{ apply forall_of_forall_mem_closed_ball (λ (y : F), ∃ a, f a = y) (f 0) _,
have hc' : (0 : ℝ) < N⁻¹ - c, by { rw sub_pos, exact hc },
let p : ℝ → Prop := λ R, closed_ball (f 0) R ⊆ set.range f,
have hp : ∀ᶠ (r:ℝ) in at_top, p ((N⁻¹ - c) * r),
{ have hr : ∀ᶠ (r:ℝ) in at_top, 0 ≤ r := eventually_ge_at_top 0,
refine hr.mono (λ r hr, subset.trans _ (image_subset_range f (closed_ball 0 r))),
refine hf.surj_on_closed_ball_of_nonlinear_right_inverse f'.to_nonlinear_right_inverse hr _,
exact subset_univ _ },
refine ((tendsto_id.const_mul_at_top hc').frequently hp.frequently).mono _,
exact λ R h y hy, h hy },
end
/-- A map approximating a linear equivalence on a set defines a local equivalence on this set.
Should not be used outside of this file, because it is superseded by `to_local_homeomorph` below.
This is a first step towards the inverse function. -/
def to_local_equiv (hf : approximates_linear_on f (f' : E →L[𝕜] F) s c)
(hc : subsingleton E ∨ c < N⁻¹) : local_equiv E F :=
(hf.inj_on hc).to_local_equiv _ _
/-- The inverse function is continuous on `f '' s`. Use properties of `local_homeomorph` instead. -/
lemma inverse_continuous_on (hf : approximates_linear_on f (f' : E →L[𝕜] F) s c)
(hc : subsingleton E ∨ c < N⁻¹) :
continuous_on (hf.to_local_equiv hc).symm (f '' s) :=
begin
apply continuous_on_iff_continuous_restrict.2,
refine ((hf.antilipschitz hc).to_right_inv_on' _ (hf.to_local_equiv hc).right_inv').continuous,
exact (λ x hx, (hf.to_local_equiv hc).map_target hx)
end
/-- The inverse function is approximated linearly on `f '' s` by `f'.symm`. -/
lemma to_inv (hf : approximates_linear_on f (f' : E →L[𝕜] F) s c)
(hc : subsingleton E ∨ c < N⁻¹) :
approximates_linear_on (hf.to_local_equiv hc).symm (f'.symm : F →L[𝕜] E) (f '' s)
(N * (N⁻¹ - c)⁻¹ * c) :=
begin
assume x hx y hy,
set A := hf.to_local_equiv hc with hA,
have Af : ∀ z, A z = f z := λ z, rfl,
rcases (mem_image _ _ _).1 hx with ⟨x', x's, rfl⟩,
rcases (mem_image _ _ _).1 hy with ⟨y', y's, rfl⟩,
rw [← Af x', ← Af y', A.left_inv x's, A.left_inv y's],
calc ∥x' - y' - (f'.symm) (A x' - A y')∥
≤ N * ∥f' (x' - y' - (f'.symm) (A x' - A y'))∥ :
(f' : E →L[𝕜] F).bound_of_antilipschitz f'.antilipschitz _
... = N * ∥A y' - A x' - f' (y' - x')∥ :
begin
congr' 2,
simp only [continuous_linear_equiv.apply_symm_apply, continuous_linear_equiv.map_sub],
abel,
end
... ≤ N * (c * ∥y' - x'∥) :
mul_le_mul_of_nonneg_left (hf _ y's _ x's) (nnreal.coe_nonneg _)
... ≤ N * (c * (((N⁻¹ - c)⁻¹ : ℝ≥0) * ∥A y' - A x'∥)) :
begin
apply_rules [mul_le_mul_of_nonneg_left, nnreal.coe_nonneg],
rw [← dist_eq_norm, ← dist_eq_norm],
exact (hf.antilipschitz hc).le_mul_dist ⟨y', y's⟩ ⟨x', x's⟩,
end
... = (N * (N⁻¹ - c)⁻¹ * c : ℝ≥0) * ∥A x' - A y'∥ :
by { simp only [norm_sub_rev, nonneg.coe_mul], ring }
end
include cs
section
variables (f s)
/-- Given a function `f` that approximates a linear equivalence on an open set `s`,
returns a local homeomorph with `to_fun = f` and `source = s`. -/
def to_local_homeomorph (hf : approximates_linear_on f (f' : E →L[𝕜] F) s c)
(hc : subsingleton E ∨ c < N⁻¹) (hs : is_open s) : local_homeomorph E F :=
{ to_local_equiv := hf.to_local_equiv hc,
open_source := hs,
open_target := hf.open_image f'.to_nonlinear_right_inverse hs
(by rwa f'.to_linear_equiv.to_equiv.subsingleton_congr at hc),
continuous_to_fun := hf.continuous_on,
continuous_inv_fun := hf.inverse_continuous_on hc }
/-- A function `f` that approximates a linear equivalence on the whole space is a homeomorphism. -/
def to_homeomorph (hf : approximates_linear_on f (f' : E →L[𝕜] F) univ c)
(hc : subsingleton E ∨ c < N⁻¹) :
E ≃ₜ F :=
begin
refine (hf.to_local_homeomorph _ _ hc is_open_univ).to_homeomorph_of_source_eq_univ_target_eq_univ
rfl _,
change f '' univ = univ,
rw [image_univ, range_iff_surjective],
exact hf.surjective hc,
end
omit cs
/-- In a real vector space, a function `f` that approximates a linear equivalence on a subset `s`
can be extended to a homeomorphism of the whole space. -/
lemma exists_homeomorph_extension {E : Type*} [normed_group E] [normed_space ℝ E]
{F : Type*} [normed_group F] [normed_space ℝ F] [finite_dimensional ℝ F]
{s : set E} {f : E → F} {f' : E ≃L[ℝ] F} {c : ℝ≥0}
(hf : approximates_linear_on f (f' : E →L[ℝ] F) s c)
(hc : subsingleton E ∨ lipschitz_extension_constant F * c < (∥(f'.symm : F →L[ℝ] E)∥₊)⁻¹) :
∃ g : E ≃ₜ F, eq_on f g s :=
begin
-- the difference `f - f'` is Lipschitz on `s`. It can be extended to a Lipschitz function `u`
-- on the whole space, with a slightly worse Lipschitz constant. Then `f' + u` will be the
-- desired homeomorphism.
obtain ⟨u, hu, uf⟩ : ∃ (u : E → F), lipschitz_with (lipschitz_extension_constant F * c) u
∧ eq_on (f - f') u s := hf.lipschitz_on_with.extend_finite_dimension,
let g : E → F := λ x, f' x + u x,
have fg : eq_on f g s := λ x hx, by simp_rw [g, ← uf hx, pi.sub_apply, add_sub_cancel'_right],
have hg : approximates_linear_on g (f' : E →L[ℝ] F) univ (lipschitz_extension_constant F * c),
{ apply lipschitz_on_with.approximates_linear_on,
rw lipschitz_on_univ,
convert hu,
ext x,
simp only [add_sub_cancel', continuous_linear_equiv.coe_coe, pi.sub_apply] },
haveI : finite_dimensional ℝ E := f'.symm.to_linear_equiv.finite_dimensional,
exact ⟨hg.to_homeomorph g hc, fg⟩,
end
end
@[simp] lemma to_local_homeomorph_coe (hf : approximates_linear_on f (f' : E →L[𝕜] F) s c)
(hc : subsingleton E ∨ c < N⁻¹) (hs : is_open s) :
(hf.to_local_homeomorph f s hc hs : E → F) = f := rfl
@[simp] lemma to_local_homeomorph_source (hf : approximates_linear_on f (f' : E →L[𝕜] F) s c)
(hc : subsingleton E ∨ c < N⁻¹) (hs : is_open s) :
(hf.to_local_homeomorph f s hc hs).source = s := rfl
@[simp] lemma to_local_homeomorph_target (hf : approximates_linear_on f (f' : E →L[𝕜] F) s c)
(hc : subsingleton E ∨ c < N⁻¹) (hs : is_open s) :
(hf.to_local_homeomorph f s hc hs).target = f '' s := rfl
lemma closed_ball_subset_target (hf : approximates_linear_on f (f' : E →L[𝕜] F) s c)
(hc : subsingleton E ∨ c < N⁻¹) (hs : is_open s) {b : E} (ε0 : 0 ≤ ε) (hε : closed_ball b ε ⊆ s) :
closed_ball (f b) ((N⁻¹ - c) * ε) ⊆ (hf.to_local_homeomorph f s hc hs).target :=
(hf.surj_on_closed_ball_of_nonlinear_right_inverse f'.to_nonlinear_right_inverse
ε0 hε).mono hε (subset.refl _)
end approximates_linear_on
/-!
### Inverse function theorem
Now we prove the inverse function theorem. Let `f : E → F` be a map defined on a complete vector
space `E`. Assume that `f` has an invertible derivative `f' : E ≃L[𝕜] F` at `a : E` in the strict
sense. Then `f` approximates `f'` in the sense of `approximates_linear_on` on an open neighborhood
of `a`, and we can apply `approximates_linear_on.to_local_homeomorph` to construct the inverse
function. -/
namespace has_strict_fderiv_at
/-- If `f` has derivative `f'` at `a` in the strict sense and `c > 0`, then `f` approximates `f'`
with constant `c` on some neighborhood of `a`. -/
lemma approximates_deriv_on_nhds {f : E → F} {f' : E →L[𝕜] F} {a : E}
(hf : has_strict_fderiv_at f f' a) {c : ℝ≥0} (hc : subsingleton E ∨ 0 < c) :
∃ s ∈ 𝓝 a, approximates_linear_on f f' s c :=
begin
cases hc with hE hc,
{ refine ⟨univ, is_open.mem_nhds is_open_univ trivial, λ x hx y hy, _⟩,
simp [@subsingleton.elim E hE x y] },
have := hf.def hc,
rw [nhds_prod_eq, filter.eventually, mem_prod_same_iff] at this,
rcases this with ⟨s, has, hs⟩,
exact ⟨s, has, λ x hx y hy, hs (mk_mem_prod hx hy)⟩
end
lemma map_nhds_eq_of_surj [complete_space E] [complete_space F]
{f : E → F} {f' : E →L[𝕜] F} {a : E}
(hf : has_strict_fderiv_at f (f' : E →L[𝕜] F) a) (h : f'.range = ⊤) :
map f (𝓝 a) = 𝓝 (f a) :=
begin
let f'symm := f'.nonlinear_right_inverse_of_surjective h,
set c : ℝ≥0 := f'symm.nnnorm⁻¹ / 2 with hc,
have f'symm_pos : 0 < f'symm.nnnorm := f'.nonlinear_right_inverse_of_surjective_nnnorm_pos h,
have cpos : 0 < c, by simp [hc, nnreal.half_pos, nnreal.inv_pos, f'symm_pos],
obtain ⟨s, s_nhds, hs⟩ : ∃ s ∈ 𝓝 a, approximates_linear_on f f' s c :=
hf.approximates_deriv_on_nhds (or.inr cpos),
apply hs.map_nhds_eq f'symm s_nhds (or.inr (nnreal.half_lt_self _)),
simp [ne_of_gt f'symm_pos],
end
variables [cs : complete_space E] {f : E → F} {f' : E ≃L[𝕜] F} {a : E}
lemma approximates_deriv_on_open_nhds (hf : has_strict_fderiv_at f (f' : E →L[𝕜] F) a) :
∃ (s : set E) (hs : a ∈ s ∧ is_open s),
approximates_linear_on f (f' : E →L[𝕜] F) s (∥(f'.symm : F →L[𝕜] E)∥₊⁻¹ / 2) :=
begin
refine ((nhds_basis_opens a).exists_iff _).1 _,
exact (λ s t, approximates_linear_on.mono_set),
exact (hf.approximates_deriv_on_nhds $ f'.subsingleton_or_nnnorm_symm_pos.imp id $
λ hf', nnreal.half_pos $ nnreal.inv_pos.2 $ hf')
end
include cs
variable (f)
/-- Given a function with an invertible strict derivative at `a`, returns a `local_homeomorph`
with `to_fun = f` and `a ∈ source`. This is a part of the inverse function theorem.
The other part `has_strict_fderiv_at.to_local_inverse` states that the inverse function
of this `local_homeomorph` has derivative `f'.symm`. -/
def to_local_homeomorph (hf : has_strict_fderiv_at f (f' : E →L[𝕜] F) a) : local_homeomorph E F :=
approximates_linear_on.to_local_homeomorph f
(classical.some hf.approximates_deriv_on_open_nhds)
(classical.some_spec hf.approximates_deriv_on_open_nhds).snd
(f'.subsingleton_or_nnnorm_symm_pos.imp id $ λ hf', nnreal.half_lt_self $ ne_of_gt $
nnreal.inv_pos.2 $ hf')
(classical.some_spec hf.approximates_deriv_on_open_nhds).fst.2
variable {f}
@[simp] lemma to_local_homeomorph_coe (hf : has_strict_fderiv_at f (f' : E →L[𝕜] F) a) :
(hf.to_local_homeomorph f : E → F) = f := rfl
lemma mem_to_local_homeomorph_source (hf : has_strict_fderiv_at f (f' : E →L[𝕜] F) a) :
a ∈ (hf.to_local_homeomorph f).source :=
(classical.some_spec hf.approximates_deriv_on_open_nhds).fst.1
lemma image_mem_to_local_homeomorph_target (hf : has_strict_fderiv_at f (f' : E →L[𝕜] F) a) :
f a ∈ (hf.to_local_homeomorph f).target :=
(hf.to_local_homeomorph f).map_source hf.mem_to_local_homeomorph_source
lemma map_nhds_eq_of_equiv (hf : has_strict_fderiv_at f (f' : E →L[𝕜] F) a) :
map f (𝓝 a) = 𝓝 (f a) :=
(hf.to_local_homeomorph f).map_nhds_eq hf.mem_to_local_homeomorph_source
variables (f f' a)
/-- Given a function `f` with an invertible derivative, returns a function that is locally inverse
to `f`. -/
def local_inverse (hf : has_strict_fderiv_at f (f' : E →L[𝕜] F) a) : F → E :=
(hf.to_local_homeomorph f).symm
variables {f f' a}
lemma local_inverse_def (hf : has_strict_fderiv_at f (f' : E →L[𝕜] F) a) :
hf.local_inverse f _ _ = (hf.to_local_homeomorph f).symm :=
rfl
lemma eventually_left_inverse (hf : has_strict_fderiv_at f (f' : E →L[𝕜] F) a) :
∀ᶠ x in 𝓝 a, hf.local_inverse f f' a (f x) = x :=
(hf.to_local_homeomorph f).eventually_left_inverse hf.mem_to_local_homeomorph_source
@[simp] lemma local_inverse_apply_image (hf : has_strict_fderiv_at f (f' : E →L[𝕜] F) a) :
hf.local_inverse f f' a (f a) = a :=
hf.eventually_left_inverse.self_of_nhds
lemma eventually_right_inverse (hf : has_strict_fderiv_at f (f' : E →L[𝕜] F) a) :
∀ᶠ y in 𝓝 (f a), f (hf.local_inverse f f' a y) = y :=
(hf.to_local_homeomorph f).eventually_right_inverse' hf.mem_to_local_homeomorph_source
lemma local_inverse_continuous_at (hf : has_strict_fderiv_at f (f' : E →L[𝕜] F) a) :
continuous_at (hf.local_inverse f f' a) (f a) :=
(hf.to_local_homeomorph f).continuous_at_symm hf.image_mem_to_local_homeomorph_target
lemma local_inverse_tendsto (hf : has_strict_fderiv_at f (f' : E →L[𝕜] F) a) :
tendsto (hf.local_inverse f f' a) (𝓝 $ f a) (𝓝 a) :=
(hf.to_local_homeomorph f).tendsto_symm hf.mem_to_local_homeomorph_source
lemma local_inverse_unique (hf : has_strict_fderiv_at f (f' : E →L[𝕜] F) a) {g : F → E}
(hg : ∀ᶠ x in 𝓝 a, g (f x) = x) :
∀ᶠ y in 𝓝 (f a), g y = local_inverse f f' a hf y :=
eventually_eq_of_left_inv_of_right_inv hg hf.eventually_right_inverse $
(hf.to_local_homeomorph f).tendsto_symm hf.mem_to_local_homeomorph_source
/-- If `f` has an invertible derivative `f'` at `a` in the sense of strict differentiability `(hf)`,
then the inverse function `hf.local_inverse f` has derivative `f'.symm` at `f a`. -/
theorem to_local_inverse (hf : has_strict_fderiv_at f (f' : E →L[𝕜] F) a) :
has_strict_fderiv_at (hf.local_inverse f f' a) (f'.symm : F →L[𝕜] E) (f a) :=
(hf.to_local_homeomorph f).has_strict_fderiv_at_symm hf.image_mem_to_local_homeomorph_target $
by simpa [← local_inverse_def] using hf
/-- If `f : E → F` has an invertible derivative `f'` at `a` in the sense of strict differentiability
and `g (f x) = x` in a neighborhood of `a`, then `g` has derivative `f'.symm` at `f a`.
For a version assuming `f (g y) = y` and continuity of `g` at `f a` but not `[complete_space E]`
see `of_local_left_inverse`. -/
theorem to_local_left_inverse (hf : has_strict_fderiv_at f (f' : E →L[𝕜] F) a) {g : F → E}
(hg : ∀ᶠ x in 𝓝 a, g (f x) = x) :
has_strict_fderiv_at g (f'.symm : F →L[𝕜] E) (f a) :=
hf.to_local_inverse.congr_of_eventually_eq $ (hf.local_inverse_unique hg).mono $ λ _, eq.symm
end has_strict_fderiv_at
/-- If a function has an invertible strict derivative at all points, then it is an open map. -/
lemma open_map_of_strict_fderiv_equiv [complete_space E] {f : E → F} {f' : E → E ≃L[𝕜] F}
(hf : ∀ x, has_strict_fderiv_at f (f' x : E →L[𝕜] F) x) :
is_open_map f :=
is_open_map_iff_nhds_le.2 $ λ x, (hf x).map_nhds_eq_of_equiv.ge
/-!
### Inverse function theorem, 1D case
In this case we prove a version of the inverse function theorem for maps `f : 𝕜 → 𝕜`.
We use `continuous_linear_equiv.units_equiv_aut` to translate `has_strict_deriv_at f f' a` and
`f' ≠ 0` into `has_strict_fderiv_at f (_ : 𝕜 ≃L[𝕜] 𝕜) a`.
-/
namespace has_strict_deriv_at
variables [cs : complete_space 𝕜] {f : 𝕜 → 𝕜} {f' a : 𝕜} (hf : has_strict_deriv_at f f' a)
(hf' : f' ≠ 0)
include cs
variables (f f' a)
/-- A function that is inverse to `f` near `a`. -/
@[reducible] def local_inverse : 𝕜 → 𝕜 :=
(hf.has_strict_fderiv_at_equiv hf').local_inverse _ _ _
variables {f f' a}
lemma map_nhds_eq : map f (𝓝 a) = 𝓝 (f a) :=
(hf.has_strict_fderiv_at_equiv hf').map_nhds_eq_of_equiv
theorem to_local_inverse : has_strict_deriv_at (hf.local_inverse f f' a hf') f'⁻¹ (f a) :=
(hf.has_strict_fderiv_at_equiv hf').to_local_inverse
theorem to_local_left_inverse {g : 𝕜 → 𝕜} (hg : ∀ᶠ x in 𝓝 a, g (f x) = x) :
has_strict_deriv_at g f'⁻¹ (f a) :=
(hf.has_strict_fderiv_at_equiv hf').to_local_left_inverse hg
end has_strict_deriv_at
/-- If a function has a non-zero strict derivative at all points, then it is an open map. -/
lemma open_map_of_strict_deriv [complete_space 𝕜] {f f' : 𝕜 → 𝕜}
(hf : ∀ x, has_strict_deriv_at f (f' x) x) (h0 : ∀ x, f' x ≠ 0) :
is_open_map f :=
is_open_map_iff_nhds_le.2 $ λ x, ((hf x).map_nhds_eq (h0 x)).ge
/-!
### Inverse function theorem, smooth case
-/
namespace cont_diff_at
variables {𝕂 : Type*} [is_R_or_C 𝕂]
variables {E' : Type*} [normed_group E'] [normed_space 𝕂 E']
variables {F' : Type*} [normed_group F'] [normed_space 𝕂 F']
variables [complete_space E'] (f : E' → F') {f' : E' ≃L[𝕂] F'} {a : E'}
/-- Given a `cont_diff` function over `𝕂` (which is `ℝ` or `ℂ`) with an invertible
derivative at `a`, returns a `local_homeomorph` with `to_fun = f` and `a ∈ source`. -/
def to_local_homeomorph
{n : with_top ℕ} (hf : cont_diff_at 𝕂 n f a) (hf' : has_fderiv_at f (f' : E' →L[𝕂] F') a)
(hn : 1 ≤ n) :
local_homeomorph E' F' :=
(hf.has_strict_fderiv_at' hf' hn).to_local_homeomorph f
variable {f}
@[simp] lemma to_local_homeomorph_coe
{n : with_top ℕ} (hf : cont_diff_at 𝕂 n f a) (hf' : has_fderiv_at f (f' : E' →L[𝕂] F') a)
(hn : 1 ≤ n) :
(hf.to_local_homeomorph f hf' hn : E' → F') = f := rfl
lemma mem_to_local_homeomorph_source
{n : with_top ℕ} (hf : cont_diff_at 𝕂 n f a) (hf' : has_fderiv_at f (f' : E' →L[𝕂] F') a)
(hn : 1 ≤ n) :
a ∈ (hf.to_local_homeomorph f hf' hn).source :=
(hf.has_strict_fderiv_at' hf' hn).mem_to_local_homeomorph_source
lemma image_mem_to_local_homeomorph_target
{n : with_top ℕ} (hf : cont_diff_at 𝕂 n f a) (hf' : has_fderiv_at f (f' : E' →L[𝕂] F') a)
(hn : 1 ≤ n) :
f a ∈ (hf.to_local_homeomorph f hf' hn).target :=
(hf.has_strict_fderiv_at' hf' hn).image_mem_to_local_homeomorph_target
/-- Given a `cont_diff` function over `𝕂` (which is `ℝ` or `ℂ`) with an invertible derivative
at `a`, returns a function that is locally inverse to `f`. -/
def local_inverse
{n : with_top ℕ} (hf : cont_diff_at 𝕂 n f a) (hf' : has_fderiv_at f (f' : E' →L[𝕂] F') a)
(hn : 1 ≤ n) :
F' → E' :=
(hf.has_strict_fderiv_at' hf' hn).local_inverse f f' a
lemma local_inverse_apply_image
{n : with_top ℕ} (hf : cont_diff_at 𝕂 n f a) (hf' : has_fderiv_at f (f' : E' →L[𝕂] F') a)
(hn : 1 ≤ n) :
hf.local_inverse hf' hn (f a) = a :=
(hf.has_strict_fderiv_at' hf' hn).local_inverse_apply_image
/-- Given a `cont_diff` function over `𝕂` (which is `ℝ` or `ℂ`) with an invertible derivative
at `a`, the inverse function (produced by `cont_diff.to_local_homeomorph`) is
also `cont_diff`. -/
lemma to_local_inverse
{n : with_top ℕ} (hf : cont_diff_at 𝕂 n f a) (hf' : has_fderiv_at f (f' : E' →L[𝕂] F') a)
(hn : 1 ≤ n) :
cont_diff_at 𝕂 n (hf.local_inverse hf' hn) (f a) :=
begin
have := hf.local_inverse_apply_image hf' hn,
apply (hf.to_local_homeomorph f hf' hn).cont_diff_at_symm
(image_mem_to_local_homeomorph_target hf hf' hn),
{ convert hf' },
{ convert hf }
end
end cont_diff_at
|
1edbabd7df3e9c03c48fa71a39849a93c402027e
|
b2fe74b11b57d362c13326bc5651244f111fa6f4
|
/src/analysis/convex/basic.lean
|
95e456e43f938281f1930608e77b6028cb69ba7d
|
[
"Apache-2.0"
] |
permissive
|
midfield/mathlib
|
c4db5fa898b5ac8f2f80ae0d00c95eb6f745f4c7
|
775edc615ecec631d65b6180dbcc7bc26c3abc26
|
refs/heads/master
| 1,675,330,551,921
| 1,608,304,514,000
| 1,608,304,514,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 58,026
|
lean
|
/-
Copyright (c) 2019 Alexander Bentkamp. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Alexander Bentkamp, Yury Kudriashov
-/
import data.set.intervals.ord_connected
import data.set.intervals.image_preimage
import data.complex.module
import linear_algebra.affine_space.affine_map
import algebra.module.ordered
/-!
# Convex sets and functions on real vector spaces
In a real vector space, we define the following objects and properties.
* `segment x y` is the closed segment joining `x` and `y`.
* A set `s` is `convex` if for any two points `x y ∈ s` it includes `segment x y`;
* A function `f : E → β` is `convex_on` a set `s` if `s` is itself a convex set, and for any two
points `x y ∈ s` the segment joining `(x, f x)` to `(y, f y)` is (non-strictly) above the graph
of `f`; equivalently, `convex_on f s` means that the epigraph
`{p : E × β | p.1 ∈ s ∧ f p.1 ≤ p.2}` is a convex set;
* Center mass of a finite set of points with prescribed weights.
* Convex hull of a set `s` is the minimal convex set that includes `s`.
* Standard simplex `std_simplex ι [fintype ι]` is the intersection of the positive quadrant with
the hyperplane `s.sum = 1` in the space `ι → ℝ`.
We also provide various equivalent versions of the definitions above, prove that some specific sets
are convex, and prove Jensen's inequality.
Note: To define convexity for functions `f : E → β`, we need `β` to be an ordered vector space,
defined using the instance `ordered_semimodule ℝ β`.
## Notations
We use the following local notations:
* `I = Icc (0:ℝ) 1`;
* `[x, y] = segment x y`.
They are defined using `local notation`, so they are not available outside of this file.
-/
universes u' u v v' w x
variables {E : Type u} {F : Type v} {ι : Type w} {ι' : Type x} {α : Type v'}
[add_comm_group E] [vector_space ℝ E] [add_comm_group F] [vector_space ℝ F]
[linear_ordered_field α]
{s : set E}
open set linear_map
open_locale classical big_operators
local notation `I` := (Icc 0 1 : set ℝ)
section sets
/-! ### Segment -/
/-- Segments in a vector space -/
def segment (x y : E) : set E :=
{z : E | ∃ (a b : ℝ) (ha : 0 ≤ a) (hb : 0 ≤ b) (hab : a + b = 1), a • x + b • y = z}
local notation `[`x `, ` y `]` := segment x y
lemma segment_symm (x y : E) : [x, y] = [y, x] :=
set.ext $ λ z,
⟨λ ⟨a, b, ha, hb, hab, H⟩, ⟨b, a, hb, ha, (add_comm _ _).trans hab, (add_comm _ _).trans H⟩,
λ ⟨a, b, ha, hb, hab, H⟩, ⟨b, a, hb, ha, (add_comm _ _).trans hab, (add_comm _ _).trans H⟩⟩
lemma left_mem_segment (x y : E) : x ∈ [x, y] :=
⟨1, 0, zero_le_one, le_refl 0, add_zero 1, by rw [zero_smul, one_smul, add_zero]⟩
lemma right_mem_segment (x y : E) : y ∈ [x, y] :=
segment_symm y x ▸ left_mem_segment y x
lemma segment_same (x : E) : [x, x] = {x} :=
set.ext $ λ z, ⟨λ ⟨a, b, ha, hb, hab, hz⟩,
by simpa only [(add_smul _ _ _).symm, mem_singleton_iff, hab, one_smul, eq_comm] using hz,
λ h, mem_singleton_iff.1 h ▸ left_mem_segment z z⟩
lemma segment_eq_image (x y : E) : segment x y = (λ (θ : ℝ), (1 - θ) • x + θ • y) '' I :=
set.ext $ λ z,
⟨λ ⟨a, b, ha, hb, hab, hz⟩,
⟨b, ⟨hb, hab ▸ le_add_of_nonneg_left ha⟩, hab ▸ hz ▸ by simp only [add_sub_cancel]⟩,
λ ⟨θ, ⟨hθ₀, hθ₁⟩, hz⟩, ⟨1-θ, θ, sub_nonneg.2 hθ₁, hθ₀, sub_add_cancel _ _, hz⟩⟩
lemma segment_eq_image' (x y : E) : segment x y = (λ (θ : ℝ), x + θ • (y - x)) '' I :=
by { convert segment_eq_image x y, ext θ, simp only [smul_sub, sub_smul, one_smul], abel }
lemma segment_eq_image₂ (x y : E) :
segment x y = (λ p:ℝ×ℝ, p.1 • x + p.2 • y) '' {p | 0 ≤ p.1 ∧ 0 ≤ p.2 ∧ p.1 + p.2 = 1} :=
by simp only [segment, image, prod.exists, mem_set_of_eq, exists_prop, and_assoc]
lemma segment_eq_Icc {a b : ℝ} (h : a ≤ b) : [a, b] = Icc a b :=
begin
rw [segment_eq_image'],
show (((+) a) ∘ (λ t, t * (b - a))) '' Icc 0 1 = Icc a b,
rw [image_comp, image_mul_right_Icc (@zero_le_one ℝ _) (sub_nonneg.2 h), image_const_add_Icc],
simp
end
lemma segment_eq_Icc' (a b : ℝ) : [a, b] = Icc (min a b) (max a b) :=
by cases le_total a b; [skip, rw segment_symm]; simp [segment_eq_Icc, *]
lemma segment_eq_interval (a b : ℝ) : segment a b = interval a b :=
segment_eq_Icc' _ _
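-- Illustrative sketch (not part of the original file): on `ℝ`, the segment from `0` to `1` is the
-- closed unit interval.
example : segment (0:ℝ) 1 = Icc (0:ℝ) 1 := segment_eq_Icc zero_le_one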
lemma mem_segment_translate (a : E) {x b c} : a + x ∈ [a + b, a + c] ↔ x ∈ [b, c] :=
begin
rw [segment_eq_image', segment_eq_image'],
refine exists_congr (λ θ, and_congr iff.rfl _),
simp only [add_sub_add_left_eq_sub, add_assoc, add_right_inj]
end
lemma segment_translate_preimage (a b c : E) : (λ x, a + x) ⁻¹' [a + b, a + c] = [b, c] :=
set.ext $ λ x, mem_segment_translate a
lemma segment_translate_image (a b c: E) : (λx, a + x) '' [b, c] = [a + b, a + c] :=
segment_translate_preimage a b c ▸ image_preimage_eq $ add_left_surjective a
/-! ### Convexity of sets -/
/-- Convexity of sets -/
def convex (s : set E) :=
∀ ⦃x y : E⦄, x ∈ s → y ∈ s → ∀ ⦃a b : ℝ⦄, 0 ≤ a → 0 ≤ b → a + b = 1 →
a • x + b • y ∈ s
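/- Illustrative sketch (not part of the original file): by the definition above, the midpoint of
two points of a convex set lies in the set; the weights `1/2, 1/2` are only an assumed example. -/
example (hs : convex s) {x y : E} (hx : x ∈ s) (hy : y ∈ s) :
  (1/2 : ℝ) • x + (1/2 : ℝ) • y ∈ s :=
hs hx hy (by norm_num) (by norm_num) (by norm_num)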
lemma convex_iff_forall_pos :
convex s ↔ ∀ ⦃x y⦄, x ∈ s → y ∈ s → ∀ ⦃a b : ℝ⦄, 0 < a → 0 < b → a + b = 1 → a • x + b • y ∈ s :=
begin
refine ⟨λ h x y hx hy a b ha hb hab, h hx hy (le_of_lt ha) (le_of_lt hb) hab, _⟩,
intros h x y hx hy a b ha hb hab,
cases eq_or_lt_of_le ha with ha ha,
{ subst a, rw [zero_add] at hab, simp [hab, hy] },
cases eq_or_lt_of_le hb with hb hb,
{ subst b, rw [add_zero] at hab, simp [hab, hx] },
exact h hx hy ha hb hab
end
lemma convex_iff_segment_subset : convex s ↔ ∀ ⦃x y⦄, x ∈ s → y ∈ s → [x, y] ⊆ s :=
by simp only [convex, segment_eq_image₂, subset_def, ball_image_iff, prod.forall,
mem_set_of_eq, and_imp]
lemma convex.segment_subset (h : convex s) {x y:E} (hx : x ∈ s) (hy : y ∈ s) : [x, y] ⊆ s :=
convex_iff_segment_subset.1 h hx hy
/-- Alternative definition of set convexity, in terms of pointwise set operations. -/
lemma convex_iff_pointwise_add_subset:
convex s ↔ ∀ ⦃a b : ℝ⦄, 0 ≤ a → 0 ≤ b → a + b = 1 → a • s + b • s ⊆ s :=
iff.intro
begin
rintros hA a b ha hb hab w ⟨au, bv, ⟨u, hu, rfl⟩, ⟨v, hv, rfl⟩, rfl⟩,
exact hA hu hv ha hb hab
end
(λ h x y hx hy a b ha hb hab,
(h ha hb hab) (set.add_mem_add ⟨_, hx, rfl⟩ ⟨_, hy, rfl⟩))
/-- Alternative definition of set convexity, using division -/
lemma convex_iff_div:
convex s ↔ ∀ ⦃x y : E⦄, x ∈ s → y ∈ s → ∀ ⦃a b : ℝ⦄,
0 ≤ a → 0 ≤ b → 0 < a + b → (a/(a+b)) • x + (b/(a+b)) • y ∈ s :=
⟨begin
assume h x y hx hy a b ha hb hab,
apply h hx hy,
have ha', from mul_le_mul_of_nonneg_left ha (le_of_lt (inv_pos.2 hab)),
rwa [mul_zero, ←div_eq_inv_mul] at ha',
have hb', from mul_le_mul_of_nonneg_left hb (le_of_lt (inv_pos.2 hab)),
rwa [mul_zero, ←div_eq_inv_mul] at hb',
rw [←add_div],
exact div_self (ne_of_lt hab).symm
end,
begin
assume h x y hx hy a b ha hb hab,
have h', from h hx hy ha hb,
rw [hab, div_one, div_one] at h',
exact h' zero_lt_one
end⟩
/-! ### Examples of convex sets -/
lemma convex_empty : convex (∅ : set E) := by finish
lemma convex_singleton (c : E) : convex ({c} : set E) :=
begin
intros x y hx hy a b ha hb hab,
rw [set.eq_of_mem_singleton hx, set.eq_of_mem_singleton hy, ←add_smul, hab, one_smul],
exact mem_singleton c
end
lemma convex_univ : convex (set.univ : set E) := λ _ _ _ _ _ _ _ _ _, trivial
lemma convex.inter {t : set E} (hs: convex s) (ht: convex t) : convex (s ∩ t) :=
λ x y (hx : x ∈ s ∩ t) (hy : y ∈ s ∩ t) a b (ha : 0 ≤ a) (hb : 0 ≤ b) (hab : a + b = 1),
⟨hs hx.left hy.left ha hb hab, ht hx.right hy.right ha hb hab⟩
lemma convex_sInter {S : set (set E)} (h : ∀ s ∈ S, convex s) : convex (⋂₀ S) :=
assume x y hx hy a b ha hb hab s hs,
h s hs (hx s hs) (hy s hs) ha hb hab
lemma convex_Inter {ι : Sort*} {s: ι → set E} (h: ∀ i : ι, convex (s i)) : convex (⋂ i, s i) :=
(sInter_range s) ▸ convex_sInter $ forall_range_iff.2 h
lemma convex.prod {s : set E} {t : set F} (hs : convex s) (ht : convex t) :
convex (s.prod t) :=
begin
intros x y hx hy a b ha hb hab,
apply mem_prod.2,
exact ⟨hs (mem_prod.1 hx).1 (mem_prod.1 hy).1 ha hb hab,
ht (mem_prod.1 hx).2 (mem_prod.1 hy).2 ha hb hab⟩
end
lemma convex.combo_to_vadd {a b : ℝ} {x y : E} (h : a + b = 1) :
a • x + b • y = b • (y - x) + x :=
eq.symm (calc
b • (y - x) + x = b • (y - x) + (1 : ℝ) • x : by rw [one_smul]
... = b • (y - x) + (a + b) • x : by rw [h]
... = (b • y - b • x) + (a • x + b • x) : by rw [add_smul, smul_sub]
... = a • x + b • y : by abel )
/--
Applying an affine map to an affine combination of two points yields
an affine combination of the images.
-/
lemma convex.combo_affine_apply {a b : ℝ} {x y : E} {f : E →ᵃ[ℝ] F} (h : a + b = 1) :
f (a • x + b • y) = a • f x + b • f y :=
begin
simp only [convex.combo_to_vadd h, ← vsub_eq_sub],
exact f.apply_line_map _ _ _,
end
/-- The preimage of a convex set under an affine map is convex. -/
lemma convex.affine_preimage (f : E →ᵃ[ℝ] F) {s : set F} (hs : convex s) :
convex (f ⁻¹' s) :=
begin
intros x y xs ys a b ha hb hab,
rw [mem_preimage, convex.combo_affine_apply hab],
exact hs xs ys ha hb hab,
end
/-- The image of a convex set under an affine map is convex. -/
lemma convex.affine_image (f : E →ᵃ[ℝ] F) {s : set E} (hs : convex s) :
convex (f '' s) :=
begin
rintros x y ⟨x', ⟨hx', hx'f⟩⟩ ⟨y', ⟨hy', hy'f⟩⟩ a b ha hb hab,
refine ⟨a • x' + b • y', ⟨hs hx' hy' ha hb hab, _⟩⟩,
rw [convex.combo_affine_apply hab, hx'f, hy'f]
end
lemma convex.linear_image (hs : convex s) (f : E →ₗ[ℝ] F) : convex (image f s) :=
hs.affine_image f.to_affine_map
lemma convex.is_linear_image (hs : convex s) {f : E → F} (hf : is_linear_map ℝ f) :
convex (f '' s) :=
hs.linear_image $ hf.mk' f
lemma convex.linear_preimage {s : set F} (hs : convex s) (f : E →ₗ[ℝ] F) :
convex (preimage f s) :=
hs.affine_preimage f.to_affine_map
lemma convex.is_linear_preimage {s : set F} (hs : convex s) {f : E → F} (hf : is_linear_map ℝ f) :
convex (preimage f s) :=
hs.linear_preimage $ hf.mk' f
lemma convex.neg (hs : convex s) : convex ((λ z, -z) '' s) :=
hs.is_linear_image is_linear_map.is_linear_map_neg
lemma convex.neg_preimage (hs : convex s) : convex ((λ z, -z) ⁻¹' s) :=
hs.is_linear_preimage is_linear_map.is_linear_map_neg
lemma convex.smul (c : ℝ) (hs : convex s) : convex (c • s) :=
hs.linear_image (linear_map.lsmul _ _ c)
lemma convex.smul_preimage (c : ℝ) (hs : convex s) : convex ((λ z, c • z) ⁻¹' s) :=
hs.linear_preimage (linear_map.lsmul _ _ c)
lemma convex.add {t : set E} (hs : convex s) (ht : convex t) : convex (s + t) :=
by { rw ← add_image_prod, exact (hs.prod ht).is_linear_image is_linear_map.is_linear_map_add }
lemma convex.sub {t : set E} (hs : convex s) (ht : convex t) :
convex ((λx : E × E, x.1 - x.2) '' (s.prod t)) :=
(hs.prod ht).is_linear_image is_linear_map.is_linear_map_sub
lemma convex.translate (hs : convex s) (z : E) : convex ((λx, z + x) '' s) :=
hs.affine_image $ affine_map.const ℝ E z +ᵥ affine_map.id ℝ E
/-- The translation of a convex set is also convex -/
lemma convex.translate_preimage_right (hs : convex s) (a : E) : convex ((λ z, a + z) ⁻¹' s) :=
hs.affine_preimage $ affine_map.const ℝ E a +ᵥ affine_map.id ℝ E
/-- The translation of a convex set is also convex -/
lemma convex.translate_preimage_left (hs : convex s) (a : E) : convex ((λ z, z + a) ⁻¹' s) :=
by simpa only [add_comm] using hs.translate_preimage_right a
lemma convex.affinity (hs : convex s) (z : E) (c : ℝ) : convex ((λx, z + c • x) '' s) :=
hs.affine_image $ affine_map.const ℝ E z +ᵥ c • affine_map.id ℝ E
lemma real.convex_iff_ord_connected {s : set ℝ} : convex s ↔ ord_connected s :=
begin
simp only [convex_iff_segment_subset, segment_eq_interval, ord_connected_iff_interval_subset],
exact forall_congr (λ x, forall_swap)
end
alias real.convex_iff_ord_connected ↔ convex.ord_connected set.ord_connected.convex
lemma convex_Iio (r : ℝ) : convex (Iio r) := ord_connected_Iio.convex
lemma convex_Ioi (r : ℝ) : convex (Ioi r) := ord_connected_Ioi.convex
lemma convex_Iic (r : ℝ) : convex (Iic r) := ord_connected_Iic.convex
lemma convex_Ici (r : ℝ) : convex (Ici r) := ord_connected_Ici.convex
lemma convex_Ioo (r s : ℝ) : convex (Ioo r s) := ord_connected_Ioo.convex
lemma convex_Ico (r s : ℝ) : convex (Ico r s) := ord_connected_Ico.convex
lemma convex_Ioc (r : ℝ) (s : ℝ) : convex (Ioc r s) := ord_connected_Ioc.convex
lemma convex_Icc (r : ℝ) (s : ℝ) : convex (Icc r s) := ord_connected_Icc.convex
lemma convex_interval (r : ℝ) (s : ℝ) : convex (interval r s) := ord_connected_interval.convex
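-- Illustrative sketch (not part of the original file): the interval lemmas above
-- combine with `convex.inter`, e.g. `Icc 0 1 ∩ Ioi 0` is convex.
example : convex (Icc (0:ℝ) 1 ∩ Ioi 0) := (convex_Icc 0 1).inter (convex_Ioi 0)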
lemma convex_segment (a b : E) : convex [a, b] :=
begin
have : (λ (t : ℝ), a + t • (b - a)) = (λz : E, a + z) ∘ (λt:ℝ, t • (b - a)) := rfl,
rw [segment_eq_image', this, image_comp],
refine ((convex_Icc _ _).is_linear_image _).translate _,
exact is_linear_map.is_linear_map_smul' _
end
lemma convex_halfspace_lt {f : E → ℝ} (h : is_linear_map ℝ f) (r : ℝ) :
convex {w | f w < r} :=
(convex_Iio r).is_linear_preimage h
lemma convex_halfspace_le {f : E → ℝ} (h : is_linear_map ℝ f) (r : ℝ) :
convex {w | f w ≤ r} :=
(convex_Iic r).is_linear_preimage h
lemma convex_halfspace_gt {f : E → ℝ} (h : is_linear_map ℝ f) (r : ℝ) :
convex {w | r < f w} :=
(convex_Ioi r).is_linear_preimage h
lemma convex_halfspace_ge {f : E → ℝ} (h : is_linear_map ℝ f) (r : ℝ) :
convex {w | r ≤ f w} :=
(convex_Ici r).is_linear_preimage h
lemma convex_hyperplane {f : E → ℝ} (h : is_linear_map ℝ f) (r : ℝ) :
convex {w | f w = r} :=
begin
show convex (f ⁻¹' {p | p = r}),
rw set_of_eq_eq_singleton,
exact (convex_singleton r).is_linear_preimage h
end
lemma convex_halfspace_re_lt (r : ℝ) : convex {c : ℂ | c.re < r} :=
convex_halfspace_lt (is_linear_map.mk complex.add_re complex.smul_re) _
lemma convex_halfspace_re_le (r : ℝ) : convex {c : ℂ | c.re ≤ r} :=
convex_halfspace_le (is_linear_map.mk complex.add_re complex.smul_re) _
lemma convex_halfspace_re_gt (r : ℝ) : convex {c : ℂ | r < c.re } :=
convex_halfspace_gt (is_linear_map.mk complex.add_re complex.smul_re) _
lemma convex_halfspace_re_lge (r : ℝ) : convex {c : ℂ | r ≤ c.re} :=
convex_halfspace_ge (is_linear_map.mk complex.add_re complex.smul_re) _
lemma convex_halfspace_im_lt (r : ℝ) : convex {c : ℂ | c.im < r} :=
convex_halfspace_lt (is_linear_map.mk complex.add_im complex.smul_im) _
lemma convex_halfspace_im_le (r : ℝ) : convex {c : ℂ | c.im ≤ r} :=
convex_halfspace_le (is_linear_map.mk complex.add_im complex.smul_im) _
lemma convex_halfspace_im_gt (r : ℝ) : convex {c : ℂ | r < c.im } :=
convex_halfspace_gt (is_linear_map.mk complex.add_im complex.smul_im) _
lemma convex_halfspace_im_lge (r : ℝ) : convex {c : ℂ | r ≤ c.im} :=
convex_halfspace_ge (is_linear_map.mk complex.add_im complex.smul_im) _
/- Convex combinations in intervals -/
lemma convex.combo_self (a : α) {x y : α} (h : x + y = 1) : a = x * a + y * a :=
calc
a = 1 * a : by rw [one_mul]
... = (x + y) * a : by rw [h]
... = x * a + y * a : by rw [add_mul]
/--
If x is in an Ioo, it can be expressed as a convex combination of the endpoints.
-/
lemma convex.mem_Ioo {a b x : α} (h : a < b) :
x ∈ Ioo a b ↔ ∃ (x_a x_b : α), 0 < x_a ∧ 0 < x_b ∧ x_a + x_b = 1 ∧ x_a * a + x_b * b = x :=
begin
split,
{ rintros ⟨h_ax, h_bx⟩,
by_cases hab : ¬a < b,
{ exfalso; exact hab h },
{ refine ⟨(b-x) / (b-a), (x-a) / (b-a), _⟩,
refine ⟨div_pos (by linarith) (by linarith), div_pos (by linarith) (by linarith),_,_⟩;
{ field_simp [show b - a ≠ 0, by linarith], ring } } },
{ rw [mem_Ioo],
rintros ⟨xa, xb, ⟨hxa, hxb, hxaxb, h₂⟩⟩,
rw [←h₂],
exact ⟨by nlinarith [convex.combo_self a hxaxb], by nlinarith [convex.combo_self b hxaxb]⟩ }
end
/-- If x is in an Ioc, it can be expressed as a convex combination of the endpoints. -/
lemma convex.mem_Ioc {a b x : α} (h : a < b) :
x ∈ Ioc a b ↔ ∃ (x_a x_b : α), 0 ≤ x_a ∧ 0 < x_b ∧ x_a + x_b = 1 ∧ x_a * a + x_b * b = x :=
begin
split,
{ rintros ⟨h_ax, h_bx⟩,
by_cases h_x : x = b,
{ exact ⟨0, 1, by linarith, by linarith, by ring, by {rw [h_x], ring}⟩ },
{ rcases (convex.mem_Ioo h).mp ⟨h_ax, lt_of_le_of_ne h_bx h_x⟩ with ⟨x_a, x_b, Ioo_case⟩,
exact ⟨x_a, x_b, by linarith, Ioo_case.2⟩ } },
{ rw [mem_Ioc],
rintros ⟨xa, xb, ⟨hxa, hxb, hxaxb, h₂⟩⟩,
rw [←h₂],
exact ⟨by nlinarith [convex.combo_self a hxaxb], by nlinarith [convex.combo_self b hxaxb]⟩ }
end
/-- If x is in an Ico, it can be expressed as a convex combination of the endpoints. -/
lemma convex.mem_Ico {a b x : α} (h : a < b) :
x ∈ Ico a b ↔ ∃ (x_a x_b : α), 0 < x_a ∧ 0 ≤ x_b ∧ x_a + x_b = 1 ∧ x_a * a + x_b * b = x :=
begin
split,
{ rintros ⟨h_ax, h_bx⟩,
by_cases h_x : x = a,
{ exact ⟨1, 0, by linarith, by linarith, by ring, by {rw [h_x], ring}⟩ },
{ rcases (convex.mem_Ioo h).mp ⟨lt_of_le_of_ne h_ax (ne.symm h_x), h_bx⟩
with ⟨x_a, x_b, Ioo_case⟩,
exact ⟨x_a, x_b, Ioo_case.1, by linarith, (Ioo_case.2).2⟩ } },
{ rw [mem_Ico],
rintros ⟨xa, xb, ⟨hxa, hxb, hxaxb, h₂⟩⟩,
rw [←h₂],
exact ⟨by nlinarith [convex.combo_self a hxaxb], by nlinarith [convex.combo_self b hxaxb]⟩ }
end
/-- If x is in an Icc, it can be expressed as a convex combination of the endpoints. -/
lemma convex.mem_Icc {a b x : α} (h : a ≤ b):
x ∈ Icc a b ↔ ∃ (x_a x_b : α), 0 ≤ x_a ∧ 0 ≤ x_b ∧ x_a + x_b = 1 ∧ x_a * a + x_b * b = x :=
begin
split,
{ intro x_in_I,
rw [Icc, mem_set_of_eq] at x_in_I,
rcases x_in_I with ⟨h_ax, h_bx⟩,
by_cases hab' : a = b,
{ exact ⟨0, 1, le_refl 0, by linarith, by ring, by linarith⟩ },
change a ≠ b at hab',
replace h : a < b, exact lt_of_le_of_ne h hab',
by_cases h_x : x = a,
{ exact ⟨1, 0, by linarith, by linarith, by ring, by {rw [h_x], ring}⟩ },
{ rcases (convex.mem_Ioc h).mp ⟨lt_of_le_of_ne h_ax (ne.symm h_x), h_bx⟩
with ⟨x_a, x_b, Ioo_case⟩,
exact ⟨x_a, x_b, Ioo_case.1, by linarith, (Ioo_case.2).2⟩ } },
{ rw [mem_Icc],
rintros ⟨xa, xb, ⟨hxa, hxb, hxaxb, h₂⟩⟩,
rw [←h₂],
exact ⟨by nlinarith [convex.combo_self a hxaxb], by nlinarith [convex.combo_self b hxaxb]⟩ }
end
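-- Illustrative sketch (not part of the original file): by `convex.mem_Icc`, the point
-- `1/4` of `Icc 0 1` is the convex combination of the endpoints with weights `3/4`
-- and `1/4`; `norm_num` discharges the arithmetic side goals.
example : (1/4 : ℝ) ∈ Icc (0:ℝ) 1 :=
(convex.mem_Icc (@zero_le_one ℝ _)).2
  ⟨3/4, 1/4, by norm_num, by norm_num, by norm_num, by norm_num⟩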
section submodule
open submodule
lemma submodule.convex (K : submodule ℝ E) : convex (↑K : set E) :=
by { repeat {intro}, refine add_mem _ (smul_mem _ _ _) (smul_mem _ _ _); assumption }
lemma subspace.convex (K : subspace ℝ E) : convex (↑K : set E) := K.convex
end submodule
end sets
section functions
variables {β : Type*} [ordered_add_comm_monoid β] [semimodule ℝ β]
local notation `[`x `, ` y `]` := segment x y
/-! ### Convex functions -/
/-- Convexity of functions -/
def convex_on (s : set E) (f : E → β) : Prop :=
convex s ∧
∀ ⦃x y : E⦄, x ∈ s → y ∈ s → ∀ ⦃a b : ℝ⦄, 0 ≤ a → 0 ≤ b → a + b = 1 →
f (a • x + b • y) ≤ a • f x + b • f y
/-- Concavity of functions -/
def concave_on (s : set E) (f : E → β) : Prop :=
convex s ∧
∀ ⦃x y : E⦄, x ∈ s → y ∈ s → ∀ ⦃a b : ℝ⦄, 0 ≤ a → 0 ≤ b → a + b = 1 →
a • f x + b • f y ≤ f (a • x + b • y)
/-- A function f is concave iff -f is convex -/
@[simp] lemma neg_convex_on_iff {γ : Type*} [ordered_add_comm_group γ] [semimodule ℝ γ]
(s : set E) (f : E → γ) : convex_on s (-f) ↔ concave_on s f :=
begin
split,
{ rintros ⟨hconv, h⟩,
refine ⟨hconv, _⟩,
intros x y xs ys a b ha hb hab,
specialize h xs ys ha hb hab,
simp [neg_apply, neg_le, add_comm] at h,
exact h },
{ rintros ⟨hconv, h⟩,
refine ⟨hconv, _⟩,
intros x y xs ys a b ha hb hab,
specialize h xs ys ha hb hab,
simp [neg_apply, neg_le, add_comm, h] }
end
/-- A function f is convex iff -f is concave -/
@[simp] lemma neg_concave_on_iff {γ : Type*} [ordered_add_comm_group γ] [semimodule ℝ γ]
(s : set E) (f : E → γ) : concave_on s (-f) ↔ convex_on s f:=
by rw [← neg_convex_on_iff s (-f), neg_neg f]
lemma convex_on_id {s : set ℝ} (hs : convex s) : convex_on s id := ⟨hs, by { intros, refl }⟩
lemma concave_on_id {s : set ℝ} (hs : convex s) : concave_on s id := ⟨hs, by { intros, refl }⟩
lemma convex_on_const (c : β) (hs : convex s) : convex_on s (λ x:E, c) :=
⟨hs, by { intros, simp only [← add_smul, *, one_smul] }⟩
lemma concave_on_const (c : β) (hs : convex s) : concave_on s (λ x:E, c) :=
@convex_on_const _ _ _ _ (order_dual β) _ _ c hs
variables {t : set E}
lemma convex_on_iff_div {f : E → β} :
convex_on s f ↔ convex s ∧ ∀ ⦃x y : E⦄, x ∈ s → y ∈ s → ∀ ⦃a b : ℝ⦄, 0 ≤ a → 0 ≤ b → 0 < a + b →
f ((a/(a+b)) • x + (b/(a+b)) • y) ≤ (a/(a+b)) • f x + (b/(a+b)) • f y :=
and_congr iff.rfl
⟨begin
intros h x y hx hy a b ha hb hab,
apply h hx hy (div_nonneg ha $ le_of_lt hab) (div_nonneg hb $ le_of_lt hab),
rw [←add_div],
exact div_self (ne_of_gt hab)
end,
begin
intros h x y hx hy a b ha hb hab,
simpa [hab, zero_lt_one] using h hx hy ha hb,
end⟩
lemma concave_on_iff_div {f : E → β} :
concave_on s f ↔ convex s ∧ ∀ ⦃x y : E⦄, x ∈ s → y ∈ s → ∀ ⦃a b : ℝ⦄, 0 ≤ a → 0 ≤ b → 0 < a + b →
(a/(a+b)) • f x + (b/(a+b)) • f y ≤ f ((a/(a+b)) • x + (b/(a+b)) • y) :=
@convex_on_iff_div _ _ _ _ (order_dual β) _ _ _
/-- For a function on a convex set in a linear ordered space, in order to prove that it is convex
it suffices to verify the inequality `f (a • x + b • y) ≤ a • f x + b • f y` only for `x < y`
and positive `a`, `b`. The main use case is `E = ℝ`; however, one can apply it, e.g., to `ℝ^n` with
lexicographic order. -/
lemma linear_order.convex_on_of_lt {f : E → β} [linear_order E] (hs : convex s)
(hf : ∀ ⦃x y : E⦄, x ∈ s → y ∈ s → x < y → ∀ ⦃a b : ℝ⦄, 0 < a → 0 < b → a + b = 1 →
f (a • x + b • y) ≤ a • f x + b • f y) : convex_on s f :=
begin
use hs,
intros x y hx hy a b ha hb hab,
wlog hxy : x ≤ y using [x y a b, y x b a],
{ exact le_total _ _ },
{ cases eq_or_lt_of_le hxy with hxy hxy,
by { subst y, rw [← add_smul, ← add_smul, hab, one_smul, one_smul] },
cases eq_or_lt_of_le ha with ha ha,
by { subst a, rw [zero_add] at hab, subst b, simp },
cases eq_or_lt_of_le hb with hb hb,
by { subst b, rw [add_zero] at hab, subst a, simp },
exact hf hx hy hxy ha hb hab }
end
/-- For a function on a convex set in a linear ordered space, in order to prove that it is concave
it suffices to verify the inequality `a • f x + b • f y ≤ f (a • x + b • y)` only for `x < y`
and positive `a`, `b`. The main use case is `E = ℝ`; however, one can apply it, e.g., to `ℝ^n` with
lexicographic order. -/
lemma linear_order.concave_on_of_lt {f : E → β} [linear_order E] (hs : convex s)
(hf : ∀ ⦃x y : E⦄, x ∈ s → y ∈ s → x < y → ∀ ⦃a b : ℝ⦄, 0 < a → 0 < b → a + b = 1 →
a • f x + b • f y ≤ f (a • x + b • y)) : concave_on s f :=
@linear_order.convex_on_of_lt _ _ _ _ (order_dual β) _ _ f _ hs hf
/-- For a function `f` defined on a convex subset `D` of `ℝ`, if for any three points `x<y<z`
the slope of the secant line of `f` on `[x, y]` is less than or equal to the slope
of the secant line of `f` on `[x, z]`, then `f` is convex on `D`. This way of proving convexity
of a function is used in the proof of convexity of a function with a monotone derivative. -/
lemma convex_on_real_of_slope_mono_adjacent {s : set ℝ} (hs : convex s) {f : ℝ → ℝ}
(hf : ∀ {x y z : ℝ}, x ∈ s → z ∈ s → x < y → y < z →
(f y - f x) / (y - x) ≤ (f z - f y) / (z - y)) :
convex_on s f :=
linear_order.convex_on_of_lt hs
begin
assume x z hx hz hxz a b ha hb hab,
let y := a * x + b * z,
have hxy : x < y,
{ rw [← one_mul x, ← hab, add_mul],
exact add_lt_add_left ((mul_lt_mul_left hb).2 hxz) _ },
have hyz : y < z,
{ rw [← one_mul z, ← hab, add_mul],
exact add_lt_add_right ((mul_lt_mul_left ha).2 hxz) _ },
have : (f y - f x) * (z - y) ≤ (f z - f y) * (y - x),
from (div_le_div_iff (sub_pos.2 hxy) (sub_pos.2 hyz)).1 (hf hx hz hxy hyz),
have A : z - y + (y - x) = z - x, by abel,
have B : 0 < z - x, from sub_pos.2 (lt_trans hxy hyz),
rw [sub_mul, sub_mul, sub_le_iff_le_add', ← add_sub_assoc, le_sub_iff_add_le, ← mul_add, A,
← le_div_iff B, add_div, mul_div_assoc, mul_div_assoc,
mul_comm (f x), mul_comm (f z)] at this,
rw [eq_comm, ← sub_eq_iff_eq_add] at hab; subst a,
convert this; symmetry; simp only [div_eq_iff (ne_of_gt B), y]; ring
end
/-- For a function `f` defined on a subset `D` of `ℝ`, if `f` is convex on `D`, then for any three
points `x<y<z`, the slope of the secant line of `f` on `[x, y]` is less than or equal to the slope
of the secant line of `f` on `[x, z]`. -/
lemma convex_on.slope_mono_adjacent {s : set ℝ} {f : ℝ → ℝ} (hf : convex_on s f)
{x y z : ℝ} (hx : x ∈ s) (hz : z ∈ s) (hxy : x < y) (hyz : y < z) :
(f y - f x) / (y - x) ≤ (f z - f y) / (z - y) :=
begin
have h₁ : 0 < y - x := by linarith,
have h₂ : 0 < z - y := by linarith,
have h₃ : 0 < z - x := by linarith,
suffices : f y / (y - x) + f y / (z - y) ≤ f x / (y - x) + f z / (z - y),
by { ring at this ⊢, linarith },
set a := (z - y) / (z - x),
set b := (y - x) / (z - x),
have heqz : a • x + b • z = y, by { field_simp, rw div_eq_iff; [ring, linarith], },
have key, from
hf.2 hx hz
(show 0 ≤ a, by apply div_nonneg; linarith)
(show 0 ≤ b, by apply div_nonneg; linarith)
(show a + b = 1, by { field_simp, rw div_eq_iff; [ring, linarith], }),
rw heqz at key,
replace key := mul_le_mul_of_nonneg_left key (le_of_lt h₃),
field_simp [ne_of_gt h₁, ne_of_gt h₂, ne_of_gt h₃, mul_comm (z - x) _] at key ⊢,
rw div_le_div_right,
{ linarith, },
{ nlinarith, },
end
/-- For a function `f` defined on a convex subset `D` of `ℝ`, `f` is convex on `D` iff for any three
points `x<y<z` the slope of the secant line of `f` on `[x, y]` is less than or equal to the slope
of the secant line of `f` on `[x, z]`. -/
lemma convex_on_real_iff_slope_mono_adjacent {s : set ℝ} (hs : convex s) {f : ℝ → ℝ} :
convex_on s f ↔
(∀ {x y z : ℝ}, x ∈ s → z ∈ s → x < y → y < z →
(f y - f x) / (y - x) ≤ (f z - f y) / (z - y)) :=
⟨convex_on.slope_mono_adjacent, convex_on_real_of_slope_mono_adjacent hs⟩
/-- For a function `f` defined on a convex subset `D` of `ℝ`, if for any three points `x<y<z`
the slope of the secant line of `f` on `[x, y]` is greater than or equal to the slope
of the secant line of `f` on `[x, z]`, then `f` is concave on `D`. -/
lemma concave_on_real_of_slope_mono_adjacent {s : set ℝ} (hs : convex s) {f : ℝ → ℝ}
(hf : ∀ {x y z : ℝ}, x ∈ s → z ∈ s → x < y → y < z →
(f z - f y) / (z - y) ≤ (f y - f x) / (y - x)) : concave_on s f :=
begin
rw [←neg_convex_on_iff],
apply convex_on_real_of_slope_mono_adjacent hs,
intros x y z xs zs xy yz,
rw [←neg_le_neg_iff, ←neg_div, ←neg_div, neg_sub, neg_sub],
simp only [hf xs zs xy yz, neg_sub_neg, pi.neg_apply],
end
/-- For a function `f` defined on a subset `D` of `ℝ`, if `f` is concave on `D`, then for any three
points `x<y<z`, the slope of the secant line of `f` on `[x, y]` is greater than or equal to the
slope of the secant line of `f` on `[x, z]`. -/
lemma concave_on.slope_mono_adjacent {s : set ℝ} {f : ℝ → ℝ} (hf : concave_on s f)
{x y z : ℝ} (hx : x ∈ s) (hz : z ∈ s) (hxy : x < y) (hyz : y < z) :
(f z - f y) / (z - y) ≤ (f y - f x) / (y - x) :=
begin
rw [←neg_le_neg_iff, ←neg_div, ←neg_div, neg_sub, neg_sub],
rw [←neg_sub_neg (f y), ←neg_sub_neg (f z)],
simp_rw [←pi.neg_apply],
rw [←neg_convex_on_iff] at hf,
apply convex_on.slope_mono_adjacent hf; assumption,
end
/-- For a function `f` defined on a convex subset `D` of `ℝ`, `f` is concave on `D` iff for any
three points `x<y<z` the slope of the secant line of `f` on `[x, y]` is greater than or equal to
the slope of the secant line of `f` on `[x, z]`. -/
lemma concave_on_real_iff_slope_mono_adjacent {s : set ℝ} (hs : convex s) {f : ℝ → ℝ} :
concave_on s f ↔
(∀ {x y z : ℝ}, x ∈ s → z ∈ s → x < y → y < z →
(f z - f y) / (z - y) ≤ (f y - f x) / (y - x)) :=
⟨concave_on.slope_mono_adjacent, concave_on_real_of_slope_mono_adjacent hs⟩
lemma convex_on.subset {f : E → β} (h_convex_on : convex_on t f)
(h_subset : s ⊆ t) (h_convex : convex s) : convex_on s f :=
begin
apply and.intro h_convex,
intros x y hx hy,
exact h_convex_on.2 (h_subset hx) (h_subset hy),
end
lemma concave_on.subset {f : E → β}
(h_concave_on : concave_on t f) (h_subset : s ⊆ t) (h_convex : convex s) : concave_on s f :=
@convex_on.subset _ _ _ _ (order_dual β) _ _ t f h_concave_on h_subset h_convex
lemma convex_on.add {f g : E → β} (hf : convex_on s f) (hg : convex_on s g) :
convex_on s (λx, f x + g x) :=
begin
apply and.intro hf.1,
intros x y hx hy a b ha hb hab,
calc
f (a • x + b • y) + g (a • x + b • y) ≤ (a • f x + b • f y) + (a • g x + b • g y)
: add_le_add (hf.2 hx hy ha hb hab) (hg.2 hx hy ha hb hab)
... = a • f x + a • g x + b • f y + b • g y : by abel
... = a • (f x + g x) + b • (f y + g y) : by simp [smul_add, add_assoc]
end
lemma concave_on.add {f g : E → β} (hf : concave_on s f) (hg : concave_on s g) :
concave_on s (λx, f x + g x) :=
@convex_on.add _ _ _ _ (order_dual β) _ _ f g hf hg
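-- Illustrative sketch (not part of the original file): combining the closure rules
-- above, `x ↦ x + 3` is convex on any convex `A ⊆ ℝ`, being the sum of the identity
-- and a constant function.
example {A : set ℝ} (hA : convex A) : convex_on A (λ x, x + 3) :=
(convex_on_id hA).add (convex_on_const 3 hA)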
lemma convex_on.smul [ordered_semimodule ℝ β] {f : E → β} {c : ℝ} (hc : 0 ≤ c)
(hf : convex_on s f) : convex_on s (λx, c • f x) :=
begin
apply and.intro hf.1,
intros x y hx hy a b ha hb hab,
calc
c • f (a • x + b • y) ≤ c • (a • f x + b • f y)
: smul_le_smul_of_nonneg (hf.2 hx hy ha hb hab) hc
... = a • (c • f x) + b • (c • f y) : by simp only [smul_add, smul_comm c]
end
lemma concave_on.smul [ordered_semimodule ℝ β] {f : E → β} {c : ℝ} (hc : 0 ≤ c)
(hf : concave_on s f) : concave_on s (λx, c • f x) :=
@convex_on.smul _ _ _ _ (order_dual β) _ _ _ f c hc hf
/-- A convex function on a segment is upper-bounded by the max of its values at the endpoints. -/
lemma convex_on.le_on_segment' {γ : Type*}
[linear_ordered_add_comm_group γ] [semimodule ℝ γ] [ordered_semimodule ℝ γ]
{f : E → γ} {x y : E} {a b : ℝ}
(hf : convex_on s f) (hx : x ∈ s) (hy : y ∈ s) (ha : 0 ≤ a) (hb : 0 ≤ b) (hab : a + b = 1) :
f (a • x + b • y) ≤ max (f x) (f y) :=
calc
f (a • x + b • y) ≤ a • f x + b • f y : hf.2 hx hy ha hb hab
... ≤ a • max (f x) (f y) + b • max (f x) (f y) :
add_le_add (smul_le_smul_of_nonneg (le_max_left _ _) ha) (smul_le_smul_of_nonneg (le_max_right _ _) hb)
... ≤ max (f x) (f y) : by rw [←add_smul, hab, one_smul]
/-- A concave function on a segment is lower-bounded by the min of its values at the endpoints. -/
lemma concave_on.le_on_segment' {γ : Type*}
[linear_ordered_add_comm_group γ] [semimodule ℝ γ] [ordered_semimodule ℝ γ]
{f : E → γ} {x y : E} {a b : ℝ}
(hf : concave_on s f) (hx : x ∈ s) (hy : y ∈ s) (ha : 0 ≤ a) (hb : 0 ≤ b) (hab : a + b = 1) :
min (f x) (f y) ≤ f (a • x + b • y) :=
@convex_on.le_on_segment' _ _ _ _ (order_dual γ) _ _ _ f x y a b hf hx hy ha hb hab
/-- A convex function on a segment is upper-bounded by the max of its values at the endpoints. -/
lemma convex_on.le_on_segment {γ : Type*}
[linear_ordered_add_comm_group γ] [semimodule ℝ γ] [ordered_semimodule ℝ γ]
{f : E → γ} (hf : convex_on s f) {x y z : E}
(hx : x ∈ s) (hy : y ∈ s) (hz : z ∈ [x, y]) :
f z ≤ max (f x) (f y) :=
let ⟨a, b, ha, hb, hab, hz⟩ := hz in hz ▸ hf.le_on_segment' hx hy ha hb hab
/-- A concave function on a segment is lower-bounded by the min of its values at the endpoints. -/
lemma concave_on.le_on_segment {γ : Type*}
[linear_ordered_add_comm_group γ] [semimodule ℝ γ] [ordered_semimodule ℝ γ]
{f : E → γ} (hf : concave_on s f) {x y z : E}
(hx : x ∈ s) (hy : y ∈ s) (hz : z ∈ [x, y]) :
min (f x) (f y) ≤ f z :=
@convex_on.le_on_segment _ _ _ _ (order_dual γ) _ _ _ f hf x y z hx hy hz
lemma convex_on.convex_le [ordered_semimodule ℝ β] {f : E → β} (hf : convex_on s f) (r : β) :
convex {x ∈ s | f x ≤ r} :=
convex_iff_segment_subset.2 $ λ x y hx hy z hz,
begin
refine ⟨hf.1.segment_subset hx.1 hy.1 hz,_⟩,
rcases hz with ⟨za,zb,hza,hzb,hzazb,H⟩,
rw ←H,
calc
f (za • x + zb • y) ≤ za • (f x) + zb • (f y) : hf.2 hx.1 hy.1 hza hzb hzazb
... ≤ za • r + zb • r
: add_le_add (smul_le_smul_of_nonneg hx.2 hza)
(smul_le_smul_of_nonneg hy.2 hzb)
... ≤ r : by simp [←add_smul, hzazb]
end
lemma concave_on.concave_le [ordered_semimodule ℝ β] {f : E → β} (hf : concave_on s f) (r : β) :
convex {x ∈ s | r ≤ f x} :=
@convex_on.convex_le _ _ _ _ (order_dual β) _ _ _ f hf r
lemma convex_on.convex_lt {γ : Type*} [ordered_cancel_add_comm_monoid γ]
[semimodule ℝ γ] [ordered_semimodule ℝ γ]
{f : E → γ} (hf : convex_on s f) (r : γ) : convex {x ∈ s | f x < r} :=
begin
intros a b as bs xa xb hxa hxb hxaxb,
refine ⟨hf.1 as.1 bs.1 hxa hxb hxaxb,_⟩,
dsimp,
by_cases H : xa = 0,
{ have H' : xb = 1 := by rwa [H, zero_add] at hxaxb,
rw [H, H', zero_smul, one_smul, zero_add],
exact bs.2 },
{ calc
f (xa • a + xb • b) ≤ xa • (f a) + xb • (f b) : hf.2 as.1 bs.1 hxa hxb hxaxb
... < xa • r + xb • (f b)
: (add_lt_add_iff_right (xb • (f b))).mpr
(smul_lt_smul_of_pos as.2
(lt_of_le_of_ne hxa (ne.symm H)))
... ≤ xa • r + xb • r
: (add_le_add_iff_left (xa • r)).mpr
(smul_le_smul_of_nonneg (le_of_lt bs.2) hxb)
... = r
: by simp only [←add_smul, hxaxb, one_smul] }
end
lemma concave_on.convex_lt {γ : Type*} [ordered_cancel_add_comm_monoid γ]
[semimodule ℝ γ] [ordered_semimodule ℝ γ]
{f : E → γ} (hf : concave_on s f) (r : γ) : convex {x ∈ s | r < f x} :=
@convex_on.convex_lt _ _ _ _ (order_dual γ) _ _ _ f hf r
lemma convex_on.convex_epigraph {γ : Type*} [ordered_add_comm_group γ]
[semimodule ℝ γ] [ordered_semimodule ℝ γ]
{f : E → γ} (hf : convex_on s f) :
convex {p : E × γ | p.1 ∈ s ∧ f p.1 ≤ p.2} :=
begin
rintros ⟨x, r⟩ ⟨y, t⟩ ⟨hx, hr⟩ ⟨hy, ht⟩ a b ha hb hab,
refine ⟨hf.1 hx hy ha hb hab, _⟩,
calc f (a • x + b • y) ≤ a • f x + b • f y : hf.2 hx hy ha hb hab
... ≤ a • r + b • t : add_le_add (smul_le_smul_of_nonneg hr ha)
(smul_le_smul_of_nonneg ht hb)
end
lemma concave_on.convex_hypograph {γ : Type*} [ordered_add_comm_group γ]
[semimodule ℝ γ] [ordered_semimodule ℝ γ]
{f : E → γ} (hf : concave_on s f) :
convex {p : E × γ | p.1 ∈ s ∧ p.2 ≤ f p.1} :=
@convex_on.convex_epigraph _ _ _ _ (order_dual γ) _ _ _ f hf
lemma convex_on_iff_convex_epigraph {γ : Type*} [ordered_add_comm_group γ]
[semimodule ℝ γ] [ordered_semimodule ℝ γ]
{f : E → γ} :
convex_on s f ↔ convex {p : E × γ | p.1 ∈ s ∧ f p.1 ≤ p.2} :=
begin
refine ⟨convex_on.convex_epigraph, λ h, ⟨_, _⟩⟩,
{ assume x y hx hy a b ha hb hab,
exact (@h (x, f x) (y, f y) ⟨hx, le_refl _⟩ ⟨hy, le_refl _⟩ a b ha hb hab).1 },
{ assume x y hx hy a b ha hb hab,
exact (@h (x, f x) (y, f y) ⟨hx, le_refl _⟩ ⟨hy, le_refl _⟩ a b ha hb hab).2 }
end
lemma concave_on_iff_convex_hypograph {γ : Type*} [ordered_add_comm_group γ]
[semimodule ℝ γ] [ordered_semimodule ℝ γ]
{f : E → γ} :
concave_on s f ↔ convex {p : E × γ | p.1 ∈ s ∧ p.2 ≤ f p.1} :=
@convex_on_iff_convex_epigraph _ _ _ _ (order_dual γ) _ _ _ f
/-- If a function is convex on s, it remains convex when precomposed by an affine map -/
lemma convex_on.comp_affine_map {f : F → β} (g : E →ᵃ[ℝ] F) {s : set F}
(hf : convex_on s f) : convex_on (g ⁻¹' s) (f ∘ g) :=
begin
refine ⟨hf.1.affine_preimage _,_⟩,
intros x y xs ys a b ha hb hab,
calc
(f ∘ g) (a • x + b • y) = f (g (a • x + b • y)) : rfl
... = f (a • (g x) + b • (g y)) : by rw [convex.combo_affine_apply hab]
... ≤ a • f (g x) + b • f (g y) : hf.2 xs ys ha hb hab
... = a • (f ∘ g) x + b • (f ∘ g) y : rfl
end
/-- If a function is concave on s, it remains concave when precomposed by an affine map -/
lemma concave_on.comp_affine_map {f : F → β} (g : E →ᵃ[ℝ] F) {s : set F}
(hf : concave_on s f) : concave_on (g ⁻¹' s) (f ∘ g) :=
@convex_on.comp_affine_map _ _ _ _ _ _ (order_dual β) _ _ f g s hf
/-- If g is convex on s, so is (g ∘ f) on f ⁻¹' s for a linear f. -/
lemma convex_on.comp_linear_map {g : F → β} {s : set F} (hg : convex_on s g) (f : E →ₗ[ℝ] F) :
convex_on (f ⁻¹' s) (g ∘ f) :=
hg.comp_affine_map f.to_affine_map
/-- If g is concave on s, so is (g ∘ f) on f ⁻¹' s for a linear f. -/
lemma concave_on.comp_linear_map {g : F → β} {s : set F} (hg : concave_on s g) (f : E →ₗ[ℝ] F) :
concave_on (f ⁻¹' s) (g ∘ f) :=
hg.comp_affine_map f.to_affine_map
/-- If a function is convex on s, it remains convex after a translation. -/
lemma convex_on.translate_right {f : E → β} {s : set E} {a : E} (hf : convex_on s f) :
convex_on ((λ z, a + z) ⁻¹' s) (f ∘ (λ z, a + z)) :=
hf.comp_affine_map $ affine_map.const ℝ E a +ᵥ affine_map.id ℝ E
/-- If a function is concave on s, it remains concave after a translation. -/
lemma concave_on.translate_right {f : E → β} {s : set E} {a : E} (hf : concave_on s f) :
concave_on ((λ z, a + z) ⁻¹' s) (f ∘ (λ z, a + z)) :=
hf.comp_affine_map $ affine_map.const ℝ E a +ᵥ affine_map.id ℝ E
/-- If a function is convex on s, it remains convex after a translation. -/
lemma convex_on.translate_left {f : E → β} {s : set E} {a : E} (hf : convex_on s f) :
convex_on ((λ z, a + z) ⁻¹' s) (f ∘ (λ z, z + a)) :=
by simpa only [add_comm] using hf.translate_right
/-- If a function is concave on s, it remains concave after a translation. -/
lemma concave_on.translate_left {f : E → β} {s : set E} {a : E} (hf : concave_on s f) :
concave_on ((λ z, a + z) ⁻¹' s) (f ∘ (λ z, z + a)) :=
by simpa only [add_comm] using hf.translate_right
end functions
section center_mass
/-- Center of mass of a finite collection of points with prescribed weights.
Note that we require neither `0 ≤ w i` nor `∑ w = 1`. -/
noncomputable def finset.center_mass (t : finset ι) (w : ι → ℝ) (z : ι → E) : E :=
(∑ i in t, w i)⁻¹ • (∑ i in t, w i • z i)
variables (i j : ι) (c : ℝ) (t : finset ι) (w : ι → ℝ) (z : ι → E)
open finset
lemma finset.center_mass_empty : (∅ : finset ι).center_mass w z = 0 :=
by simp only [center_mass, sum_empty, smul_zero]
lemma finset.center_mass_pair (hne : i ≠ j) :
({i, j} : finset ι).center_mass w z = (w i / (w i + w j)) • z i + (w j / (w i + w j)) • z j :=
by simp only [center_mass, sum_pair hne, smul_add, (mul_smul _ _ _).symm, div_eq_inv_mul]
variable {w}
lemma finset.center_mass_insert (ha : i ∉ t) (hw : ∑ j in t, w j ≠ 0) :
(insert i t).center_mass w z = (w i / (w i + ∑ j in t, w j)) • z i +
((∑ j in t, w j) / (w i + ∑ j in t, w j)) • t.center_mass w z :=
begin
simp only [center_mass, sum_insert ha, smul_add, (mul_smul _ _ _).symm],
congr' 2,
{ apply mul_comm },
{ rw [div_mul_eq_mul_div, mul_inv_cancel hw, one_div] }
end
lemma finset.center_mass_singleton (hw : w i ≠ 0) : ({i} : finset ι).center_mass w z = z i :=
by rw [center_mass, sum_singleton, sum_singleton, ← mul_smul, inv_mul_cancel hw, one_smul]
lemma finset.center_mass_eq_of_sum_1 (hw : ∑ i in t, w i = 1) :
t.center_mass w z = ∑ i in t, w i • z i :=
by simp only [finset.center_mass, hw, inv_one, one_smul]
lemma finset.center_mass_smul : t.center_mass w (λ i, c • z i) = c • t.center_mass w z :=
by simp only [finset.center_mass, finset.smul_sum, (mul_smul _ _ _).symm, mul_comm c, mul_assoc]
/-- A convex combination of two centers of mass is a center of mass as well. This version
deals with two different index types. -/
lemma finset.center_mass_segment'
(s : finset ι) (t : finset ι') (ws : ι → ℝ) (zs : ι → E) (wt : ι' → ℝ) (zt : ι' → E)
(hws : ∑ i in s, ws i = 1) (hwt : ∑ i in t, wt i = 1) (a b : ℝ) (hab : a + b = 1):
a • s.center_mass ws zs + b • t.center_mass wt zt =
(s.image sum.inl ∪ t.image sum.inr).center_mass
(sum.elim (λ i, a * ws i) (λ j, b * wt j))
(sum.elim zs zt) :=
begin
rw [s.center_mass_eq_of_sum_1 _ hws, t.center_mass_eq_of_sum_1 _ hwt,
smul_sum, smul_sum, ← finset.sum_sum_elim, finset.center_mass_eq_of_sum_1],
{ congr' with ⟨⟩; simp only [sum.elim_inl, sum.elim_inr, mul_smul] },
{ rw [sum_sum_elim, ← mul_sum, ← mul_sum, hws, hwt, mul_one, mul_one, hab] }
end
/-- A convex combination of two centers of mass is a center of mass as well. This version
works if two centers of mass share the set of original points. -/
lemma finset.center_mass_segment
(s : finset ι) (w₁ w₂ : ι → ℝ) (z : ι → E)
(hw₁ : ∑ i in s, w₁ i = 1) (hw₂ : ∑ i in s, w₂ i = 1) (a b : ℝ) (hab : a + b = 1):
a • s.center_mass w₁ z + b • s.center_mass w₂ z =
s.center_mass (λ i, a * w₁ i + b * w₂ i) z :=
have hw : ∑ i in s, (a * w₁ i + b * w₂ i) = 1,
by simp only [mul_sum.symm, sum_add_distrib, mul_one, *],
by simp only [finset.center_mass_eq_of_sum_1, smul_sum, sum_add_distrib, add_smul, mul_smul, *]
lemma finset.center_mass_ite_eq (hi : i ∈ t) :
t.center_mass (λ j, if (i = j) then 1 else 0) z = z i :=
begin
rw [finset.center_mass_eq_of_sum_1],
transitivity ∑ j in t, if (i = j) then z i else 0,
{ congr' with i, split_ifs, exacts [h ▸ one_smul _ _, zero_smul _ _] },
{ rw [sum_ite_eq, if_pos hi] },
{ rw [sum_ite_eq, if_pos hi] }
end
variables {t w}
lemma finset.center_mass_subset {t' : finset ι} (ht : t ⊆ t')
(h : ∀ i ∈ t', i ∉ t → w i = 0) :
t.center_mass w z = t'.center_mass w z :=
begin
rw [center_mass, sum_subset ht h, smul_sum, center_mass, smul_sum],
apply sum_subset ht,
assume i hit' hit,
rw [h i hit' hit, zero_smul, smul_zero]
end
lemma finset.center_mass_filter_ne_zero :
(t.filter (λ i, w i ≠ 0)).center_mass w z = t.center_mass w z :=
finset.center_mass_subset z (filter_subset _ _) $ λ i hit hit',
by simpa only [hit, mem_filter, true_and, ne.def, not_not] using hit'
variable {z}
/-- The center of mass of a finite subset of a convex set belongs to the set,
provided that all weights are non-negative and the total weight is positive. -/
lemma convex.center_mass_mem (hs : convex s) :
(∀ i ∈ t, 0 ≤ w i) → (0 < ∑ i in t, w i) → (∀ i ∈ t, z i ∈ s) → t.center_mass w z ∈ s :=
begin
induction t using finset.induction with i t hi ht, { simp [lt_irrefl] },
intros h₀ hpos hmem,
have zi : z i ∈ s, from hmem _ (mem_insert_self _ _),
have hs₀ : ∀ j ∈ t, 0 ≤ w j, from λ j hj, h₀ j $ mem_insert_of_mem hj,
rw [sum_insert hi] at hpos,
by_cases hsum_t : ∑ j in t, w j = 0,
{ have ws : ∀ j ∈ t, w j = 0, from (sum_eq_zero_iff_of_nonneg hs₀).1 hsum_t,
have wz : ∑ j in t, w j • z j = 0, from sum_eq_zero (λ i hi, by simp [ws i hi]),
simp only [center_mass, sum_insert hi, wz, hsum_t, add_zero],
simp only [hsum_t, add_zero] at hpos,
rw [← mul_smul, inv_mul_cancel (ne_of_gt hpos), one_smul],
exact zi },
{ rw [finset.center_mass_insert _ _ _ hi hsum_t],
refine convex_iff_div.1 hs zi (ht hs₀ _ _) _ (sum_nonneg hs₀) hpos,
{ exact lt_of_le_of_ne (sum_nonneg hs₀) (ne.symm hsum_t) },
{ intros j hj, exact hmem j (mem_insert_of_mem hj) },
{ exact h₀ _ (mem_insert_self _ _) } }
end
lemma convex.sum_mem (hs : convex s) (h₀ : ∀ i ∈ t, 0 ≤ w i) (h₁ : ∑ i in t, w i = 1)
(hz : ∀ i ∈ t, z i ∈ s) :
∑ i in t, w i • z i ∈ s :=
by simpa only [h₁, center_mass, inv_one, one_smul] using
hs.center_mass_mem h₀ (h₁.symm ▸ zero_lt_one) hz
lemma convex_iff_sum_mem :
convex s ↔
(∀ (t : finset E) (w : E → ℝ),
(∀ i ∈ t, 0 ≤ w i) → ∑ i in t, w i = 1 → (∀ x ∈ t, x ∈ s) → ∑ x in t, w x • x ∈ s ) :=
begin
refine ⟨λ hs t w hw₀ hw₁ hts, hs.sum_mem hw₀ hw₁ hts, _⟩,
intros h x y hx hy a b ha hb hab,
by_cases h_cases: x = y,
{ rw [h_cases, ←add_smul, hab, one_smul], exact hy },
{ convert h {x, y} (λ z, if z = y then b else a) _ _ _,
{ simp only [sum_pair h_cases, if_neg h_cases, if_pos rfl] },
{ simp_intros i hi,
cases hi; subst i; simp [ha, hb, if_neg h_cases] },
{ simp only [sum_pair h_cases, if_neg h_cases, if_pos rfl, hab] },
{ simp_intros i hi,
cases hi; subst i; simp [hx, hy, if_neg h_cases] } }
end
/-- Jensen's inequality, `finset.center_mass` version. -/
lemma convex_on.map_center_mass_le {f : E → ℝ} (hf : convex_on s f)
(h₀ : ∀ i ∈ t, 0 ≤ w i) (hpos : 0 < ∑ i in t, w i)
(hmem : ∀ i ∈ t, z i ∈ s) : f (t.center_mass w z) ≤ t.center_mass w (f ∘ z) :=
begin
have hmem' : ∀ i ∈ t, (z i, (f ∘ z) i) ∈ {p : E × ℝ | p.1 ∈ s ∧ f p.1 ≤ p.2},
from λ i hi, ⟨hmem i hi, le_refl _⟩,
convert (hf.convex_epigraph.center_mass_mem h₀ hpos hmem').2;
simp only [center_mass, function.comp, prod.smul_fst, prod.fst_sum, prod.smul_snd, prod.snd_sum]
end
/-- Jensen's inequality, `finset.sum` version. -/
lemma convex_on.map_sum_le {f : E → ℝ} (hf : convex_on s f)
(h₀ : ∀ i ∈ t, 0 ≤ w i) (h₁ : ∑ i in t, w i = 1)
(hmem : ∀ i ∈ t, z i ∈ s) : f (∑ i in t, w i • z i) ≤ ∑ i in t, w i * (f (z i)) :=
by simpa only [center_mass, h₁, inv_one, one_smul]
using hf.map_center_mass_le h₀ (h₁.symm ▸ zero_lt_one) hmem
/-- If a function `f` is convex on `s` and takes value `y` at the center of mass of some points
`z i ∈ s`, then for some `i` we have `y ≤ f (z i)`. -/
lemma convex_on.exists_ge_of_center_mass {f : E → ℝ} (h : convex_on s f)
(hw₀ : ∀ i ∈ t, 0 ≤ w i) (hws : 0 < ∑ i in t, w i) (hz : ∀ i ∈ t, z i ∈ s) :
∃ i ∈ t, f (t.center_mass w z) ≤ f (z i) :=
begin
set y := t.center_mass w z,
have : f y ≤ t.center_mass w (f ∘ z) := h.map_center_mass_le hw₀ hws hz,
rw ← sum_filter_ne_zero at hws,
rw [← finset.center_mass_filter_ne_zero (f ∘ z), center_mass, smul_eq_mul,
← div_eq_inv_mul, le_div_iff hws, mul_sum] at this,
replace : ∃ i ∈ t.filter (λ i, w i ≠ 0), f y * w i ≤ w i • (f ∘ z) i :=
exists_le_of_sum_le (nonempty_of_sum_ne_zero (ne_of_gt hws)) this,
rcases this with ⟨i, hi, H⟩,
rw [mem_filter] at hi,
use [i, hi.1],
simp only [smul_eq_mul, mul_comm (w i)] at H,
refine (mul_le_mul_right _).1 H,
exact lt_of_le_of_ne (hw₀ i hi.1) hi.2.symm
end
end center_mass
section convex_hull
variable {t : set E}
/-- The convex hull of a set `s` is the minimal convex set that includes `s`. -/
def convex_hull (s : set E) : set E :=
⋂ (t : set E) (hst : s ⊆ t) (ht : convex t), t
variable (s)
lemma subset_convex_hull : s ⊆ convex_hull s :=
set.subset_Inter $ λ t, set.subset_Inter $ λ hst, set.subset_Inter $ λ ht, hst
lemma convex_convex_hull : convex (convex_hull s) :=
convex_Inter $ λ t, convex_Inter $ λ ht, convex_Inter id
variable {s}
lemma convex_hull_min (hst : s ⊆ t) (ht : convex t) : convex_hull s ⊆ t :=
set.Inter_subset_of_subset t $ set.Inter_subset_of_subset hst $ set.Inter_subset _ ht
lemma convex_hull_mono (hst : s ⊆ t) : convex_hull s ⊆ convex_hull t :=
convex_hull_min (set.subset.trans hst $ subset_convex_hull t) (convex_convex_hull t)
lemma convex.convex_hull_eq {s : set E} (hs : convex s) : convex_hull s = s :=
set.subset.antisymm (convex_hull_min (set.subset.refl _) hs) (subset_convex_hull s)
@[simp]
lemma convex_hull_singleton {x : E} : convex_hull ({x} : set E) = {x} :=
(convex_singleton x).convex_hull_eq
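-- Illustrative sketch (not part of the original file): taking the convex hull of a
-- set that is already convex changes nothing, e.g. for a closed interval in `ℝ`.
example : convex_hull (Icc (0:ℝ) 1) = Icc 0 1 := (convex_Icc 0 1).convex_hull_eq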
lemma is_linear_map.image_convex_hull {f : E → F} (hf : is_linear_map ℝ f) :
f '' (convex_hull s) = convex_hull (f '' s) :=
begin
refine set.subset.antisymm _ _,
{ rw [set.image_subset_iff],
exact convex_hull_min (set.image_subset_iff.1 $ subset_convex_hull $ f '' s)
((convex_convex_hull (f '' s)).is_linear_preimage hf) },
{ exact convex_hull_min (set.image_subset _ $ subset_convex_hull s)
((convex_convex_hull s).is_linear_image hf) }
end
lemma linear_map.image_convex_hull (f : E →ₗ[ℝ] F) :
f '' (convex_hull s) = convex_hull (f '' s) :=
f.is_linear.image_convex_hull
lemma finset.center_mass_mem_convex_hull (t : finset ι) {w : ι → ℝ} (hw₀ : ∀ i ∈ t, 0 ≤ w i)
(hws : 0 < ∑ i in t, w i) {z : ι → E} (hz : ∀ i ∈ t, z i ∈ s) :
t.center_mass w z ∈ convex_hull s :=
(convex_convex_hull s).center_mass_mem hw₀ hws (λ i hi, subset_convex_hull s $ hz i hi)
-- TODO : Do we need other versions of the next lemma?
/-- The convex hull of `s` is equal to the set of all centers of mass of `finset`s `t`, `z '' t ⊆ s`.
This version allows finsets in any type in any universe. -/
lemma convex_hull_eq (s : set E) :
convex_hull s = {x : E | ∃ (ι : Type u') (t : finset ι) (w : ι → ℝ) (z : ι → E)
(hw₀ : ∀ i ∈ t, 0 ≤ w i) (hw₁ : ∑ i in t, w i = 1) (hz : ∀ i ∈ t, z i ∈ s) , t.center_mass w z = x} :=
begin
refine subset.antisymm (convex_hull_min _ _) _,
{ intros x hx,
use [punit, {punit.star}, λ _, 1, λ _, x, λ _ _, zero_le_one,
finset.sum_singleton, λ _ _, hx],
simp only [finset.center_mass, finset.sum_singleton, inv_one, one_smul] },
{ rintros x y ⟨ι, sx, wx, zx, hwx₀, hwx₁, hzx, rfl⟩ ⟨ι', sy, wy, zy, hwy₀, hwy₁, hzy, rfl⟩
a b ha hb hab,
rw [finset.center_mass_segment' _ _ _ _ _ _ hwx₁ hwy₁ _ _ hab],
refine ⟨_, _, _, _, _, _, _, rfl⟩,
{ rintros i hi,
rw [finset.mem_union, finset.mem_image, finset.mem_image] at hi,
rcases hi with ⟨j, hj, rfl⟩|⟨j, hj, rfl⟩;
simp only [sum.elim_inl, sum.elim_inr];
apply_rules [mul_nonneg, hwx₀, hwy₀] },
{ simp [finset.sum_sum_elim, finset.mul_sum.symm, *] },
{ intros i hi,
rw [finset.mem_union, finset.mem_image, finset.mem_image] at hi,
rcases hi with ⟨j, hj, rfl⟩|⟨j, hj, rfl⟩;
simp only [sum.elim_inl, sum.elim_inr]; apply_rules [hzx, hzy] } },
{ rintros _ ⟨ι, t, w, z, hw₀, hw₁, hz, rfl⟩,
exact t.center_mass_mem_convex_hull hw₀ (hw₁.symm ▸ zero_lt_one) hz }
end
/-- Maximum principle for convex functions. If a function `f` is convex on the convex hull of `s`,
then `f` can't have a maximum on `convex_hull s` outside of `s`. -/
lemma convex_on.exists_ge_of_mem_convex_hull {f : E → ℝ} (hf : convex_on (convex_hull s) f)
{x} (hx : x ∈ convex_hull s) : ∃ y ∈ s, f x ≤ f y :=
begin
rw convex_hull_eq at hx,
rcases hx with ⟨α, t, w, z, hw₀, hw₁, hz, rfl⟩,
rcases hf.exists_ge_of_center_mass hw₀ (hw₁.symm ▸ zero_lt_one)
(λ i hi, subset_convex_hull s (hz i hi)) with ⟨i, hit, Hi⟩,
exact ⟨z i, hz i hit, Hi⟩
end
lemma finset.convex_hull_eq (s : finset E) :
convex_hull ↑s = {x : E | ∃ (w : E → ℝ) (hw₀ : ∀ y ∈ s, 0 ≤ w y) (hw₁ : ∑ y in s, w y = 1),
s.center_mass w id = x} :=
begin
refine subset.antisymm (convex_hull_min _ _) _,
{ intros x hx,
rw [finset.mem_coe] at hx,
refine ⟨_, _, _, finset.center_mass_ite_eq _ _ _ hx⟩,
{ intros, split_ifs, exacts [zero_le_one, le_refl 0] },
{ rw [finset.sum_ite_eq, if_pos hx] } },
{ rintros x y ⟨wx, hwx₀, hwx₁, rfl⟩ ⟨wy, hwy₀, hwy₁, rfl⟩
a b ha hb hab,
rw [finset.center_mass_segment _ _ _ _ hwx₁ hwy₁ _ _ hab],
refine ⟨_, _, _, rfl⟩,
{ rintros i hi,
apply_rules [add_nonneg, mul_nonneg, hwx₀, hwy₀], },
{ simp only [finset.sum_add_distrib, finset.mul_sum.symm, mul_one, *] } },
{ rintros _ ⟨w, hw₀, hw₁, rfl⟩,
exact s.center_mass_mem_convex_hull (λ x hx, hw₀ _ hx)
(hw₁.symm ▸ zero_lt_one) (λ x hx, hx) }
end
lemma set.finite.convex_hull_eq {s : set E} (hs : finite s) :
convex_hull s = {x : E | ∃ (w : E → ℝ) (hw₀ : ∀ y ∈ s, 0 ≤ w y) (hw₁ : ∑ y in hs.to_finset, w y = 1),
hs.to_finset.center_mass w id = x} :=
by simpa only [set.finite.coe_to_finset, set.finite.mem_to_finset, exists_prop]
using hs.to_finset.convex_hull_eq
lemma convex_hull_eq_union_convex_hull_finite_subsets (s : set E) :
convex_hull s = ⋃ (t : finset E) (w : ↑t ⊆ s), convex_hull ↑t :=
begin
refine subset.antisymm _ _,
{ rw [convex_hull_eq.{u}],
rintros x ⟨ι, t, w, z, hw₀, hw₁, hz, rfl⟩,
simp only [mem_Union],
refine ⟨t.image z, _, _⟩,
{ rw [finset.coe_image, image_subset_iff],
exact hz },
{ apply t.center_mass_mem_convex_hull hw₀,
{ simp only [hw₁, zero_lt_one] },
{ exact λ i hi, finset.mem_coe.2 (finset.mem_image_of_mem _ hi) } } },
{ exact Union_subset (λ i, Union_subset convex_hull_mono), },
end
lemma is_linear_map.convex_hull_image {f : E → F} (hf : is_linear_map ℝ f) (s : set E) :
convex_hull (f '' s) = f '' convex_hull s :=
set.subset.antisymm (convex_hull_min (image_subset _ (subset_convex_hull s)) $
(convex_convex_hull s).is_linear_image hf)
(image_subset_iff.2 $ convex_hull_min
(image_subset_iff.1 $ subset_convex_hull _)
((convex_convex_hull _).is_linear_preimage hf))
lemma linear_map.convex_hull_image (f : E →ₗ[ℝ] F) (s : set E) :
convex_hull (f '' s) = f '' convex_hull s :=
f.is_linear.convex_hull_image s
end convex_hull
/-! ### Simplex -/
section simplex
variables (ι) [fintype ι] {f : ι → ℝ}
/-- The standard simplex in the space of functions `ι → ℝ` is the set
of vectors with non-negative coordinates and total sum `1`. -/
def std_simplex (ι : Type*) [fintype ι] : set (ι → ℝ) :=
{ f | (∀ x, 0 ≤ f x) ∧ ∑ x, f x = 1 }
lemma std_simplex_eq_inter :
std_simplex ι = (⋂ x, {f | 0 ≤ f x}) ∩ {f | ∑ x, f x = 1} :=
by { ext f, simp only [std_simplex, set.mem_inter_eq, set.mem_Inter, set.mem_set_of_eq] }
lemma convex_std_simplex : convex (std_simplex ι) :=
begin
refine λ f g hf hg a b ha hb hab, ⟨λ x, _, _⟩,
{ apply_rules [add_nonneg, mul_nonneg, hf.1, hg.1] },
{ erw [finset.sum_add_distrib, ← finset.smul_sum, ← finset.smul_sum, hf.2, hg.2,
smul_eq_mul, smul_eq_mul, mul_one, mul_one],
exact hab }
end
variable {ι}
lemma ite_eq_mem_std_simplex (i : ι) : (λ j, ite (i = j) (1:ℝ) 0) ∈ std_simplex ι :=
⟨λ j, by simp only; split_ifs; norm_num, by rw [finset.sum_ite_eq, if_pos (finset.mem_univ _)] ⟩
/-- `std_simplex ι` is the convex hull of the canonical basis in `ι → ℝ`. -/
lemma convex_hull_basis_eq_std_simplex :
convex_hull (range $ λ(i j:ι), if i = j then (1:ℝ) else 0) = std_simplex ι :=
begin
refine subset.antisymm (convex_hull_min _ (convex_std_simplex ι)) _,
{ rintros _ ⟨i, rfl⟩,
exact ite_eq_mem_std_simplex i },
{ rintros w ⟨hw₀, hw₁⟩,
rw [pi_eq_sum_univ w, ← finset.univ.center_mass_eq_of_sum_1 _ hw₁],
exact finset.univ.center_mass_mem_convex_hull (λ i hi, hw₀ i)
(hw₁.symm ▸ zero_lt_one) (λ i hi, mem_range_self i) }
end
variable {ι}
/-- The convex hull of a finite set is the image of the standard simplex in `s → ℝ`
under the linear map sending each function `w` to `∑ x in s, w x • x`.
Since we cannot sum over the set `s` directly, we use the sum over `@finset.univ _ hs.fintype`.
The map is defined in terms of operations on `(s → ℝ) →ₗ[ℝ] ℝ` so that later we will not need
to prove that this map is linear. -/
lemma set.finite.convex_hull_eq_image {s : set E} (hs : finite s) :
convex_hull s = by haveI := hs.fintype; exact
(⇑(∑ x : s, (@linear_map.proj ℝ s _ (λ i, ℝ) _ _ x).smul_right x.1)) '' (std_simplex s) :=
begin
rw [← convex_hull_basis_eq_std_simplex, ← linear_map.convex_hull_image, ← set.range_comp, (∘)],
apply congr_arg,
convert subtype.range_coe.symm,
ext x,
simp [linear_map.sum_apply, ite_smul, finset.filter_eq]
end
/-- All values of a function `f ∈ std_simplex ι` belong to `[0, 1]`. -/
lemma mem_Icc_of_mem_std_simplex (hf : f ∈ std_simplex ι) (x) :
f x ∈ I :=
⟨hf.1 x, hf.2 ▸ finset.single_le_sum (λ y hy, hf.1 y) (finset.mem_univ x)⟩
end simplex
|
94f36aa42e4335d73e8fac7b214ed7808d38e8c5
|
c7e0fa0422da099d4b66b195c60455d87facee0d
|
/polynomial.lean
|
0e3be9eab4a7dbb518117a1df46532e2c351a98c
|
[] |
no_license
|
viljamivirolainen/linear
|
12d725a90a44ae2760f59e9a9d9dd12aae36300a
|
dcf28df05e06da1b4fc76bce61b8fa0741300dc8
|
refs/heads/master
| 1,678,895,327,362
| 1,516,045,398,000
| 1,516,045,398,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 905
|
lean
|
import .seq .vector_space .subspace .function
def fin.max {n} : fin (n + 1) :=
⟨n, nat.le_refl _⟩
def monoid.pow {R} [monoid R] (r : R) : ℕ → R :=
nat.rec 1 (λ _, (*) r)
notation x `^` n := monoid.pow x n
-- A function R → R is a polynomial of degree...
inductive is_polynomial_of_degree {R} [ring R] : (R → R) → ℤ → Prop
| intro_zero :
is_polynomial_of_degree (λ _, 0) (-1)
| intro_sum {n} (a : seq R (n + 1)) :
a fin.max ≠ 0 → is_polynomial_of_degree (λ x, seq.sum (λ i, a i * x ^ i.val)) n
def P (R) [ring R] : set (R → R) :=
λ f, ∃ n, is_polynomial_of_degree f n
def P_deg_le (n) (R) [ring R] : set (R → R) :=
λ f, ∃ m ≤ n, is_polynomial_of_degree f m
open is_polynomial_of_degree
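-- Note: the two `sorry`s below are placeholders.  Closing them amounts to showing
-- that `P F` is closed under addition and (scalar) multiplication, i.e. that sums
-- and scalar multiples of polynomials are again polynomials; those proofs are not
-- developed in this file.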
instance P_is_subspace {F} [f : field F] : @subspace (F → F) F _ _ (P F) :=
{ contains_zero := ⟨-1, intro_zero⟩,
add_closed := sorry,
mul_closed := sorry }
|
db21885833d4e63d7ca02ddced32040f11a288db
|
0e9e4fa61bad11733d146f25076045b2ac9f9fbe
|
/library/init/meta/simp_tactic.lean
|
2d45bb1763967abe01abd27e71bcbb5dcf297acb
|
[
"Apache-2.0"
] |
permissive
|
Achierius/lean
|
3d9cd6633ebee07987d062c9921b77d90a971343
|
6b8a91e5a76ed78eb5c075cecb0a991ee97a594f
|
refs/heads/master
| 1,676,410,367,196
| 1,610,127,822,000
| 1,610,127,822,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 27,811
|
lean
|
/-
Copyright (c) 2016 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
prelude
import init.meta.tactic init.meta.attribute init.meta.constructor_tactic
import init.meta.relation_tactics init.meta.occurrences
import init.data.option.basic
open tactic
def simp.default_max_steps := 10000000
/-- Prefix the given `attr_name` with `"simp_attr"`. -/
meta constant mk_simp_attr_decl_name (attr_name : name) : name
/-- Simp lemmas are used by the "simplifier" family of tactics.
`simp_lemmas` is essentially a pair of tables `rb_map (expr_type × name) (priority_list simp_lemma)`.
One of the tables is for congruences and one is for everything else.
An individual simp lemma is:
- A kind which can be `Refl`, `Simp` or `Congr`.
- A pair of `expr`s `l ~> r`. The rb map is indexed by the name of `get_app_fn(l)`.
- A proof that `l = r` or `l ↔ r`.
- A list of the metavariables that must be filled before the proof can be applied.
- A priority number
-/
meta constant simp_lemmas : Type
/-- Make a new table of simp lemmas -/
meta constant simp_lemmas.mk : simp_lemmas
/-- Merge the simp_lemma tables. -/
meta constant simp_lemmas.join : simp_lemmas → simp_lemmas → simp_lemmas
/-- Remove the given lemmas from the table. Use the names of the lemmas. -/
meta constant simp_lemmas.erase : simp_lemmas → list name → simp_lemmas
/-- Makes the default simp_lemmas table which is composed of all lemmas tagged with `simp`. -/
meta constant simp_lemmas.mk_default : tactic simp_lemmas
/-- Add a simplification lemma by an expression `p`. Some conditions on `p` must hold for it to be added; see the list below.
If your lemma is not being added, you can see the reasons by setting `set_option trace.simp_lemmas true`.
- `p` must have the type `Π (h₁ : _) ... (hₙ : _), LHS ~ RHS` for some reflexive, transitive relation (usually `=`).
- Any of the hypotheses `hᵢ` should either be present in `LHS` or otherwise a `Prop` or a typeclass instance.
- `LHS` should not occur within `RHS`.
- `LHS` should not occur within a hypothesis `hᵢ`.
-/
meta constant simp_lemmas.add (s : simp_lemmas) (e : expr) (symm : bool := false) : tactic simp_lemmas
/-- Add a simplification lemma by its declaration name. See `simp_lemmas.add` for more information. -/
meta constant simp_lemmas.add_simp (s : simp_lemmas) (id : name) (symm : bool := false) : tactic simp_lemmas
/-- Adds a congruence simp lemma to simp_lemmas.
A congruence simp lemma is a lemma that breaks the simplification down into separate problems.
For example, to simplify `a ∧ b` to `c ∧ d`, we should try to simp `a` to `c` and `b` to `d`.
For examples of congruence simp lemmas look for lemmas with the `@[congr]` attribute.
```lean
lemma if_simp_congr ... (h_c : b ↔ c) (h_t : x = u) (h_e : y = v) : ite b x y = ite c u v := ...
lemma imp_congr_right (h : a → (b ↔ c)) : (a → b) ↔ (a → c) := ...
lemma and_congr (h₁ : a ↔ c) (h₂ : b ↔ d) : (a ∧ b) ↔ (c ∧ d) := ...
```
-/
meta constant simp_lemmas.add_congr : simp_lemmas → name → tactic simp_lemmas
/-- Add expressions to a set of simp lemmas using `simp_lemmas.add`.
This is the new version of `simp_lemmas.append`,
which also allows you to set the `symm` flag.
-/
meta def simp_lemmas.append_with_symm (s : simp_lemmas) (hs : list (expr × bool)) :
tactic simp_lemmas :=
hs.mfoldl (λ s h, simp_lemmas.add s h.fst h.snd) s
/-- Add expressions to a set of simp lemmas using `simp_lemmas.add`.
This is the backwards-compatibility version of `simp_lemmas.append_with_symm`,
and sets all `symm` flags to `ff`.
-/
meta def simp_lemmas.append (s : simp_lemmas) (hs : list expr) : tactic simp_lemmas :=
hs.mfoldl (λ s h, simp_lemmas.add s h ff) s
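/- Illustrative sketch (not part of the original file): a hypothetical helper that
   builds a simp set from the default `[simp]` attribute and then adds a list of
   extra proof terms, reusing `simp_lemmas.mk_default` and `simp_lemmas.append`. -/
meta def simp_lemmas.mk_default_and_append (hs : list expr) : tactic simp_lemmas :=
do s ← simp_lemmas.mk_default,
   simp_lemmas.append s hs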
/-- `simp_lemmas.rewrite s e prove r` applies a simplification lemma from 's'
- 'e' is the expression to be "simplified"
- 'prove' is used to discharge proof obligations.
- 'r' is the equivalence relation being used (e.g., 'eq', 'iff')
- 'md' is the transparency; how aggressively the simplifier should perform reductions.
The result (new_e, pr) is the new expression 'new_e' and a proof (pr : e r new_e) -/
meta constant simp_lemmas.rewrite (s : simp_lemmas) (e : expr)
(prove : tactic unit := failed) (r : name := `eq) (md := reducible)
: tactic (expr × expr)
meta constant simp_lemmas.rewrites (s : simp_lemmas) (e : expr)
(prove : tactic unit := failed) (r : name := `eq) (md := reducible)
: tactic $ list (expr × expr)
/-- `simp_lemmas.drewrite s e` tries to rewrite 'e' using only refl lemmas in 's' -/
meta constant simp_lemmas.drewrite (s : simp_lemmas) (e : expr) (md := reducible) : tactic expr
meta constant is_valid_simp_lemma_cnst : name → tactic bool
meta constant is_valid_simp_lemma : expr → tactic bool
meta constant simp_lemmas.pp : simp_lemmas → tactic format
meta instance : has_to_tactic_format simp_lemmas :=
⟨simp_lemmas.pp⟩
namespace tactic
/- Remark: `transform` should not change the target. -/
/-- Revert a local constant, change its type using `transform`. -/
meta def revert_and_transform (transform : expr → tactic expr) (h : expr) : tactic unit :=
do num_reverted : ℕ ← revert h,
t ← target,
match t with
| expr.pi n bi d b :=
do h_simp ← transform d,
unsafe_change $ expr.pi n bi h_simp b
| expr.elet n g e f :=
do h_simp ← transform g,
unsafe_change $ expr.elet n h_simp e f
| _ := fail "reverting hypothesis created neither a pi nor an elet expr (unreachable?)"
end,
intron num_reverted
/-- `get_eqn_lemmas_for deps d` returns the automatically generated equational lemmas for definition d.
If deps is tt, then lemmas for automatically generated auxiliary declarations used to define d are also included. -/
meta def get_eqn_lemmas_for (deps : bool) (d : name) : tactic (list name) := do
env ← get_env,
pure $ if deps then env.get_ext_eqn_lemmas_for d else env.get_eqn_lemmas_for d
structure dsimp_config :=
(md := reducible) -- reduction mode: how aggressively constants are replaced with their definitions.
(max_steps : nat := simp.default_max_steps) -- The maximum number of steps allowed before failing.
(canonize_instances : bool := tt) -- See the documentation in `src/library/defeq_canonizer.h`
(single_pass : bool := ff) -- Visit each subterm no more than once.
(fail_if_unchanged := tt) -- Don't throw if dsimp didn't do anything.
(eta := tt) -- allow eta-equivalence: `(λ x, F $ x) ↝ F`
(zeta : bool := tt) -- do zeta-reductions: `let x : a := b in c ↝ c[x/b]`.
(beta : bool := tt) -- do beta-reductions: `(λ x, E) $ (y) ↝ E[x/y]`.
(proj : bool := tt) -- reduce projections: `⟨a,b⟩.1 ↝ a`.
(iota : bool := tt) -- reduce recursors for inductive datatypes: eg `nat.rec_on (succ n) Z R ↝ R n $ nat.rec_on n Z R`
(unfold_reducible := ff) -- if tt, definitions with `reducible` transparency will be unfolded (delta-reduced)
(memoize := tt) -- Perform caching of dsimps of subterms.
end tactic
/-- (Definitional) Simplify the given expression using *only* reflexivity equality lemmas from the given set of lemmas.
The resulting expression is definitionally equal to the input.
The list `u` contains definitions to be delta-reduced, and projections to be reduced. -/
meta constant simp_lemmas.dsimplify (s : simp_lemmas) (u : list name := []) (e : expr) (cfg : tactic.dsimp_config := {}) : tactic expr
namespace tactic
/- Remark: the configuration parameters `cfg.md` and `cfg.eta` are ignored by this tactic. -/
meta constant dsimplify_core
/- The user state type. -/
{α : Type}
/- Initial user data -/
(a : α)
/- (pre a e) is invoked before visiting the children of subterm 'e';
if it succeeds, the result is (new_a, new_e, flag) where
- 'new_a' is the new value for the user data
- 'new_e' is a new expression that must be definitionally equal to 'e',
- 'flag': if tt, the children of 'new_e' should be visited, and 'post' invoked. -/
(pre : α → expr → tactic (α × expr × bool))
/- (post a e) is invoked after visiting the children of subterm 'e'.
The output is similar to (pre a e), but the 'flag' indicates whether
the new expression should be revisited or not. -/
(post : α → expr → tactic (α × expr × bool))
(e : expr)
(cfg : dsimp_config := {})
: tactic (α × expr)
meta def dsimplify
(pre : expr → tactic (expr × bool))
(post : expr → tactic (expr × bool))
: expr → tactic expr :=
λ e, do (a, new_e) ← dsimplify_core ()
(λ u e, do r ← pre e, return (u, r))
(λ u e, do r ← post e, return (u, r)) e,
return new_e
meta def get_simp_lemmas_or_default : option simp_lemmas → tactic simp_lemmas
| none := simp_lemmas.mk_default
| (some s) := return s
meta def dsimp_target (s : option simp_lemmas := none) (u : list name := []) (cfg : dsimp_config := {}) : tactic unit :=
do
s ← get_simp_lemmas_or_default s,
t ← target >>= instantiate_mvars,
s.dsimplify u t cfg >>= unsafe_change
meta def dsimp_hyp (h : expr) (s : option simp_lemmas := none) (u : list name := []) (cfg : dsimp_config := {}) : tactic unit :=
do s ← get_simp_lemmas_or_default s, revert_and_transform (λ e, s.dsimplify u e cfg) h
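/- Illustrative sketch (not part of the original file): a hypothetical convenience
   wrapper that definitionally simplifies both the target and a given hypothesis,
   threading the same lemma set and configuration through `dsimp_target` and
   `dsimp_hyp`. -/
meta def dsimp_target_and_hyp (h : expr) (s : option simp_lemmas := none)
  (u : list name := []) (cfg : dsimp_config := {}) : tactic unit :=
dsimp_target s u cfg >> dsimp_hyp h s u cfg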
/- Remark: we use transparency.instances by default to make sure that we
can unfold projections of type classes. Example:
(@has_add.add nat nat.has_add a b)
-/
/-- Tries to unfold `e` if it is a constant or a constant application.
Remark: this is not a recursive procedure. -/
meta constant dunfold_head (e : expr) (md := transparency.instances) : tactic expr
structure dunfold_config extends dsimp_config :=
(md := transparency.instances)
/- Remark: in principle, dunfold can be implemented on top of dsimp. We don't do it for
performance reasons. -/
meta constant dunfold (cs : list name) (e : expr) (cfg : dunfold_config := {}) : tactic expr
meta def dunfold_target (cs : list name) (cfg : dunfold_config := {}) : tactic unit :=
do t ← target, dunfold cs t cfg >>= unsafe_change
meta def dunfold_hyp (cs : list name) (h : expr) (cfg : dunfold_config := {}) : tactic unit :=
revert_and_transform (λ e, dunfold cs e cfg) h
structure delta_config :=
(max_steps := simp.default_max_steps)
(visit_instances := tt)
private meta def is_delta_target (e : expr) (cs : list name) : bool :=
cs.any (λ c,
if e.is_app_of c then tt /- Exact match -/
else let f := e.get_app_fn in
/- f is an auxiliary constant generated when compiling c -/
f.is_constant && f.const_name.is_internal && (f.const_name.get_prefix = c))
/-- Delta reduce the given constant names -/
meta def delta (cs : list name) (e : expr) (cfg : delta_config := {}) : tactic expr :=
let unfold (u : unit) (e : expr) : tactic (unit × expr × bool) := do
guard (is_delta_target e cs),
(expr.const f_name f_lvls) ← return e.get_app_fn,
env ← get_env,
decl ← env.get f_name,
new_f ← decl.instantiate_value_univ_params f_lvls,
new_e ← head_beta (expr.mk_app new_f e.get_app_args),
return (u, new_e, tt)
in do (c, new_e) ← dsimplify_core () (λ c e, failed) unfold e {max_steps := cfg.max_steps, canonize_instances := cfg.visit_instances},
return new_e
meta def delta_target (cs : list name) (cfg : delta_config := {}) : tactic unit :=
do t ← target, delta cs t cfg >>= unsafe_change
meta def delta_hyp (cs : list name) (h : expr) (cfg : delta_config := {}) :tactic unit :=
revert_and_transform (λ e, delta cs e cfg) h
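/- A minimal usage sketch (untested) of `delta_target`: unfolding the constant `id`
turns the goal into `1 = 1`. The goal is an assumed example; `reflexivity` alone
would also close it up to definitional unfolding.
```lean
example : id (1 : ℕ) = 1 :=
by tactic.delta_target [`id] >> tactic.reflexivity
```
-/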
structure unfold_proj_config extends dsimp_config :=
(md := transparency.instances)
/-- If `e` is a projection application, try to unfold it, otherwise fail. -/
meta constant unfold_proj (e : expr) (md := transparency.instances) : tactic expr
meta def unfold_projs (e : expr) (cfg : unfold_proj_config := {}) : tactic expr :=
let unfold (changed : bool) (e : expr) : tactic (bool × expr × bool) := do
new_e ← unfold_proj e cfg.md,
return (tt, new_e, tt)
in do (tt, new_e) ← dsimplify_core ff (λ c e, failed) unfold e cfg.to_dsimp_config | fail "no projections to unfold",
return new_e
meta def unfold_projs_target (cfg : unfold_proj_config := {}) : tactic unit :=
do t ← target, unfold_projs t cfg >>= unsafe_change
meta def unfold_projs_hyp (h : expr) (cfg : unfold_proj_config := {}) : tactic unit :=
revert_and_transform (λ e, unfold_projs e cfg) h
structure simp_config :=
(max_steps : nat := simp.default_max_steps)
(contextual : bool := ff)
(lift_eq : bool := tt)
(canonize_instances : bool := tt)
(canonize_proofs : bool := ff)
(use_axioms : bool := tt)
(zeta : bool := tt)
(beta : bool := tt)
(eta : bool := tt)
(proj : bool := tt) -- reduce projections
(iota : bool := tt)
(iota_eqn : bool := ff) -- reduce using all equation lemmas generated by equation/pattern-matching compiler
(constructor_eq : bool := tt)
(single_pass : bool := ff)
(fail_if_unchanged := tt)
(memoize := tt)
(trace_lemmas := ff)
/--
`simplify s to_unfold e cfg r discharger` simplifies `e` using the lemmas in `s` with a bottom-up traversal.
`discharger` is a tactic for discharging new subgoals created by the simplifier.
If it fails, the simplifier tries to discharge the subgoal by simplifying it to `true`.
The parameter `to_unfold` specifies definitions that should be delta-reduced,
and projection applications that should be unfolded.
-/
meta constant simplify (s : simp_lemmas) (to_unfold : list name := []) (e : expr) (cfg : simp_config := {}) (r : name := `eq)
(discharger : tactic unit := failed) : tactic (expr × expr × name_set)
meta def simp_target (s : simp_lemmas) (to_unfold : list name := []) (cfg : simp_config := {}) (discharger : tactic unit := failed) : tactic name_set :=
do t ← target >>= instantiate_mvars,
(new_t, pr, lms) ← simplify s to_unfold t cfg `eq discharger,
replace_target new_t pr,
return lms
meta def simp_hyp (s : simp_lemmas) (to_unfold : list name := []) (h : expr) (cfg : simp_config := {}) (discharger : tactic unit := failed) : tactic (expr × name_set) :=
do when (expr.is_local_constant h = ff) (fail "tactic simp_at failed, the given expression is not a hypothesis"),
htype ← infer_type h,
(h_new_type, pr, lms) ← simplify s to_unfold htype cfg `eq discharger,
new_hyp ← replace_hyp h h_new_type pr,
return (new_hyp, lms)
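/- A minimal usage sketch (untested) of `simp_target`, assuming the default simp set
rewrites `a + 0 = a` to `true` (via `add_zero` and `eq_self_iff_true`); the returned
`name_set` of used lemma names is discarded by the `do` block.
```lean
example (a : ℕ) : a + 0 = a :=
by do s ← tactic.simp_lemmas.mk_default,
      tactic.simp_target s,
      tactic.triv
```
-/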
/--
`ext_simplify_core a c s discharger pre post r e`:
- `a : α` - initial user data
- `c : simp_config` - simp configuration options
- `s : simp_lemmas` - the set of simp_lemmas to use. Remark: the simplification lemmas are not applied automatically like in the simplify tactic. The caller must use them at pre/post.
- `discharger : α → tactic α` - tactic for discharging hypotheses in conditional rewriting rules. The argument 'α' is the current user data.
- `pre a s r p e` is invoked before visiting the children of subterm 'e'.
+ arguments:
- `a` is the current user data
- `s` is the updated set of lemmas if 'contextual' is `tt`,
- `r` is the simplification relation being used,
- `p` is the "parent" expression (if there is one).
- `e` is the current subexpression in question.
+ if it succeeds the result is `(new_a, new_e, new_pr, flag)` where
- `new_a` is the new value for the user data
- `new_e` is a new expression s.t. `r e new_e`
- `new_pr` is a proof of `r e new_e`; if it is `none`, the proof is assumed to be by reflexivity
- `flag` if tt `new_e` children should be visited, and `post` invoked.
- `(post a s r p e)` is invoked after visiting the children of subterm `e`,
The output is similar to `(pre a r s p e)`, but the 'flag' indicates whether the new expression should be revisited or not.
- `r` is the simplification relation. Usually `=` or `↔`.
- `e` is the input expression to be simplified.
The method returns `(a,e,pr)` where
- `a` is the final user data
- `e` is the new expression
- `pr` is a proof that the input expression is related by `r` to the new expression.
Note that `ext_simplify_core` will succeed even if `pre` and `post` fail, as failures are used to indicate that the method should move on to the next subterm.
If it is desirable to propagate errors from `pre`, they can be propagated through the "user data".
An easy way to do this is to call `tactic.capture (do ...)` in the parts of `pre`/`post` where errors matter, and then use `tactic.unwrap a` on the result.
Additionally, `ext_simplify_core` does not propagate changes made to the tactic state by `pre` and `post`.
If it is desirable to propagate changes to the tactic state in addition to errors, use `tactic.resume` instead of `tactic.unwrap`.
-/
meta constant ext_simplify_core
{α : Type}
(a : α)
(c : simp_config)
(s : simp_lemmas)
(discharger : α → tactic α)
(pre : α → simp_lemmas → name → option expr → expr → tactic (α × expr × option expr × bool))
(post : α → simp_lemmas → name → option expr → expr → tactic (α × expr × option expr × bool))
(r : name) :
expr → tactic (α × expr × expr)
private meta def is_equation : expr → bool
| (expr.pi n bi d b) := is_equation b
| e := match (expr.is_eq e) with (some a) := tt | none := ff end
meta def collect_ctx_simps : tactic (list expr) :=
local_context
section simp_intros
meta def intro1_aux : bool → list name → tactic expr
| ff _ := intro1
| tt (n::ns) := intro n
| _ _ := failed
structure simp_intros_config extends simp_config :=
(use_hyps := ff)
meta def simp_intros_aux (cfg : simp_config) (use_hyps : bool) (to_unfold : list name) : simp_lemmas → bool → list name → tactic simp_lemmas
| S tt [] := try (simp_target S to_unfold cfg) >> return S
| S use_ns ns := do
t ← target,
if t.is_napp_of `not 1 then
intro1_aux use_ns ns >> simp_intros_aux S use_ns ns.tail
else if t.is_arrow then
do {
d ← return t.binding_domain,
(new_d, h_d_eq_new_d, lms) ← simplify S to_unfold d cfg,
h_d ← intro1_aux use_ns ns,
h_new_d ← mk_eq_mp h_d_eq_new_d h_d,
assertv_core h_d.local_pp_name new_d h_new_d,
clear h_d,
h_new ← intro1,
new_S ← if use_hyps then mcond (is_prop new_d) (S.add h_new ff) (return S)
else return S,
simp_intros_aux new_S use_ns ns.tail
}
<|>
-- failed to simplify... we just introduce and continue
(intro1_aux use_ns ns >> simp_intros_aux S use_ns ns.tail)
else if t.is_pi || t.is_let then
intro1_aux use_ns ns >> simp_intros_aux S use_ns ns.tail
else do
new_t ← whnf t reducible,
if new_t.is_pi then unsafe_change new_t >> simp_intros_aux S use_ns ns
else
try (simp_target S to_unfold cfg) >>
mcond (expr.is_pi <$> target)
(simp_intros_aux S use_ns ns)
(if use_ns ∧ ¬ns.empty then failed else return S)
meta def simp_intros (s : simp_lemmas) (to_unfold : list name := []) (ids : list name := []) (cfg : simp_intros_config := {}) : tactic unit :=
step $ simp_intros_aux cfg.to_simp_config cfg.use_hyps to_unfold s (bnot ids.empty) ids
end simp_intros
meta def mk_eq_simp_ext (simp_ext : expr → tactic (expr × expr)) : tactic unit :=
do (lhs, rhs) ← target >>= match_eq,
(new_rhs, heq) ← simp_ext lhs,
unify rhs new_rhs,
exact heq
/- Simp attribute support -/
meta def to_simp_lemmas : simp_lemmas → list name → tactic simp_lemmas
| S [] := return S
| S (n::ns) := do S' ← S.add_simp n ff, to_simp_lemmas S' ns
meta def mk_simp_attr (attr_name : name) (attr_deps : list name := []) : command :=
do let t := `(user_attribute simp_lemmas),
let v := `({name := attr_name,
descr := "simplifier attribute",
cache_cfg := {
mk_cache := λ ns, do {
s ← tactic.to_simp_lemmas simp_lemmas.mk ns,
s ← attr_deps.mfoldl
(λ s attr_name, do
ns ← attribute.get_instances attr_name,
to_simp_lemmas s ns)
s,
return s },
dependencies := `reducibility :: attr_deps}} : user_attribute simp_lemmas),
let n := mk_simp_attr_decl_name attr_name,
add_decl (declaration.defn n [] t v reducibility_hints.abbrev ff),
attribute.register n
/--
### Example usage:
```lean
-- make a new simp attribute called "my_reduction"
run_cmd mk_simp_attr `my_reduction
-- Add "my_reduction" attributes to these if-reductions
attribute [my_reduction] if_pos if_neg dif_pos dif_neg
-- will return the simp_lemmas with the `my_reduction` attribute.
#eval get_user_simp_lemmas `my_reduction
```
-/
meta def get_user_simp_lemmas (attr_name : name) : tactic simp_lemmas :=
if attr_name = `default then simp_lemmas.mk_default
else get_attribute_cache_dyn (mk_simp_attr_decl_name attr_name)
meta def join_user_simp_lemmas_core : simp_lemmas → list name → tactic simp_lemmas
| S [] := return S
| S (attr_name::R) := do S' ← get_user_simp_lemmas attr_name, join_user_simp_lemmas_core (S.join S') R
meta def join_user_simp_lemmas (no_dflt : bool) (attrs : list name) : tactic simp_lemmas :=
if no_dflt then
join_user_simp_lemmas_core simp_lemmas.mk attrs
else do
s ← simp_lemmas.mk_default,
join_user_simp_lemmas_core s attrs
meta def simplify_top_down {α} (a : α) (pre : α → expr → tactic (α × expr × expr)) (e : expr) (cfg : simp_config := {}) : tactic (α × expr × expr) :=
ext_simplify_core a cfg simp_lemmas.mk (λ _, failed)
(λ a _ _ _ e, do (new_a, new_e, pr) ← pre a e, guard (¬ new_e =ₐ e), return (new_a, new_e, some pr, tt))
(λ _ _ _ _ _, failed)
`eq e
meta def simp_top_down (pre : expr → tactic (expr × expr)) (cfg : simp_config := {}) : tactic unit :=
do t ← target,
(_, new_target, pr) ← simplify_top_down () (λ _ e, do (new_e, pr) ← pre e, return ((), new_e, pr)) t cfg,
replace_target new_target pr
meta def simplify_bottom_up {α} (a : α) (post : α → expr → tactic (α × expr × expr)) (e : expr) (cfg : simp_config := {}) : tactic (α × expr × expr) :=
ext_simplify_core a cfg simp_lemmas.mk (λ _, failed)
(λ _ _ _ _ _, failed)
(λ a _ _ _ e, do (new_a, new_e, pr) ← post a e, guard (¬ new_e =ₐ e), return (new_a, new_e, some pr, tt))
`eq e
meta def simp_bottom_up (post : expr → tactic (expr × expr)) (cfg : simp_config := {}) : tactic unit :=
do t ← target,
(_, new_target, pr) ← simplify_bottom_up () (λ _ e, do (new_e, pr) ← post e, return ((), new_e, pr)) t cfg,
replace_target new_target pr
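/- A usage sketch (untested) of `simp_bottom_up` with a hand-written rewriter: the
callback fails on subterms it does not recognize and otherwise returns the
replacement together with an equality proof. The syntactic `guard` makes this
fragile; it only fires on the literal elaboration of `1 + 1 : ℕ`.
```lean
example : (1 + 1) * 3 = 2 * 3 :=
by tactic.simp_bottom_up (λ e,
     do guard (e = `(1 + 1 : ℕ)),
        prf ← tactic.to_expr ``(rfl : (1 + 1 : ℕ) = 2),
        return (`(2 : ℕ), prf)) >> tactic.reflexivity
```
-/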
private meta def remove_deps (s : name_set) (h : expr) : name_set :=
if s.empty then s
else h.fold s (λ e o s, if e.is_local_constant then s.erase e.local_uniq_name else s)
/- Return the list of hypotheses that are propositions and do not have
forward dependencies. -/
meta def non_dep_prop_hyps : tactic (list expr) :=
do
ctx ← local_context,
s ← ctx.mfoldl (λ s h, do
h_type ← infer_type h,
let s := remove_deps s h_type,
h_val ← head_zeta h,
let s := if h_val =ₐ h then s else remove_deps s h_val,
mcond (is_prop h_type)
(return $ s.insert h.local_uniq_name)
(return s)) mk_name_set,
t ← target,
let s := remove_deps s t,
return $ ctx.filter (λ h, s.contains h.local_uniq_name)
section simp_all
meta structure simp_all_entry :=
(h : expr) -- hypothesis
(new_type : expr) -- new type
(pr : option expr) -- proof that type of h is equal to new_type
(s : simp_lemmas) -- simplification lemmas for simplifying new_type
private meta def update_simp_lemmas (es : list simp_all_entry) (h : expr) : tactic (list simp_all_entry) :=
es.mmap $ λ e, do new_s ← e.s.add h ff, return {s := new_s, ..e}
/- Helper tactic for `init`.
Remark: the following tactic is quadratic in the length of the hypothesis list (the list of non-dependent propositions).
We can make it more efficient as soon as we have an efficient simp_lemmas.erase. -/
private meta def init_aux : list expr → simp_lemmas → list simp_all_entry → tactic (simp_lemmas × list simp_all_entry)
| [] s r := return (s, r)
| (h::hs) s r := do
new_r ← update_simp_lemmas r h,
new_s ← s.add h ff,
h_type ← infer_type h,
init_aux hs new_s (⟨h, h_type, none, s⟩::new_r)
private meta def init (s : simp_lemmas) (hs : list expr) : tactic (simp_lemmas × list simp_all_entry) :=
init_aux hs s []
private meta def add_new_hyps (es : list simp_all_entry) : tactic unit :=
es.mmap' $ λ e,
match e.pr with
| none := return ()
| some pr :=
assert e.h.local_pp_name e.new_type >>
mk_eq_mp pr e.h >>= exact
end
private meta def clear_old_hyps (es : list simp_all_entry) : tactic unit :=
es.mmap' $ λ e, when (e.pr ≠ none) (try (clear e.h))
private meta def join_pr : option expr → expr → tactic expr
| none pr₂ := return pr₂
| (some pr₁) pr₂ := mk_eq_trans pr₁ pr₂
private meta def loop (cfg : simp_config) (discharger : tactic unit) (to_unfold : list name)
: list simp_all_entry → list simp_all_entry → simp_lemmas → bool → tactic name_set
| [] r s m :=
if m then loop r [] s ff
else do
add_new_hyps r,
(lms, target_changed) ← (simp_target s to_unfold cfg discharger >>= λ ns, return (ns, tt)) <|>
(return (mk_name_set, ff)),
guard (cfg.fail_if_unchanged = ff ∨ target_changed ∨ r.any (λ e, e.pr ≠ none)) <|> fail "simp_all tactic failed to simplify",
clear_old_hyps r,
return lms
| (e::es) r s m := do
let ⟨h, h_type, h_pr, s'⟩ := e,
(new_h_type, new_pr, lms) ← simplify s' to_unfold h_type {fail_if_unchanged := ff, ..cfg} `eq discharger,
if h_type =ₐ new_h_type then do
new_lms ← loop es (e::r) s m,
return (new_lms.fold lms (λ n ns, name_set.insert ns n))
else do
new_pr ← join_pr h_pr new_pr,
new_fact_pr ← mk_eq_mp new_pr h,
if new_h_type = `(false) then do
tgt ← target,
to_expr ``(@false.rec %%tgt %%new_fact_pr) >>= exact,
return (mk_name_set)
else do
h0_type ← infer_type h,
let new_fact_pr := mk_id_proof new_h_type new_fact_pr,
new_es ← update_simp_lemmas es new_fact_pr,
new_r ← update_simp_lemmas r new_fact_pr,
let new_r := {new_type := new_h_type, pr := new_pr, ..e} :: new_r,
new_s ← s.add new_fact_pr ff,
new_lms ← loop new_es new_r new_s tt,
return (new_lms.fold lms (λ n ns, name_set.insert ns n))
meta def simp_all (s : simp_lemmas) (to_unfold : list name) (cfg : simp_config := {}) (discharger : tactic unit := failed) : tactic name_set :=
do hs ← non_dep_prop_hyps,
(s, es) ← init s hs,
loop cfg discharger to_unfold es [] s ff
end simp_all
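/- A usage sketch (untested): a wrapper that runs `simp_all` with the default simp
set over the non-dependent propositional hypotheses and the target, discarding the
returned set of used lemma names. The name `my_simp_all` is hypothetical.
```lean
meta def my_simp_all : tactic unit :=
do s ← tactic.simp_lemmas.mk_default,
   tactic.simp_all s [] >> tactic.skip
```
-/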
/- debugging support for algebraic normalizer -/
meta constant trace_algebra_info : expr → tactic unit
end tactic
export tactic (mk_simp_attr)
run_cmd mk_simp_attr `norm [`simp]
repo_name: UVM-M52/quiz-5-ftclark3 | path: /src/utils/default.lean | license: none | src_encoding: UTF-8 | language: Lean | length_bytes: 43 | extension: lean
import .cmp
import .int_refl
import .real
repo_name: gebner/mathlib | path: /src/order/copy.lean | license: Apache-2.0 | src_encoding: UTF-8 | language: Lean | length_bytes: 4,536 | extension: lean
/-
Copyright (c) 2020 Johan Commelin. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johan Commelin
-/
import order.conditionally_complete_lattice
/-!
# Tooling to make copies of lattice structures
Sometimes it is useful to make a copy of a lattice structure
where one replaces the data parts with provably equal definitions
that have better definitional properties.
-/
universe variable u
variables {α : Type u}
/-- A function to create a provably equal copy of a bounded lattice
with possibly different definitional equalities. -/
def bounded_lattice.copy (c : bounded_lattice α)
(le : α → α → Prop) (eq_le : le = @bounded_lattice.le α c)
(top : α) (eq_top : top = @bounded_lattice.top α c)
(bot : α) (eq_bot : bot = @bounded_lattice.bot α c)
(sup : α → α → α) (eq_sup : sup = @bounded_lattice.sup α c)
(inf : α → α → α) (eq_inf : inf = @bounded_lattice.inf α c) :
bounded_lattice α :=
begin
refine { le := le, top := top, bot := bot, sup := sup, inf := inf, .. },
all_goals { subst_vars, unfreezeI, cases c, assumption }
end
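/- A schematic usage sketch with hypothetical names: given an existing instance
`old : bounded_lattice α` and replacement data that unfold to the corresponding
fields of `old`, each equation can typically be discharged by `rfl`.
```lean
-- `my_le`, `my_top`, `my_bot`, `my_sup`, `my_inf` are hypothetical definitions
-- assumed to be definitionally equal to the fields of `old`.
def better (old : bounded_lattice α) : bounded_lattice α :=
bounded_lattice.copy old
  my_le  rfl
  my_top rfl
  my_bot rfl
  my_sup rfl
  my_inf rfl
```
-/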
/-- A function to create a provably equal copy of a distributive lattice
with possibly different definitional equalities. -/
def distrib_lattice.copy (c : distrib_lattice α)
(le : α → α → Prop) (eq_le : le = @distrib_lattice.le α c)
(sup : α → α → α) (eq_sup : sup = @distrib_lattice.sup α c)
(inf : α → α → α) (eq_inf : inf = @distrib_lattice.inf α c) :
distrib_lattice α :=
begin
refine { le := le, sup := sup, inf := inf, .. },
all_goals { subst_vars, unfreezeI, cases c, assumption }
end
/-- A function to create a provably equal copy of a complete lattice
with possibly different definitional equalities. -/
def complete_lattice.copy (c : complete_lattice α)
(le : α → α → Prop) (eq_le : le = @complete_lattice.le α c)
(top : α) (eq_top : top = @complete_lattice.top α c)
(bot : α) (eq_bot : bot = @complete_lattice.bot α c)
(sup : α → α → α) (eq_sup : sup = @complete_lattice.sup α c)
(inf : α → α → α) (eq_inf : inf = @complete_lattice.inf α c)
(Sup : set α → α) (eq_Sup : Sup = @complete_lattice.Sup α c)
(Inf : set α → α) (eq_Inf : Inf = @complete_lattice.Inf α c) :
complete_lattice α :=
begin
refine { le := le, top := top, bot := bot, sup := sup, inf := inf, Sup := Sup, Inf := Inf,
.. bounded_lattice.copy (@complete_lattice.to_bounded_lattice α c)
le eq_le top eq_top bot eq_bot sup eq_sup inf eq_inf,
.. },
all_goals { subst_vars, unfreezeI, cases c, assumption }
end
/-- A function to create a provably equal copy of a complete distributive lattice
with possibly different definitional equalities. -/
def complete_distrib_lattice.copy (c : complete_distrib_lattice α)
(le : α → α → Prop) (eq_le : le = @complete_distrib_lattice.le α c)
(top : α) (eq_top : top = @complete_distrib_lattice.top α c)
(bot : α) (eq_bot : bot = @complete_distrib_lattice.bot α c)
(sup : α → α → α) (eq_sup : sup = @complete_distrib_lattice.sup α c)
(inf : α → α → α) (eq_inf : inf = @complete_distrib_lattice.inf α c)
(Sup : set α → α) (eq_Sup : Sup = @complete_distrib_lattice.Sup α c)
(Inf : set α → α) (eq_Inf : Inf = @complete_distrib_lattice.Inf α c) :
complete_distrib_lattice α :=
begin
refine { le := le, top := top, bot := bot, sup := sup, inf := inf, Sup := Sup, Inf := Inf,
.. complete_lattice.copy (@complete_distrib_lattice.to_complete_lattice α c)
le eq_le top eq_top bot eq_bot sup eq_sup inf eq_inf Sup eq_Sup Inf eq_Inf,
.. },
all_goals { subst_vars, unfreezeI, cases c, assumption }
end
/-- A function to create a provably equal copy of a conditionally complete lattice
with possibly different definitional equalities. -/
def conditionally_complete_lattice.copy (c : conditionally_complete_lattice α)
(le : α → α → Prop) (eq_le : le = @conditionally_complete_lattice.le α c)
(sup : α → α → α) (eq_sup : sup = @conditionally_complete_lattice.sup α c)
(inf : α → α → α) (eq_inf : inf = @conditionally_complete_lattice.inf α c)
(Sup : set α → α) (eq_Sup : Sup = @conditionally_complete_lattice.Sup α c)
(Inf : set α → α) (eq_Inf : Inf = @conditionally_complete_lattice.Inf α c) :
conditionally_complete_lattice α :=
begin
refine { le := le, sup := sup, inf := inf, Sup := Sup, Inf := Inf, ..},
all_goals { subst_vars, unfreezeI, cases c, assumption }
end
repo_name: mcncm/mathlib | path: /src/data/set/basic.lean | license: Apache-2.0 | src_encoding: UTF-8 | language: Lean | length_bytes: 90,196 | extension: lean
/-
Copyright (c) 2014 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Leonardo de Moura
-/
import logic.unique
import order.boolean_algebra
/-!
# Basic properties of sets
Sets in Lean are homogeneous; all their elements have the same type. Sets whose elements
have type `X` are thus defined as `set X := X → Prop`. Note that this function need not
be decidable. The definition is in the core library.
This file provides some basic definitions related to sets and functions not present in the core
library, as well as extra lemmas for functions in the core library (empty set, univ, union,
intersection, insert, singleton, set-theoretic difference, complement, and powerset).
Note that a set is a term, not a type. There is a coercion from `set α` to `Type*` sending
`s` to the corresponding subtype `↥s`.
See also the file `set_theory/zfc.lean`, which contains an encoding of ZFC set theory in Lean.
## Main definitions
Notation used here:
- `f : α → β` is a function,
- `s : set α` and `s₁ s₂ : set α` are subsets of `α`
- `t : set β` is a subset of `β`.
Definitions in the file:
* `strict_subset s₁ s₂ : Prop` : the predicate `s₁ ⊆ s₂` but `s₁ ≠ s₂`.
* `nonempty s : Prop` : the predicate `s ≠ ∅`. Note that this is the preferred way to express the
fact that `s` has an element (see the Implementation Notes).
* `preimage f t : set α` : the preimage f⁻¹(t) (written `f ⁻¹' t` in Lean) of a subset of β.
* `subsingleton s : Prop` : the predicate saying that `s` has at most one element.
* `range f : set β` : the image of `univ` under `f`.
Also works for `{p : Prop} (f : p → α)` (unlike `image`)
* `prod s t : set (α × β)` : the subset `s × t`.
* `inclusion s₁ s₂ : ↥s₁ → ↥s₂` : the map `↥s₁ → ↥s₂` induced by an inclusion `s₁ ⊆ s₂`.
## Notation
* `f ⁻¹' t` for `preimage f t`
* `f '' s` for `image f s`
* `sᶜ` for the complement of `s`
## Implementation notes
* `s.nonempty` is to be preferred to `s ≠ ∅` or `∃ x, x ∈ s`. It has the advantage that
the `s.nonempty` dot notation can be used.
* For `s : set α`, do not use `subtype s`. Instead use `↥s` or `(s : Type*)` or `s`.
## Tags
set, sets, subset, subsets, image, preimage, pre-image, range, union, intersection, insert,
singleton, complement, powerset
-/
/-! ### Set coercion to a type -/
open function
universe variables u v w x
run_cmd do e ← tactic.get_env,
tactic.set_env $ e.mk_protected `set.compl
namespace set
variable {α : Type*}
instance : has_le (set α) := ⟨(⊆)⟩
instance : has_lt (set α) := ⟨λ s t, s ≤ t ∧ ¬t ≤ s⟩
instance {α : Type*} : boolean_algebra (set α) :=
{ sup := (∪),
le := (≤),
lt := (<),
inf := (∩),
bot := ∅,
compl := set.compl,
top := univ,
sdiff := (\),
.. (infer_instance : boolean_algebra (α → Prop)) }
@[simp] lemma bot_eq_empty : (⊥ : set α) = ∅ := rfl
@[simp] lemma sup_eq_union (s t : set α) : s ⊔ t = s ∪ t := rfl
@[simp] lemma inf_eq_inter (s t : set α) : s ⊓ t = s ∩ t := rfl
@[simp] lemma le_eq_subset (s t : set α) : s ≤ t = (s ⊆ t) := rfl
/-- Coercion from a set to the corresponding subtype. -/
instance {α : Type*} : has_coe_to_sort (set α) := ⟨_, λ s, {x // x ∈ s}⟩
end set
section set_coe
variables {α : Type u}
theorem set.set_coe_eq_subtype (s : set α) :
coe_sort.{(u+1) (u+2)} s = {x // x ∈ s} := rfl
@[simp] theorem set_coe.forall {s : set α} {p : s → Prop} :
(∀ x : s, p x) ↔ (∀ x (h : x ∈ s), p ⟨x, h⟩) :=
subtype.forall
@[simp] theorem set_coe.exists {s : set α} {p : s → Prop} :
(∃ x : s, p x) ↔ (∃ x (h : x ∈ s), p ⟨x, h⟩) :=
subtype.exists
theorem set_coe.exists' {s : set α} {p : Π x, x ∈ s → Prop} :
(∃ x (h : x ∈ s), p x h) ↔ (∃ x : s, p x x.2) :=
(@set_coe.exists _ _ $ λ x, p x.1 x.2).symm
theorem set_coe.forall' {s : set α} {p : Π x, x ∈ s → Prop} :
(∀ x (h : x ∈ s), p x h) ↔ (∀ x : s, p x x.2) :=
(@set_coe.forall _ _ $ λ x, p x.1 x.2).symm
@[simp] theorem set_coe_cast : ∀ {s t : set α} (H' : s = t) (H : @eq (Type u) s t) (x : s),
cast H x = ⟨x.1, H' ▸ x.2⟩
| s _ rfl _ ⟨x, h⟩ := rfl
theorem set_coe.ext {s : set α} {a b : s} : (↑a : α) = ↑b → a = b :=
subtype.eq
theorem set_coe.ext_iff {s : set α} {a b : s} : (↑a : α) = ↑b ↔ a = b :=
iff.intro set_coe.ext (assume h, h ▸ rfl)
end set_coe
/-- See also `subtype.prop` -/
lemma subtype.mem {α : Type*} {s : set α} (p : s) : (p : α) ∈ s := p.prop
lemma eq.subset {α} {s t : set α} : s = t → s ⊆ t :=
by { rintro rfl x hx, exact hx }
namespace set
variables {α : Type u} {β : Type v} {γ : Type w} {ι : Sort x} {a : α} {s t : set α}
instance : inhabited (set α) := ⟨∅⟩
@[ext]
theorem ext {a b : set α} (h : ∀ x, x ∈ a ↔ x ∈ b) : a = b :=
funext (assume x, propext (h x))
theorem ext_iff {s t : set α} : s = t ↔ ∀ x, x ∈ s ↔ x ∈ t :=
⟨λ h x, by rw h, ext⟩
@[trans] theorem mem_of_mem_of_subset {x : α} {s t : set α} (hx : x ∈ s) (h : s ⊆ t) : x ∈ t := h hx
/-! ### Lemmas about `mem` and `set_of` -/
@[simp] theorem mem_set_of_eq {a : α} {p : α → Prop} : a ∈ {a | p a} = p a := rfl
theorem nmem_set_of_eq {a : α} {P : α → Prop} : a ∉ {a : α | P a} = ¬ P a := rfl
@[simp] theorem set_of_mem_eq {s : set α} : {x | x ∈ s} = s := rfl
theorem set_of_set {s : set α} : set_of s = s := rfl
lemma set_of_app_iff {p : α → Prop} {x : α} : { x | p x } x ↔ p x := iff.rfl
theorem mem_def {a : α} {s : set α} : a ∈ s ↔ s a := iff.rfl
instance decidable_mem (s : set α) [H : decidable_pred s] : ∀ a, decidable (a ∈ s) := H
instance decidable_set_of (p : α → Prop) [H : decidable_pred p] : decidable_pred {a | p a} := H
@[simp] theorem set_of_subset_set_of {p q : α → Prop} :
{a | p a} ⊆ {a | q a} ↔ (∀a, p a → q a) := iff.rfl
@[simp] lemma sep_set_of {p q : α → Prop} : {a ∈ {a | p a } | q a} = {a | p a ∧ q a} := rfl
lemma set_of_and {p q : α → Prop} : {a | p a ∧ q a} = {a | p a} ∩ {a | q a} := rfl
lemma set_of_or {p q : α → Prop} : {a | p a ∨ q a} = {a | p a} ∪ {a | q a} := rfl
/-! ### Lemmas about subsets -/
-- TODO(Jeremy): write a tactic to unfold specific instances of generic notation?
theorem subset_def {s t : set α} : (s ⊆ t) = ∀ x, x ∈ s → x ∈ t := rfl
@[refl] theorem subset.refl (a : set α) : a ⊆ a := assume x, id
theorem subset.rfl {s : set α} : s ⊆ s := subset.refl s
@[trans] theorem subset.trans {a b c : set α} (ab : a ⊆ b) (bc : b ⊆ c) : a ⊆ c :=
assume x h, bc (ab h)
@[trans] theorem mem_of_eq_of_mem {x y : α} {s : set α} (hx : x = y) (h : y ∈ s) : x ∈ s :=
hx.symm ▸ h
theorem subset.antisymm {a b : set α} (h₁ : a ⊆ b) (h₂ : b ⊆ a) : a = b :=
ext (λ x, iff.intro (λ ina, h₁ ina) (λ inb, h₂ inb))
theorem subset.antisymm_iff {a b : set α} : a = b ↔ a ⊆ b ∧ b ⊆ a :=
⟨λ e, e ▸ ⟨subset.refl _, subset.refl _⟩,
λ ⟨h₁, h₂⟩, subset.antisymm h₁ h₂⟩
-- an alternative name
theorem eq_of_subset_of_subset {a b : set α} (h₁ : a ⊆ b) (h₂ : b ⊆ a) : a = b :=
subset.antisymm h₁ h₂
theorem mem_of_subset_of_mem {s₁ s₂ : set α} {a : α} : s₁ ⊆ s₂ → a ∈ s₁ → a ∈ s₂ :=
assume h₁ h₂, h₁ h₂
theorem not_subset : (¬ s ⊆ t) ↔ ∃a ∈ s, a ∉ t :=
by simp [subset_def, not_forall]
/-! ### Definition of strict subsets `s ⊂ t` and basic properties. -/
instance : has_ssubset (set α) := ⟨(<)⟩
@[simp] lemma lt_eq_ssubset (s t : set α) : s < t = (s ⊂ t) := rfl
theorem ssubset_def : (s ⊂ t) = (s ⊆ t ∧ ¬ (t ⊆ s)) := rfl
theorem eq_or_ssubset_of_subset (h : s ⊆ t) : s = t ∨ s ⊂ t :=
classical.by_cases
(λ H : t ⊆ s, or.inl $ subset.antisymm h H)
(λ H, or.inr ⟨h, H⟩)
lemma exists_of_ssubset {s t : set α} (h : s ⊂ t) : (∃x∈t, x ∉ s) :=
not_subset.1 h.2
lemma ssubset_iff_subset_ne {s t : set α} : s ⊂ t ↔ s ⊆ t ∧ s ≠ t :=
by split; simp [set.ssubset_def, ne.def, set.subset.antisymm_iff] {contextual := tt}
lemma ssubset_iff_of_subset {s t : set α} (h : s ⊆ t) : s ⊂ t ↔ ∃ x ∈ t, x ∉ s :=
⟨exists_of_ssubset, λ ⟨x, hxt, hxs⟩, ⟨h, λ h, hxs $ h hxt⟩⟩
theorem not_mem_empty (x : α) : ¬ (x ∈ (∅ : set α)) :=
assume h : x ∈ ∅, h
@[simp] theorem not_not_mem : ¬ (a ∉ s) ↔ a ∈ s :=
by { classical, exact not_not }
/-! ### Non-empty sets -/
/-- The property `s.nonempty` expresses the fact that the set `s` is not empty. It should be used
in theorem assumptions instead of `∃ x, x ∈ s` or `s ≠ ∅` as it gives access to a nice API thanks
to the dot notation. -/
protected def nonempty (s : set α) : Prop := ∃ x, x ∈ s
lemma nonempty_def : s.nonempty ↔ ∃ x, x ∈ s := iff.rfl
lemma nonempty_of_mem {x} (h : x ∈ s) : s.nonempty := ⟨x, h⟩
theorem nonempty.not_subset_empty : s.nonempty → ¬(s ⊆ ∅)
| ⟨x, hx⟩ hs := hs hx
theorem nonempty.ne_empty : s.nonempty → s ≠ ∅
| ⟨x, hx⟩ hs := by { rw hs at hx, exact hx }
/-- Extract a witness from `s.nonempty`. This function might be used instead of case analysis
on the argument. Note that it makes a proof depend on the `classical.choice` axiom. -/
protected noncomputable def nonempty.some (h : s.nonempty) : α := classical.some h
protected lemma nonempty.some_mem (h : s.nonempty) : h.some ∈ s := classical.some_spec h
lemma nonempty.mono (ht : s ⊆ t) (hs : s.nonempty) : t.nonempty := hs.imp ht
lemma nonempty_of_not_subset (h : ¬s ⊆ t) : (s \ t).nonempty :=
let ⟨x, xs, xt⟩ := not_subset.1 h in ⟨x, xs, xt⟩
lemma nonempty_of_ssubset (ht : s ⊂ t) : (t \ s).nonempty :=
nonempty_of_not_subset ht.2
lemma nonempty.of_diff (h : (s \ t).nonempty) : s.nonempty := h.imp $ λ _, and.left
lemma nonempty_of_ssubset' (ht : s ⊂ t) : t.nonempty := (nonempty_of_ssubset ht).of_diff
lemma nonempty.inl (hs : s.nonempty) : (s ∪ t).nonempty := hs.imp $ λ _, or.inl
lemma nonempty.inr (ht : t.nonempty) : (s ∪ t).nonempty := ht.imp $ λ _, or.inr
@[simp] lemma union_nonempty : (s ∪ t).nonempty ↔ s.nonempty ∨ t.nonempty := exists_or_distrib
lemma nonempty.left (h : (s ∩ t).nonempty) : s.nonempty := h.imp $ λ _, and.left
lemma nonempty.right (h : (s ∩ t).nonempty) : t.nonempty := h.imp $ λ _, and.right
lemma nonempty_inter_iff_exists_right : (s ∩ t).nonempty ↔ ∃ x : t, ↑x ∈ s :=
⟨λ ⟨x, xs, xt⟩, ⟨⟨x, xt⟩, xs⟩, λ ⟨⟨x, xt⟩, xs⟩, ⟨x, xs, xt⟩⟩
lemma nonempty_inter_iff_exists_left : (s ∩ t).nonempty ↔ ∃ x : s, ↑x ∈ t :=
⟨λ ⟨x, xs, xt⟩, ⟨⟨x, xs⟩, xt⟩, λ ⟨⟨x, xt⟩, xs⟩, ⟨x, xt, xs⟩⟩
lemma nonempty_iff_univ_nonempty : nonempty α ↔ (univ : set α).nonempty :=
⟨λ ⟨x⟩, ⟨x, trivial⟩, λ ⟨x, _⟩, ⟨x⟩⟩
@[simp] lemma univ_nonempty : ∀ [h : nonempty α], (univ : set α).nonempty
| ⟨x⟩ := ⟨x, trivial⟩
lemma nonempty.to_subtype (h : s.nonempty) : nonempty s :=
nonempty_subtype.2 h
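/- Small usage sketches (untested) for the `nonempty` API; the concrete sets are
assumed examples.
```lean
example : ({1, 2} : set ℕ).nonempty := ⟨1, or.inl rfl⟩
example (s : set ℕ) (h : (3 : ℕ) ∈ s) : s.nonempty := nonempty_of_mem h
```
-/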
/-! ### Lemmas about the empty set -/
theorem empty_def : (∅ : set α) = {x | false} := rfl
@[simp] theorem mem_empty_eq (x : α) : x ∈ (∅ : set α) = false := rfl
@[simp] theorem set_of_false : {a : α | false} = ∅ := rfl
theorem eq_empty_iff_forall_not_mem {s : set α} : s = ∅ ↔ ∀ x, x ∉ s :=
by simp [ext_iff]
@[simp] theorem empty_subset (s : set α) : ∅ ⊆ s :=
assume x, assume h, false.elim h
theorem subset_empty_iff {s : set α} : s ⊆ ∅ ↔ s = ∅ :=
by simp [subset.antisymm_iff]
theorem eq_empty_of_subset_empty {s : set α} : s ⊆ ∅ → s = ∅ :=
subset_empty_iff.1
theorem eq_empty_of_not_nonempty (h : ¬nonempty α) (s : set α) : s = ∅ :=
eq_empty_of_subset_empty $ λ x hx, h ⟨x⟩
lemma not_nonempty_iff_eq_empty {s : set α} : ¬s.nonempty ↔ s = ∅ :=
by simp only [set.nonempty, eq_empty_iff_forall_not_mem, not_exists]
lemma empty_not_nonempty : ¬(∅ : set α).nonempty :=
not_nonempty_iff_eq_empty.2 rfl
lemma eq_empty_or_nonempty (s : set α) : s = ∅ ∨ s.nonempty :=
classical.by_cases or.inr (λ h, or.inl $ not_nonempty_iff_eq_empty.1 h)
theorem ne_empty_iff_nonempty : s ≠ ∅ ↔ s.nonempty :=
(not_congr not_nonempty_iff_eq_empty.symm).trans not_not
theorem subset_eq_empty {s t : set α} (h : t ⊆ s) (e : s = ∅) : t = ∅ :=
subset_empty_iff.1 $ e ▸ h
theorem ball_empty_iff {p : α → Prop} :
(∀ x ∈ (∅ : set α), p x) ↔ true :=
by simp [iff_def]
/-!
### Universal set.
In Lean `@univ α` (or `univ : set α`) is the set that contains all elements of type `α`.
Mathematically it is the same as `α` but it has a different type.
-/
@[simp] theorem set_of_true : {x : α | true} = univ := rfl
@[simp] theorem mem_univ (x : α) : x ∈ @univ α := trivial
theorem empty_ne_univ [h : nonempty α] : (∅ : set α) ≠ univ :=
by simp [ext_iff]
@[simp] theorem subset_univ (s : set α) : s ⊆ univ := λ x H, trivial
theorem univ_subset_iff {s : set α} : univ ⊆ s ↔ s = univ :=
by simp [subset.antisymm_iff]
theorem eq_univ_of_univ_subset {s : set α} : univ ⊆ s → s = univ :=
univ_subset_iff.1
theorem eq_univ_iff_forall {s : set α} : s = univ ↔ ∀ x, x ∈ s :=
by simp [ext_iff]
theorem eq_univ_of_forall {s : set α} : (∀ x, x ∈ s) → s = univ := eq_univ_iff_forall.2
lemma eq_univ_of_subset {s t : set α} (h : s ⊆ t) (hs : s = univ) : t = univ :=
eq_univ_of_univ_subset $ hs ▸ h
@[simp] lemma univ_eq_empty_iff : (univ : set α) = ∅ ↔ ¬ nonempty α :=
eq_empty_iff_forall_not_mem.trans ⟨λ H ⟨x⟩, H x trivial, λ H x _, H ⟨x⟩⟩
lemma exists_mem_of_nonempty (α) : ∀ [nonempty α], ∃x:α, x ∈ (univ : set α)
| ⟨x⟩ := ⟨x, trivial⟩
instance univ_decidable : decidable_pred (@set.univ α) :=
λ x, is_true trivial
/-- `diagonal α` is the subset of `α × α` consisting of all pairs of the form `(a, a)`. -/
def diagonal (α : Type*) : set (α × α) := {p | p.1 = p.2}
@[simp]
lemma mem_diagonal {α : Type*} (x : α) : (x, x) ∈ diagonal α :=
by simp [diagonal]
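/- A usage sketch (untested): membership in `diagonal α` unfolds to equality of the
two components of the pair.
```lean
example : ((2, 2) : ℕ × ℕ) ∈ diagonal ℕ := rfl
example : ((1, 2) : ℕ × ℕ) ∈ diagonal ℕ ↔ (1 : ℕ) = 2 := iff.rfl
```
-/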
/-! ### Lemmas about union -/
theorem union_def {s₁ s₂ : set α} : s₁ ∪ s₂ = {a | a ∈ s₁ ∨ a ∈ s₂} := rfl
theorem mem_union_left {x : α} {a : set α} (b : set α) : x ∈ a → x ∈ a ∪ b := or.inl
theorem mem_union_right {x : α} {b : set α} (a : set α) : x ∈ b → x ∈ a ∪ b := or.inr
theorem mem_or_mem_of_mem_union {x : α} {a b : set α} (H : x ∈ a ∪ b) : x ∈ a ∨ x ∈ b := H
theorem mem_union.elim {x : α} {a b : set α} {P : Prop}
(H₁ : x ∈ a ∪ b) (H₂ : x ∈ a → P) (H₃ : x ∈ b → P) : P :=
or.elim H₁ H₂ H₃
theorem mem_union (x : α) (a b : set α) : x ∈ a ∪ b ↔ x ∈ a ∨ x ∈ b := iff.rfl
@[simp] theorem mem_union_eq (x : α) (a b : set α) : x ∈ a ∪ b = (x ∈ a ∨ x ∈ b) := rfl
@[simp] theorem union_self (a : set α) : a ∪ a = a :=
ext (assume x, or_self _)
@[simp] theorem union_empty (a : set α) : a ∪ ∅ = a :=
ext (assume x, or_false _)
@[simp] theorem empty_union (a : set α) : ∅ ∪ a = a :=
ext (assume x, false_or _)
theorem union_comm (a b : set α) : a ∪ b = b ∪ a :=
ext (assume x, or.comm)
theorem union_assoc (a b c : set α) : (a ∪ b) ∪ c = a ∪ (b ∪ c) :=
ext (assume x, or.assoc)
instance union_is_assoc : is_associative (set α) (∪) :=
⟨union_assoc⟩
instance union_is_comm : is_commutative (set α) (∪) :=
⟨union_comm⟩
theorem union_left_comm (s₁ s₂ s₃ : set α) : s₁ ∪ (s₂ ∪ s₃) = s₂ ∪ (s₁ ∪ s₃) :=
by finish
theorem union_right_comm (s₁ s₂ s₃ : set α) : (s₁ ∪ s₂) ∪ s₃ = (s₁ ∪ s₃) ∪ s₂ :=
by finish
theorem union_eq_self_of_subset_left {s t : set α} (h : s ⊆ t) : s ∪ t = t :=
by finish [subset_def, ext_iff, iff_def]
theorem union_eq_self_of_subset_right {s t : set α} (h : t ⊆ s) : s ∪ t = s :=
by finish [subset_def, ext_iff, iff_def]
@[simp] theorem subset_union_left (s t : set α) : s ⊆ s ∪ t := λ x, or.inl
@[simp] theorem subset_union_right (s t : set α) : t ⊆ s ∪ t := λ x, or.inr
theorem union_subset {s t r : set α} (sr : s ⊆ r) (tr : t ⊆ r) : s ∪ t ⊆ r :=
by finish [subset_def, union_def]
@[simp] theorem union_subset_iff {s t u : set α} : s ∪ t ⊆ u ↔ s ⊆ u ∧ t ⊆ u :=
by finish [iff_def, subset_def]
theorem union_subset_union {s₁ s₂ t₁ t₂ : set α} (h₁ : s₁ ⊆ s₂) (h₂ : t₁ ⊆ t₂) : s₁ ∪ t₁ ⊆ s₂ ∪ t₂ :=
by finish [subset_def]
theorem union_subset_union_left {s₁ s₂ : set α} (t) (h : s₁ ⊆ s₂) : s₁ ∪ t ⊆ s₂ ∪ t :=
union_subset_union h (by refl)
theorem union_subset_union_right (s) {t₁ t₂ : set α} (h : t₁ ⊆ t₂) : s ∪ t₁ ⊆ s ∪ t₂ :=
union_subset_union (by refl) h
lemma subset_union_of_subset_left {s t : set α} (h : s ⊆ t) (u : set α) : s ⊆ t ∪ u :=
subset.trans h (subset_union_left t u)
lemma subset_union_of_subset_right {s u : set α} (h : s ⊆ u) (t : set α) : s ⊆ t ∪ u :=
subset.trans h (subset_union_right t u)
@[simp] theorem union_empty_iff {s t : set α} : s ∪ t = ∅ ↔ s = ∅ ∧ t = ∅ :=
⟨by finish [ext_iff], by finish [ext_iff]⟩
/-! ### Lemmas about intersection -/
theorem inter_def {s₁ s₂ : set α} : s₁ ∩ s₂ = {a | a ∈ s₁ ∧ a ∈ s₂} := rfl
theorem mem_inter_iff (x : α) (a b : set α) : x ∈ a ∩ b ↔ x ∈ a ∧ x ∈ b := iff.rfl
@[simp] theorem mem_inter_eq (x : α) (a b : set α) : x ∈ a ∩ b = (x ∈ a ∧ x ∈ b) := rfl
theorem mem_inter {x : α} {a b : set α} (ha : x ∈ a) (hb : x ∈ b) : x ∈ a ∩ b :=
⟨ha, hb⟩
theorem mem_of_mem_inter_left {x : α} {a b : set α} (h : x ∈ a ∩ b) : x ∈ a :=
h.left
theorem mem_of_mem_inter_right {x : α} {a b : set α} (h : x ∈ a ∩ b) : x ∈ b :=
h.right
@[simp] theorem inter_self (a : set α) : a ∩ a = a :=
ext (assume x, and_self _)
@[simp] theorem inter_empty (a : set α) : a ∩ ∅ = ∅ :=
ext (assume x, and_false _)
@[simp] theorem empty_inter (a : set α) : ∅ ∩ a = ∅ :=
ext (assume x, false_and _)
theorem inter_comm (a b : set α) : a ∩ b = b ∩ a :=
ext (assume x, and.comm)
theorem inter_assoc (a b c : set α) : (a ∩ b) ∩ c = a ∩ (b ∩ c) :=
ext (assume x, and.assoc)
instance inter_is_assoc : is_associative (set α) (∩) :=
⟨inter_assoc⟩
instance inter_is_comm : is_commutative (set α) (∩) :=
⟨inter_comm⟩
theorem inter_left_comm (s₁ s₂ s₃ : set α) : s₁ ∩ (s₂ ∩ s₃) = s₂ ∩ (s₁ ∩ s₃) :=
by finish
theorem inter_right_comm (s₁ s₂ s₃ : set α) : (s₁ ∩ s₂) ∩ s₃ = (s₁ ∩ s₃) ∩ s₂ :=
by finish
@[simp] theorem inter_subset_left (s t : set α) : s ∩ t ⊆ s := λ x H, and.left H
@[simp] theorem inter_subset_right (s t : set α) : s ∩ t ⊆ t := λ x H, and.right H
theorem subset_inter {s t r : set α} (rs : r ⊆ s) (rt : r ⊆ t) : r ⊆ s ∩ t :=
by finish [subset_def, inter_def]
@[simp] theorem subset_inter_iff {s t r : set α} : r ⊆ s ∩ t ↔ r ⊆ s ∧ r ⊆ t :=
⟨λ h, ⟨subset.trans h (inter_subset_left _ _), subset.trans h (inter_subset_right _ _)⟩,
λ ⟨h₁, h₂⟩, subset_inter h₁ h₂⟩
@[simp] theorem inter_univ (a : set α) : a ∩ univ = a :=
ext (assume x, and_true _)
@[simp] theorem univ_inter (a : set α) : univ ∩ a = a :=
ext (assume x, true_and _)
theorem inter_subset_inter_left {s t : set α} (u : set α) (H : s ⊆ t) : s ∩ u ⊆ t ∩ u :=
by finish [subset_def]
theorem inter_subset_inter_right {s t : set α} (u : set α) (H : s ⊆ t) : u ∩ s ⊆ u ∩ t :=
by finish [subset_def]
theorem inter_subset_inter {s₁ s₂ t₁ t₂ : set α} (h₁ : s₁ ⊆ t₁) (h₂ : s₂ ⊆ t₂) : s₁ ∩ s₂ ⊆ t₁ ∩ t₂ :=
by finish [subset_def]
theorem inter_eq_self_of_subset_left {s t : set α} (h : s ⊆ t) : s ∩ t = s :=
by finish [subset_def, ext_iff, iff_def]
theorem inter_eq_self_of_subset_right {s t : set α} (h : t ⊆ s) : s ∩ t = t :=
by finish [subset_def, ext_iff, iff_def]
lemma inter_compl_nonempty_iff {s t : set α} : (s ∩ tᶜ).nonempty ↔ ¬ s ⊆ t :=
begin
split,
{ rintros ⟨x ,xs, xt⟩ sub,
exact xt (sub xs) },
{ intros h,
rcases not_subset.mp h with ⟨x, xs, xt⟩,
exact ⟨x, xs, xt⟩ }
end
theorem union_inter_cancel_left {s t : set α} : (s ∪ t) ∩ s = s :=
by finish [ext_iff, iff_def]
theorem union_inter_cancel_right {s t : set α} : (s ∪ t) ∩ t = t :=
by finish [ext_iff, iff_def]
/-! ### Distributivity laws -/
theorem inter_distrib_left (s t u : set α) : s ∩ (t ∪ u) = (s ∩ t) ∪ (s ∩ u) :=
ext (assume x, and_or_distrib_left)
theorem inter_distrib_right (s t u : set α) : (s ∪ t) ∩ u = (s ∩ u) ∪ (t ∩ u) :=
ext (assume x, or_and_distrib_right)
theorem union_distrib_left (s t u : set α) : s ∪ (t ∩ u) = (s ∪ t) ∩ (s ∪ u) :=
ext (assume x, or_and_distrib_left)
theorem union_distrib_right (s t u : set α) : (s ∩ t) ∪ u = (s ∪ u) ∩ (t ∪ u) :=
ext (assume x, and_or_distrib_right)
/-!
### Lemmas about `insert`
`insert α s` is the set `{α} ∪ s`.
-/
theorem insert_def (x : α) (s : set α) : insert x s = { y | y = x ∨ y ∈ s } := rfl
@[simp] theorem subset_insert (x : α) (s : set α) : s ⊆ insert x s :=
assume y ys, or.inr ys
theorem mem_insert (x : α) (s : set α) : x ∈ insert x s :=
or.inl rfl
theorem mem_insert_of_mem {x : α} {s : set α} (y : α) : x ∈ s → x ∈ insert y s := or.inr
theorem eq_or_mem_of_mem_insert {x a : α} {s : set α} : x ∈ insert a s → x = a ∨ x ∈ s := id
theorem mem_of_mem_insert_of_ne {x a : α} {s : set α} (xin : x ∈ insert a s) : x ≠ a → x ∈ s :=
by finish [insert_def]
@[simp] theorem mem_insert_iff {x a : α} {s : set α} : x ∈ insert a s ↔ (x = a ∨ x ∈ s) := iff.rfl
@[simp] theorem insert_eq_of_mem {a : α} {s : set α} (h : a ∈ s) : insert a s = s :=
by finish [ext_iff, iff_def]
lemma ne_insert_of_not_mem {s : set α} (t : set α) {a : α} (h : a ∉ s) :
s ≠ insert a t :=
by { contrapose! h, simp [h] }
theorem insert_subset : insert a s ⊆ t ↔ (a ∈ t ∧ s ⊆ t) :=
by simp [subset_def, or_imp_distrib, forall_and_distrib]
theorem insert_subset_insert (h : s ⊆ t) : insert a s ⊆ insert a t :=
assume a', or.imp_right (@h a')
theorem ssubset_iff_insert {s t : set α} : s ⊂ t ↔ ∃ a ∉ s, insert a s ⊆ t :=
begin
simp only [insert_subset, exists_and_distrib_right, ssubset_def, not_subset],
simp only [exists_prop, and_comm]
end
theorem ssubset_insert {s : set α} {a : α} (h : a ∉ s) : s ⊂ insert a s :=
ssubset_iff_insert.2 ⟨a, h, subset.refl _⟩
theorem insert_comm (a b : α) (s : set α) : insert a (insert b s) = insert b (insert a s) :=
by { ext, simp [or.left_comm] }
theorem insert_union : insert a s ∪ t = insert a (s ∪ t) :=
by { ext, simp [or.comm, or.left_comm] }
@[simp] theorem union_insert : s ∪ insert a t = insert a (s ∪ t) :=
by { ext, simp [or.comm, or.left_comm] }
theorem insert_nonempty (a : α) (s : set α) : (insert a s).nonempty :=
⟨a, mem_insert a s⟩
lemma insert_inter (x : α) (s t : set α) : insert x (s ∩ t) = insert x s ∩ insert x t :=
by { ext y, simp [←or_and_distrib_left] }
-- useful in proofs by induction
theorem forall_of_forall_insert {P : α → Prop} {a : α} {s : set α} (h : ∀ x, x ∈ insert a s → P x) :
∀ x, x ∈ s → P x :=
by finish
theorem forall_insert_of_forall {P : α → Prop} {a : α} {s : set α} (h : ∀ x, x ∈ s → P x) (ha : P a) :
∀ x, x ∈ insert a s → P x :=
by finish
theorem bex_insert_iff {P : α → Prop} {a : α} {s : set α} :
(∃ x ∈ insert a s, P x) ↔ (∃ x ∈ s, P x) ∨ P a :=
by finish [iff_def]
theorem ball_insert_iff {P : α → Prop} {a : α} {s : set α} :
(∀ x ∈ insert a s, P x) ↔ P a ∧ (∀x ∈ s, P x) :=
by finish [iff_def]
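/- Small usage sketches (untested) for the `insert` API.
```lean
example : (3 : ℕ) ∈ insert 3 ({1, 2} : set ℕ) := mem_insert 3 _
example (s : set ℕ) (h : (2 : ℕ) ∈ s) : insert 2 s = s := insert_eq_of_mem h
```
-/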
/-! ### Lemmas about singletons -/
theorem singleton_def (a : α) : ({a} : set α) = insert a ∅ :=
(insert_emptyc_eq _).symm
@[simp] theorem mem_singleton_iff {a b : α} : a ∈ ({b} : set α) ↔ a = b :=
iff.rfl
@[simp]
lemma set_of_eq_eq_singleton {a : α} : {n | n = a} = {a} :=
set.ext $ λ n, (set.mem_singleton_iff).symm
-- TODO: again, annotation needed
@[simp] theorem mem_singleton (a : α) : a ∈ ({a} : set α) := by finish
theorem eq_of_mem_singleton {x y : α} (h : x ∈ ({y} : set α)) : x = y :=
by finish
@[simp] theorem singleton_eq_singleton_iff {x y : α} : {x} = ({y} : set α) ↔ x = y :=
by finish [ext_iff, iff_def]
theorem mem_singleton_of_eq {x y : α} (H : x = y) : x ∈ ({y} : set α) :=
by finish
theorem insert_eq (x : α) (s : set α) : insert x s = ({x} : set α) ∪ s :=
by finish [ext_iff, or_comm]
@[simp] theorem pair_eq_singleton (a : α) : ({a, a} : set α) = {a} :=
by finish
theorem pair_comm (a b : α) : ({a, b} : set α) = {b, a} :=
ext $ λ x, or_comm _ _
@[simp] theorem singleton_nonempty (a : α) : ({a} : set α).nonempty :=
⟨a, rfl⟩
@[simp] theorem singleton_subset_iff {a : α} {s : set α} : {a} ⊆ s ↔ a ∈ s :=
⟨λh, h (by simp), λh b e, by { rw [mem_singleton_iff] at e, simp [*] }⟩
theorem set_compr_eq_eq_singleton {a : α} : {b | b = a} = {a} :=
by { ext, simp }
@[simp] theorem singleton_union : {a} ∪ s = insert a s :=
rfl
@[simp] theorem union_singleton : s ∪ {a} = insert a s :=
by rw [union_comm, singleton_union]
theorem singleton_inter_eq_empty : {a} ∩ s = ∅ ↔ a ∉ s :=
by simp [eq_empty_iff_forall_not_mem]
theorem inter_singleton_eq_empty : s ∩ {a} = ∅ ↔ a ∉ s :=
by rw [inter_comm, singleton_inter_eq_empty]
lemma nmem_singleton_empty {s : set α} : s ∉ ({∅} : set (set α)) ↔ s.nonempty :=
by rw [mem_singleton_iff, ← ne.def, ne_empty_iff_nonempty]
instance unique_singleton (a : α) : unique ↥({a} : set α) :=
{ default := ⟨a, mem_singleton a⟩,
uniq :=
begin
intros x,
apply subtype.ext,
apply eq_of_mem_singleton (subtype.mem x),
end}
lemma eq_singleton_iff_unique_mem {s : set α} {a : α} :
s = {a} ↔ a ∈ s ∧ ∀ x ∈ s, x = a :=
by simp [ext_iff, @iff_def (_ ∈ s), forall_and_distrib, and_comm]
/-! ### Lemmas about sets defined as `{x ∈ s | p x}`. -/
theorem mem_sep {s : set α} {p : α → Prop} {x : α} (xs : x ∈ s) (px : p x) : x ∈ {x ∈ s | p x} :=
⟨xs, px⟩
@[simp] theorem sep_mem_eq {s t : set α} : {x ∈ s | x ∈ t} = s ∩ t := rfl
@[simp] theorem mem_sep_eq {s : set α} {p : α → Prop} {x : α} : x ∈ {x ∈ s | p x} = (x ∈ s ∧ p x) := rfl
theorem mem_sep_iff {s : set α} {p : α → Prop} {x : α} : x ∈ {x ∈ s | p x} ↔ x ∈ s ∧ p x :=
iff.rfl
theorem eq_sep_of_subset {s t : set α} (ssubt : s ⊆ t) : s = {x ∈ t | x ∈ s} :=
by finish [ext_iff, iff_def, subset_def]
theorem sep_subset (s : set α) (p : α → Prop) : {x ∈ s | p x} ⊆ s :=
assume x, and.left
theorem forall_not_of_sep_empty {s : set α} {p : α → Prop} (h : {x ∈ s | p x} = ∅) :
∀ x ∈ s, ¬ p x :=
by finish [ext_iff]
@[simp] lemma sep_univ {α} {p : α → Prop} : {a ∈ (univ : set α) | p a} = {a | p a} :=
by { ext, simp }
/-! ### Lemmas about complement -/
theorem mem_compl {s : set α} {x : α} (h : x ∉ s) : x ∈ sᶜ := h
lemma compl_set_of {α} (p : α → Prop) : {a | p a}ᶜ = { a | ¬ p a } := rfl
theorem not_mem_of_mem_compl {s : set α} {x : α} (h : x ∈ sᶜ) : x ∉ s := h
@[simp] theorem mem_compl_eq (s : set α) (x : α) : x ∈ sᶜ = (x ∉ s) := rfl
theorem mem_compl_iff (s : set α) (x : α) : x ∈ sᶜ ↔ x ∉ s := iff.rfl
@[simp] theorem inter_compl_self (s : set α) : s ∩ sᶜ = ∅ :=
by finish [ext_iff]
@[simp] theorem compl_inter_self (s : set α) : sᶜ ∩ s = ∅ :=
by finish [ext_iff]
@[simp] theorem compl_empty : (∅ : set α)ᶜ = univ :=
by finish [ext_iff]
@[simp] theorem compl_union (s t : set α) : (s ∪ t)ᶜ = sᶜ ∩ tᶜ :=
by finish [ext_iff]
local attribute [simp] -- Will be generalized to lattices in `compl_compl'`
theorem compl_compl (s : set α) : sᶜᶜ = s :=
by finish [ext_iff]
-- ditto
theorem compl_inter (s t : set α) : (s ∩ t)ᶜ = sᶜ ∪ tᶜ :=
by finish [ext_iff]
@[simp] theorem compl_univ : (univ : set α)ᶜ = ∅ :=
by finish [ext_iff]
lemma compl_empty_iff {s : set α} : sᶜ = ∅ ↔ s = univ :=
by { split, intro h, rw [←compl_compl s, h, compl_empty], intro h, rw [h, compl_univ] }
lemma compl_univ_iff {s : set α} : sᶜ = univ ↔ s = ∅ :=
by rw [←compl_empty_iff, compl_compl]
lemma nonempty_compl {s : set α} : sᶜ.nonempty ↔ s ≠ univ :=
ne_empty_iff_nonempty.symm.trans $ not_congr $ compl_empty_iff
lemma mem_compl_singleton_iff {a x : α} : x ∈ ({a} : set α)ᶜ ↔ x ≠ a :=
not_iff_not_of_iff mem_singleton_iff
lemma compl_singleton_eq (a : α) : ({a} : set α)ᶜ = {x | x ≠ a} :=
ext $ λ x, mem_compl_singleton_iff
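/- Small usage sketches (untested) for the complement API.
```lean
example (s : set ℕ) (h : (5 : ℕ) ∉ s) : (5 : ℕ) ∈ sᶜ := mem_compl h
example (s : set ℕ) (h : (5 : ℕ) ∈ sᶜ) : (5 : ℕ) ∉ s := not_mem_of_mem_compl h
```
-/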
theorem union_eq_compl_compl_inter_compl (s t : set α) : s ∪ t = (sᶜ ∩ tᶜ)ᶜ :=
by simp [compl_inter, compl_compl]
theorem inter_eq_compl_compl_union_compl (s t : set α) : s ∩ t = (sᶜ ∪ tᶜ)ᶜ :=
by simp [compl_compl]
@[simp] theorem union_compl_self (s : set α) : s ∪ sᶜ = univ :=
by finish [ext_iff]
@[simp] theorem compl_union_self (s : set α) : sᶜ ∪ s = univ :=
by finish [ext_iff]
theorem compl_comp_compl : compl ∘ compl = @id (set α) :=
funext compl_compl
theorem compl_subset_comm {s t : set α} : sᶜ ⊆ t ↔ tᶜ ⊆ s :=
by haveI := classical.prop_decidable; exact
forall_congr (λ a, not_imp_comm)
lemma compl_subset_compl {s t : set α} : sᶜ ⊆ tᶜ ↔ t ⊆ s :=
by rw [compl_subset_comm, compl_compl]
theorem compl_subset_iff_union {s t : set α} : sᶜ ⊆ t ↔ s ∪ t = univ :=
iff.symm $ eq_univ_iff_forall.trans $ forall_congr $ λ a,
by haveI := classical.prop_decidable; exact or_iff_not_imp_left
theorem subset_compl_comm {s t : set α} : s ⊆ tᶜ ↔ t ⊆ sᶜ :=
forall_congr $ λ a, imp_not_comm
theorem subset_compl_iff_disjoint {s t : set α} : s ⊆ tᶜ ↔ s ∩ t = ∅ :=
iff.trans (forall_congr $ λ a, and_imp.symm) subset_empty_iff
lemma subset_compl_singleton_iff {a : α} {s : set α} : s ⊆ {a}ᶜ ↔ a ∉ s :=
by { rw subset_compl_comm, simp }
theorem inter_subset (a b c : set α) : a ∩ b ⊆ c ↔ a ⊆ bᶜ ∪ c :=
begin
classical,
split,
{ intros h x xa, by_cases h' : x ∈ b, simp [h ⟨xa, h'⟩], simp [h'] },
intros h x, rintro ⟨xa, xb⟩, cases h xa, contradiction, assumption
end
/-! ### Lemmas about set difference -/
theorem diff_eq (s t : set α) : s \ t = s ∩ tᶜ := rfl
@[simp] theorem mem_diff {s t : set α} (x : α) : x ∈ s \ t ↔ x ∈ s ∧ x ∉ t := iff.rfl
theorem mem_diff_of_mem {s t : set α} {x : α} (h1 : x ∈ s) (h2 : x ∉ t) : x ∈ s \ t :=
⟨h1, h2⟩
theorem mem_of_mem_diff {s t : set α} {x : α} (h : x ∈ s \ t) : x ∈ s :=
h.left
theorem not_mem_of_mem_diff {s t : set α} {x : α} (h : x ∈ s \ t) : x ∉ t :=
h.right
theorem diff_eq_compl_inter {s t : set α} : s \ t = tᶜ ∩ s :=
by rw [diff_eq, inter_comm]
theorem nonempty_diff {s t : set α} : (s \ t).nonempty ↔ ¬ (s ⊆ t) :=
⟨λ ⟨x, xs, xt⟩, not_subset.2 ⟨x, xs, xt⟩,
λ h, let ⟨x, xs, xt⟩ := not_subset.1 h in ⟨x, xs, xt⟩⟩
theorem union_diff_cancel' {s t u : set α} (h₁ : s ⊆ t) (h₂ : t ⊆ u) : t ∪ (u \ s) = u :=
by finish [ext_iff, iff_def, subset_def]
theorem union_diff_cancel {s t : set α} (h : s ⊆ t) : s ∪ (t \ s) = t :=
union_diff_cancel' (subset.refl s) h
theorem union_diff_cancel_left {s t : set α} (h : s ∩ t ⊆ ∅) : (s ∪ t) \ s = t :=
by finish [ext_iff, iff_def, subset_def]
theorem union_diff_cancel_right {s t : set α} (h : s ∩ t ⊆ ∅) : (s ∪ t) \ t = s :=
by finish [ext_iff, iff_def, subset_def]
theorem union_diff_left {s t : set α} : (s ∪ t) \ s = t \ s :=
by finish [ext_iff, iff_def]
theorem union_diff_right {s t : set α} : (s ∪ t) \ t = s \ t :=
by finish [ext_iff, iff_def]
theorem union_diff_distrib {s t u : set α} : (s ∪ t) \ u = s \ u ∪ t \ u :=
inter_distrib_right _ _ _
theorem inter_union_distrib_left {s t u : set α} : s ∩ (t ∪ u) = (s ∩ t) ∪ (s ∩ u) :=
set.ext $ λ _, and_or_distrib_left
theorem inter_union_distrib_right {s t u : set α} : (s ∩ t) ∪ u = (s ∪ u) ∩ (t ∪ u) :=
set.ext $ λ _, and_or_distrib_right
theorem union_inter_distrib_left {s t u : set α} : s ∪ (t ∩ u) = (s ∪ t) ∩ (s ∪ u) :=
set.ext $ λ _, or_and_distrib_left
theorem union_inter_distrib_right {s t u : set α} : (s ∪ t) ∩ u = (s ∩ u) ∪ (t ∩ u) :=
set.ext $ λ _, or_and_distrib_right
theorem inter_diff_assoc (a b c : set α) : (a ∩ b) \ c = a ∩ (b \ c) :=
inter_assoc _ _ _
theorem inter_diff_self (a b : set α) : a ∩ (b \ a) = ∅ :=
by finish [ext_iff]
theorem inter_union_diff (s t : set α) : (s ∩ t) ∪ (s \ t) = s :=
by finish [ext_iff, iff_def]
theorem inter_union_compl (s t : set α) : (s ∩ t) ∪ (s ∩ tᶜ) = s := inter_union_diff _ _
theorem diff_subset (s t : set α) : s \ t ⊆ s :=
by finish [subset_def]
theorem diff_subset_diff {s₁ s₂ t₁ t₂ : set α} : s₁ ⊆ s₂ → t₂ ⊆ t₁ → s₁ \ t₁ ⊆ s₂ \ t₂ :=
by finish [subset_def]
theorem diff_subset_diff_left {s₁ s₂ t : set α} (h : s₁ ⊆ s₂) : s₁ \ t ⊆ s₂ \ t :=
diff_subset_diff h (by refl)
theorem diff_subset_diff_right {s t u : set α} (h : t ⊆ u) : s \ u ⊆ s \ t :=
diff_subset_diff (subset.refl s) h
theorem compl_eq_univ_diff (s : set α) : sᶜ = univ \ s :=
by finish [ext_iff]
@[simp] lemma empty_diff (s : set α) : (∅ \ s : set α) = ∅ :=
eq_empty_of_subset_empty $ assume x ⟨hx, _⟩, hx
theorem diff_eq_empty {s t : set α} : s \ t = ∅ ↔ s ⊆ t :=
⟨assume h x hx, classical.by_contradiction $ assume : x ∉ t, show x ∈ (∅ : set α), from h ▸ ⟨hx, this⟩,
assume h, eq_empty_of_subset_empty $ assume x ⟨hx, hnx⟩, hnx $ h hx⟩
@[simp] theorem diff_empty {s : set α} : s \ ∅ = s :=
ext $ assume x, ⟨assume ⟨hx, _⟩, hx, assume h, ⟨h, not_false⟩⟩
theorem diff_diff {u : set α} : s \ t \ u = s \ (t ∪ u) :=
ext $ by simp [not_or_distrib, and.comm, and.left_comm]
-- the following statement contains parentheses to help the reader
lemma diff_diff_comm {s t u : set α} : (s \ t) \ u = (s \ u) \ t :=
by simp_rw [diff_diff, union_comm]
lemma diff_subset_iff {s t u : set α} : s \ t ⊆ u ↔ s ⊆ t ∪ u :=
⟨assume h x xs, classical.by_cases or.inl (assume nxt, or.inr (h ⟨xs, nxt⟩)),
assume h x ⟨xs, nxt⟩, or.resolve_left (h xs) nxt⟩
lemma subset_diff_union (s t : set α) : s ⊆ (s \ t) ∪ t :=
by rw [union_comm, ←diff_subset_iff]
@[simp] lemma diff_singleton_subset_iff {x : α} {s t : set α} : s \ {x} ⊆ t ↔ s ⊆ insert x t :=
by { rw [←union_singleton, union_comm], apply diff_subset_iff }
lemma subset_diff_singleton {x : α} {s t : set α} (h : s ⊆ t) (hx : x ∉ s) : s ⊆ t \ {x} :=
subset_inter h $ subset_compl_comm.1 $ singleton_subset_iff.2 hx
lemma subset_insert_diff_singleton (x : α) (s : set α) : s ⊆ insert x (s \ {x}) :=
by rw [←diff_singleton_subset_iff]
lemma diff_subset_comm {s t u : set α} : s \ t ⊆ u ↔ s \ u ⊆ t :=
by rw [diff_subset_iff, diff_subset_iff, union_comm]
lemma diff_inter {s t u : set α} : s \ (t ∩ u) = (s \ t) ∪ (s \ u) :=
ext $ λ x, by simp [not_and_distrib, and_or_distrib_left]
lemma diff_inter_diff {s t u : set α} : s \ t ∩ (s \ u) = s \ (t ∪ u) :=
by { ext x, simp only [mem_inter_eq, mem_union_eq, mem_diff, not_or_distrib, and.left_comm,
and.assoc, and_self_left] }
lemma diff_compl : s \ tᶜ = s ∩ t := by rw [diff_eq, compl_compl]
lemma diff_diff_right {s t u : set α} : s \ (t \ u) = (s \ t) ∪ (s ∩ u) :=
by rw [diff_eq t u, diff_inter, diff_compl]
@[simp] theorem insert_diff_of_mem (s) (h : a ∈ t) : insert a s \ t = s \ t :=
by { ext, split; simp [or_imp_distrib, h] {contextual := tt} }
theorem insert_diff_of_not_mem (s) (h : a ∉ t) : insert a s \ t = insert a (s \ t) :=
begin
classical,
ext x,
by_cases h' : x ∈ t,
{ have : x ≠ a,
{ assume H,
rw H at h',
exact h h' },
simp [h, h', this] },
{ simp [h, h'] }
end
lemma insert_diff_self_of_not_mem {a : α} {s : set α} (h : a ∉ s) :
insert a s \ {a} = s :=
by { ext, simp [and_iff_left_of_imp (λ hx : x ∈ s, show x ≠ a, from λ hxa, h $ hxa ▸ hx)] }
theorem union_diff_self {s t : set α} : s ∪ (t \ s) = s ∪ t :=
by finish [ext_iff, iff_def]
theorem diff_union_self {s t : set α} : (s \ t) ∪ t = s ∪ t :=
by rw [union_comm, union_diff_self, union_comm]
theorem diff_inter_self {a b : set α} : (b \ a) ∩ a = ∅ :=
by { ext, by simp [iff_def] {contextual:=tt} }
theorem diff_inter_self_eq_diff {s t : set α} : s \ (t ∩ s) = s \ t :=
by { ext, simp [iff_def] {contextual := tt} }
theorem diff_self_inter {s t : set α} : s \ (s ∩ t) = s \ t :=
by rw [inter_comm, diff_inter_self_eq_diff]
theorem diff_eq_self {s t : set α} : s \ t = s ↔ t ∩ s ⊆ ∅ :=
by finish [ext_iff, iff_def, subset_def]
@[simp] theorem diff_singleton_eq_self {a : α} {s : set α} (h : a ∉ s) : s \ {a} = s :=
diff_eq_self.2 $ by simp [singleton_inter_eq_empty.2 h]
@[simp] theorem insert_diff_singleton {a : α} {s : set α} :
insert a (s \ {a}) = insert a s :=
by simp [insert_eq, union_diff_self, -union_singleton, -singleton_union]
@[simp] lemma diff_self {s : set α} : s \ s = ∅ := by { ext, simp }
lemma diff_diff_cancel_left {s t : set α} (h : s ⊆ t) : t \ (t \ s) = s :=
by simp only [diff_diff_right, diff_self, inter_eq_self_of_subset_right h, empty_union]
lemma mem_diff_singleton {x y : α} {s : set α} : x ∈ s \ {y} ↔ (x ∈ s ∧ x ≠ y) :=
iff.rfl
lemma mem_diff_singleton_empty {s : set α} {t : set (set α)} :
s ∈ t \ {∅} ↔ (s ∈ t ∧ s.nonempty) :=
mem_diff_singleton.trans $ and_congr iff.rfl ne_empty_iff_nonempty
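/- A usage sketch (untested) for set difference.
```lean
example (s t : set ℕ) (h : (1 : ℕ) ∈ s) (h' : (1 : ℕ) ∉ t) : (1 : ℕ) ∈ s \ t :=
mem_diff_of_mem h h'
```
-/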
/-! ### Powerset -/
theorem mem_powerset {x s : set α} (h : x ⊆ s) : x ∈ powerset s := h
theorem subset_of_mem_powerset {x s : set α} (h : x ∈ powerset s) : x ⊆ s := h
@[simp] theorem mem_powerset_iff (x s : set α) : x ∈ powerset s ↔ x ⊆ s := iff.rfl
theorem powerset_inter (s t : set α) : 𝒫 (s ∩ t) = 𝒫 s ∩ 𝒫 t :=
ext $ λ u, subset_inter_iff
@[simp] theorem powerset_mono : 𝒫 s ⊆ 𝒫 t ↔ s ⊆ t :=
⟨λ h, h (subset.refl s), λ h u hu, subset.trans hu h⟩
theorem monotone_powerset : monotone (powerset : set α → set (set α)) :=
λ s t, powerset_mono.2
@[simp] theorem powerset_nonempty : (𝒫 s).nonempty :=
⟨∅, empty_subset s⟩
@[simp] theorem powerset_empty : 𝒫 (∅ : set α) = {∅} :=
ext $ λ s, subset_empty_iff
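/- Small usage sketches (untested) for the powerset API.
```lean
example (s t : set ℕ) (h : s ⊆ t) : s ∈ 𝒫 t := mem_powerset h
example (s t : set ℕ) (h : s ⊆ t) : 𝒫 s ⊆ 𝒫 t := powerset_mono.2 h
```
-/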
/-! ### Inverse image -/
/-- The preimage of `s : set β` by `f : α → β`, written `f ⁻¹' s`,
is the set of `x : α` such that `f x ∈ s`. -/
def preimage {α : Type u} {β : Type v} (f : α → β) (s : set β) : set α := {x | f x ∈ s}
infix ` ⁻¹' `:80 := preimage
section preimage
variables {f : α → β} {g : β → γ}
@[simp] theorem preimage_empty : f ⁻¹' ∅ = ∅ := rfl
@[simp] theorem mem_preimage {s : set β} {a : α} : (a ∈ f ⁻¹' s) ↔ (f a ∈ s) := iff.rfl
lemma preimage_congr {f g : α → β} {s : set β} (h : ∀ (x : α), f x = g x) : f ⁻¹' s = g ⁻¹' s :=
by { congr' with x, apply_assumption }
theorem preimage_mono {s t : set β} (h : s ⊆ t) : f ⁻¹' s ⊆ f ⁻¹' t :=
assume x hx, h hx
@[simp] theorem preimage_univ : f ⁻¹' univ = univ := rfl
theorem subset_preimage_univ {s : set α} : s ⊆ f ⁻¹' univ := subset_univ _
@[simp] theorem preimage_inter {s t : set β} : f ⁻¹' (s ∩ t) = f ⁻¹' s ∩ f ⁻¹' t := rfl
@[simp] theorem preimage_union {s t : set β} : f ⁻¹' (s ∪ t) = f ⁻¹' s ∪ f ⁻¹' t := rfl
@[simp] theorem preimage_compl {s : set β} : f ⁻¹' sᶜ = (f ⁻¹' s)ᶜ := rfl
@[simp] theorem preimage_diff (f : α → β) (s t : set β) :
f ⁻¹' (s \ t) = f ⁻¹' s \ f ⁻¹' t := rfl
@[simp] theorem preimage_set_of_eq {p : α → Prop} {f : β → α} : f ⁻¹' {a | p a} = {a | p (f a)} :=
rfl
@[simp] theorem preimage_id {s : set α} : id ⁻¹' s = s := rfl
@[simp] theorem preimage_id' {s : set α} : (λ x, x) ⁻¹' s = s := rfl
theorem preimage_const_of_mem {b : β} {s : set β} (h : b ∈ s) :
(λ (x : α), b) ⁻¹' s = univ :=
eq_univ_of_forall $ λ x, h
theorem preimage_const_of_not_mem {b : β} {s : set β} (h : b ∉ s) :
(λ (x : α), b) ⁻¹' s = ∅ :=
eq_empty_of_subset_empty $ λ x hx, h hx
theorem preimage_const (b : β) (s : set β) [decidable (b ∈ s)] :
(λ (x : α), b) ⁻¹' s = if b ∈ s then univ else ∅ :=
by { split_ifs with hb hb, exacts [preimage_const_of_mem hb, preimage_const_of_not_mem hb] }
theorem preimage_comp {s : set γ} : (g ∘ f) ⁻¹' s = f ⁻¹' (g ⁻¹' s) := rfl
lemma preimage_preimage {g : β → γ} {f : α → β} {s : set γ} :
f ⁻¹' (g ⁻¹' s) = (λ x, g (f x)) ⁻¹' s :=
preimage_comp.symm
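-- Illustrative usage sketch (added example, not from the original file): composing
-- preimages composes the functions, and the identity below holds definitionally.
example (u v : ℕ → ℕ) (s : set ℕ) : u ⁻¹' (v ⁻¹' s) = {n | v (u n) ∈ s} := rfl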
theorem eq_preimage_subtype_val_iff {p : α → Prop} {s : set (subtype p)} {t : set α} :
s = subtype.val ⁻¹' t ↔ (∀x (h : p x), (⟨x, h⟩ : subtype p) ∈ s ↔ x ∈ t) :=
⟨assume s_eq x h, by { rw [s_eq], simp },
assume h, ext $ λ ⟨x, hx⟩, by simp [h]⟩
lemma preimage_coe_coe_diagonal {α : Type*} (s : set α) :
(prod.map coe coe) ⁻¹' (diagonal α) = diagonal s :=
begin
ext ⟨⟨x, x_in⟩, ⟨y, y_in⟩⟩,
simp [set.diagonal],
end
end preimage
/-! ### Image of a set under a function -/
section image
infix ` '' `:80 := image
theorem mem_image_iff_bex {f : α → β} {s : set α} {y : β} :
y ∈ f '' s ↔ ∃ x (_ : x ∈ s), f x = y := bex_def.symm
theorem mem_image_eq (f : α → β) (s : set α) (y: β) : y ∈ f '' s = ∃ x, x ∈ s ∧ f x = y := rfl
@[simp] theorem mem_image (f : α → β) (s : set α) (y : β) :
y ∈ f '' s ↔ ∃ x, x ∈ s ∧ f x = y := iff.rfl
lemma image_eta (f : α → β) : f '' s = (λ x, f x) '' s := rfl
theorem mem_image_of_mem (f : α → β) {x : α} {a : set α} (h : x ∈ a) : f x ∈ f '' a :=
⟨_, h, rfl⟩
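-- Illustrative usage sketch (added example, not from the original file): a concrete
-- image membership obtained by applying `mem_image_of_mem` to the successor map.
example {s : set ℕ} {n : ℕ} (hn : n ∈ s) : n + 1 ∈ (λ m, m + 1) '' s :=
mem_image_of_mem (λ m, m + 1) hn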
theorem mem_image_of_injective {f : α → β} {a : α} {s : set α} (hf : injective f) :
f a ∈ f '' s ↔ a ∈ s :=
iff.intro
(assume ⟨b, hb, eq⟩, (hf eq) ▸ hb)
(assume h, mem_image_of_mem _ h)
theorem ball_image_of_ball {f : α → β} {s : set α} {p : β → Prop}
(h : ∀ x ∈ s, p (f x)) : ∀ y ∈ f '' s, p y :=
by finish [mem_image_eq]
theorem ball_image_iff {f : α → β} {s : set α} {p : β → Prop} :
(∀ y ∈ f '' s, p y) ↔ (∀ x ∈ s, p (f x)) :=
iff.intro
(assume h a ha, h _ $ mem_image_of_mem _ ha)
(assume h b ⟨a, ha, eq⟩, eq ▸ h a ha)
theorem bex_image_iff {f : α → β} {s : set α} {p : β → Prop} :
(∃ y ∈ f '' s, p y) ↔ (∃ x ∈ s, p (f x)) :=
by simp
theorem mem_image_elim {f : α → β} {s : set α} {C : β → Prop} (h : ∀ (x : α), x ∈ s → C (f x)) :
∀{y : β}, y ∈ f '' s → C y
| ._ ⟨a, a_in, rfl⟩ := h a a_in
theorem mem_image_elim_on {f : α → β} {s : set α} {C : β → Prop} {y : β} (h_y : y ∈ f '' s)
(h : ∀ (x : α), x ∈ s → C (f x)) : C y :=
mem_image_elim h h_y
@[congr] lemma image_congr {f g : α → β} {s : set α}
(h : ∀a∈s, f a = g a) : f '' s = g '' s :=
by safe [ext_iff, iff_def]
/-- A common special case of `image_congr` -/
lemma image_congr' {f g : α → β} {s : set α} (h : ∀ (x : α), f x = g x) : f '' s = g '' s :=
image_congr (λx _, h x)
theorem image_comp (f : β → γ) (g : α → β) (a : set α) : (f ∘ g) '' a = f '' (g '' a) :=
subset.antisymm
(ball_image_of_ball $ assume a ha, mem_image_of_mem _ $ mem_image_of_mem _ ha)
(ball_image_of_ball $ ball_image_of_ball $ assume a ha, mem_image_of_mem _ ha)
/-- A variant of `image_comp`, useful for rewriting -/
lemma image_image (g : β → γ) (f : α → β) (s : set α) : g '' (f '' s) = (λ x, g (f x)) '' s :=
(image_comp g f s).symm
/-- Image is monotone with respect to `⊆`. See `set.monotone_image` for the statement in
terms of `≤`. -/
theorem image_subset {a b : set α} (f : α → β) (h : a ⊆ b) : f '' a ⊆ f '' b :=
by finish [subset_def, mem_image_eq]
theorem image_union (f : α → β) (s t : set α) :
f '' (s ∪ t) = f '' s ∪ f '' t :=
by finish [ext_iff, iff_def, mem_image_eq]
@[simp] theorem image_empty (f : α → β) : f '' ∅ = ∅ := by { ext, simp }
lemma image_inter_subset (f : α → β) (s t : set α) :
f '' (s ∩ t) ⊆ f '' s ∩ f '' t :=
subset_inter (image_subset _ $ inter_subset_left _ _) (image_subset _ $ inter_subset_right _ _)
theorem image_inter_on {f : α → β} {s t : set α} (h : ∀x∈t, ∀y∈s, f x = f y → x = y) :
f '' s ∩ f '' t = f '' (s ∩ t) :=
subset.antisymm
(assume b ⟨⟨a₁, ha₁, h₁⟩, ⟨a₂, ha₂, h₂⟩⟩,
have a₂ = a₁, from h _ ha₂ _ ha₁ (by simp *),
⟨a₁, ⟨ha₁, this ▸ ha₂⟩, h₁⟩)
(image_inter_subset _ _ _)
theorem image_inter {f : α → β} {s t : set α} (H : injective f) :
f '' s ∩ f '' t = f '' (s ∩ t) :=
image_inter_on (assume x _ y _ h, H h)
theorem image_univ_of_surjective {ι : Type*} {f : ι → β} (H : surjective f) : f '' univ = univ :=
eq_univ_of_forall $ by { simpa [image] }
@[simp] theorem image_singleton {f : α → β} {a : α} : f '' {a} = {f a} :=
by { ext, simp [image, eq_comm] }
theorem nonempty.image_const {s : set α} (hs : s.nonempty) (a : β) : (λ _, a) '' s = {a} :=
ext $ λ x, ⟨λ ⟨y, _, h⟩, h ▸ mem_singleton _,
λ h, (eq_of_mem_singleton h).symm ▸ hs.imp (λ y hy, ⟨hy, rfl⟩)⟩
@[simp] lemma image_eq_empty {α β} {f : α → β} {s : set α} : f '' s = ∅ ↔ s = ∅ :=
by { simp only [eq_empty_iff_forall_not_mem],
exact ⟨λ H a ha, H _ ⟨_, ha, rfl⟩, λ H b ⟨_, ha, _⟩, H _ ha⟩ }
lemma inter_singleton_nonempty {s : set α} {a : α} : (s ∩ {a}).nonempty ↔ a ∈ s :=
by finish [set.nonempty]
-- TODO(Jeremy): there is an issue with - t unfolding to compl t
theorem mem_compl_image (t : set α) (S : set (set α)) :
t ∈ compl '' S ↔ tᶜ ∈ S :=
begin
suffices : ∀ x, xᶜ = t ↔ tᶜ = x, { simp [this] },
intro x, split; { intro e, subst e, simp }
end
/-- A variant of `image_id` -/
@[simp] lemma image_id' (s : set α) : (λx, x) '' s = s := by { ext, simp }
theorem image_id (s : set α) : id '' s = s := by simp
theorem compl_compl_image (S : set (set α)) :
compl '' (compl '' S) = S :=
by rw [← image_comp, compl_comp_compl, image_id]
theorem image_insert_eq {f : α → β} {a : α} {s : set α} :
f '' (insert a s) = insert (f a) (f '' s) :=
by { ext, simp [and_or_distrib_left, exists_or_distrib, eq_comm, or_comm, and_comm] }
theorem image_pair (f : α → β) (a b : α) : f '' {a, b} = {f a, f b} :=
by simp only [image_insert_eq, image_singleton]
theorem image_subset_preimage_of_inverse {f : α → β} {g : β → α}
(I : left_inverse g f) (s : set α) : f '' s ⊆ g ⁻¹' s :=
λ b ⟨a, h, e⟩, e ▸ ((I a).symm ▸ h : g (f a) ∈ s)
theorem preimage_subset_image_of_inverse {f : α → β} {g : β → α}
(I : left_inverse g f) (s : set β) : f ⁻¹' s ⊆ g '' s :=
λ b h, ⟨f b, h, I b⟩
theorem image_eq_preimage_of_inverse {f : α → β} {g : β → α}
(h₁ : left_inverse g f) (h₂ : right_inverse g f) :
image f = preimage g :=
funext $ λ s, subset.antisymm
(image_subset_preimage_of_inverse h₁ s)
(preimage_subset_image_of_inverse h₂ s)
theorem mem_image_iff_of_inverse {f : α → β} {g : β → α} {b : β} {s : set α}
(h₁ : left_inverse g f) (h₂ : right_inverse g f) :
b ∈ f '' s ↔ g b ∈ s :=
by rw image_eq_preimage_of_inverse h₁ h₂; refl
theorem image_compl_subset {f : α → β} {s : set α} (H : injective f) : f '' sᶜ ⊆ (f '' s)ᶜ :=
subset_compl_iff_disjoint.2 $ by simp [image_inter H]
theorem subset_image_compl {f : α → β} {s : set α} (H : surjective f) : (f '' s)ᶜ ⊆ f '' sᶜ :=
compl_subset_iff_union.2 $
by { rw ← image_union, simp [image_univ_of_surjective H] }
theorem image_compl_eq {f : α → β} {s : set α} (H : bijective f) : f '' sᶜ = (f '' s)ᶜ :=
subset.antisymm (image_compl_subset H.1) (subset_image_compl H.2)
theorem subset_image_diff (f : α → β) (s t : set α) :
f '' s \ f '' t ⊆ f '' (s \ t) :=
begin
rw [diff_subset_iff, ← image_union, union_diff_self],
exact image_subset f (subset_union_right t s)
end
theorem image_diff {f : α → β} (hf : injective f) (s t : set α) :
f '' (s \ t) = f '' s \ f '' t :=
subset.antisymm
(subset.trans (image_inter_subset _ _ _) $ inter_subset_inter_right _ $ image_compl_subset hf)
(subset_image_diff f s t)
lemma nonempty.image (f : α → β) {s : set α} : s.nonempty → (f '' s).nonempty
| ⟨x, hx⟩ := ⟨f x, mem_image_of_mem f hx⟩
lemma nonempty.of_image {f : α → β} {s : set α} : (f '' s).nonempty → s.nonempty
| ⟨y, x, hx, _⟩ := ⟨x, hx⟩
@[simp] lemma nonempty_image_iff {f : α → β} {s : set α} :
(f '' s).nonempty ↔ s.nonempty :=
⟨nonempty.of_image, λ h, h.image f⟩
/-- image and preimage are a Galois connection -/
@[simp] theorem image_subset_iff {s : set α} {t : set β} {f : α → β} :
f '' s ⊆ t ↔ s ⊆ f ⁻¹' t :=
ball_image_iff
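-- Illustrative usage sketch (added example, not from the original file): via the
-- Galois connection, an inclusion of an image reduces to a pointwise check.
example (s : set ℕ) : (λ n, n + 1) '' s ⊆ {n | 1 ≤ n} :=
image_subset_iff.2 $ λ n hn, nat.le_add_left 1 n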
theorem image_preimage_subset (f : α → β) (s : set β) :
f '' (f ⁻¹' s) ⊆ s :=
image_subset_iff.2 (subset.refl _)
theorem subset_preimage_image (f : α → β) (s : set α) :
s ⊆ f ⁻¹' (f '' s) :=
λ x, mem_image_of_mem f
theorem preimage_image_eq {f : α → β} (s : set α) (h : injective f) : f ⁻¹' (f '' s) = s :=
subset.antisymm
(λ x ⟨y, hy, e⟩, h e ▸ hy)
(subset_preimage_image f s)
theorem image_preimage_eq {f : α → β} {s : set β} (h : surjective f) : f '' (f ⁻¹' s) = s :=
subset.antisymm
(image_preimage_subset f s)
(λ x hx, let ⟨y, e⟩ := h x in ⟨y, (e.symm ▸ hx : f y ∈ s), e⟩)
lemma preimage_eq_preimage {f : β → α} (hf : surjective f) : f ⁻¹' s = preimage f t ↔ s = t :=
iff.intro
(assume eq, by rw [← @image_preimage_eq β α f s hf, ← @image_preimage_eq β α f t hf, eq])
(assume eq, eq ▸ rfl)
lemma image_inter_preimage (f : α → β) (s : set α) (t : set β) :
f '' (s ∩ f ⁻¹' t) = f '' s ∩ t :=
begin
apply subset.antisymm,
{ calc f '' (s ∩ f ⁻¹' t) ⊆ f '' s ∩ (f '' (f⁻¹' t)) : image_inter_subset _ _ _
... ⊆ f '' s ∩ t : inter_subset_inter_right _ (image_preimage_subset f t) },
{ rintros _ ⟨⟨x, h', rfl⟩, h⟩,
exact ⟨x, ⟨h', h⟩, rfl⟩ }
end
lemma image_preimage_inter (f : α → β) (s : set α) (t : set β) :
f '' (f ⁻¹' t ∩ s) = t ∩ f '' s :=
by simp only [inter_comm, image_inter_preimage]
lemma image_diff_preimage {f : α → β} {s : set α} {t : set β} : f '' (s \ f ⁻¹' t) = f '' s \ t :=
by simp_rw [diff_eq, ← preimage_compl, image_inter_preimage]
theorem compl_image : image (compl : set α → set α) = preimage compl :=
image_eq_preimage_of_inverse compl_compl compl_compl
theorem compl_image_set_of {p : set α → Prop} :
compl '' {s | p s} = {s | p sᶜ} :=
congr_fun compl_image p
theorem inter_preimage_subset (s : set α) (t : set β) (f : α → β) :
s ∩ f ⁻¹' t ⊆ f ⁻¹' (f '' s ∩ t) :=
λ x h, ⟨mem_image_of_mem _ h.left, h.right⟩
theorem union_preimage_subset (s : set α) (t : set β) (f : α → β) :
s ∪ f ⁻¹' t ⊆ f ⁻¹' (f '' s ∪ t) :=
λ x h, or.elim h (λ l, or.inl $ mem_image_of_mem _ l) (λ r, or.inr r)
theorem subset_image_union (f : α → β) (s : set α) (t : set β) :
f '' (s ∪ f ⁻¹' t) ⊆ f '' s ∪ t :=
image_subset_iff.2 (union_preimage_subset _ _ _)
lemma preimage_subset_iff {A : set α} {B : set β} {f : α → β} :
f⁻¹' B ⊆ A ↔ (∀ a : α, f a ∈ B → a ∈ A) := iff.rfl
lemma image_eq_image {f : α → β} (hf : injective f) : f '' s = f '' t ↔ s = t :=
iff.symm $ iff.intro (assume eq, eq ▸ rfl) $ assume eq,
by rw [← preimage_image_eq s hf, ← preimage_image_eq t hf, eq]
lemma image_subset_image_iff {f : α → β} (hf : injective f) : f '' s ⊆ f '' t ↔ s ⊆ t :=
begin
refine (iff.symm $ iff.intro (image_subset f) $ assume h, _),
rw [← preimage_image_eq s hf, ← preimage_image_eq t hf],
exact preimage_mono h
end
lemma prod_quotient_preimage_eq_image [s : setoid α] (g : quotient s → β) {h : α → β}
(Hh : h = g ∘ quotient.mk) (r : set (β × β)) :
{x : quotient s × quotient s | (g x.1, g x.2) ∈ r} =
(λ a : α × α, (⟦a.1⟧, ⟦a.2⟧)) '' ((λ a : α × α, (h a.1, h a.2)) ⁻¹' r) :=
Hh.symm ▸ set.ext (λ ⟨a₁, a₂⟩, ⟨quotient.induction_on₂ a₁ a₂
(λ a₁ a₂ h, ⟨(a₁, a₂), h, rfl⟩),
λ ⟨⟨b₁, b₂⟩, h₁, h₂⟩, show (g a₁, g a₂) ∈ r, from
have h₃ : ⟦b₁⟧ = a₁ ∧ ⟦b₂⟧ = a₂ := prod.ext_iff.1 h₂,
h₃.1 ▸ h₃.2 ▸ h₁⟩)
/-- Restriction of `f` to `s` factors through `s.image_factorization f : s → f '' s`. -/
def image_factorization (f : α → β) (s : set α) : s → f '' s :=
λ p, ⟨f p.1, mem_image_of_mem f p.2⟩
lemma image_factorization_eq {f : α → β} {s : set α} :
subtype.val ∘ image_factorization f s = f ∘ subtype.val :=
funext $ λ p, rfl
lemma surjective_onto_image {f : α → β} {s : set α} :
surjective (image_factorization f s) :=
λ ⟨_, ⟨a, ha, rfl⟩⟩, ⟨⟨a, ha⟩, rfl⟩
end image
/-! ### Subsingleton -/
/-- A set `s` is a `subsingleton`, if it has at most one element. -/
protected def subsingleton (s : set α) : Prop :=
∀ ⦃x⦄ (hx : x ∈ s) ⦃y⦄ (hy : y ∈ s), x = y
lemma subsingleton.mono (ht : t.subsingleton) (hst : s ⊆ t) : s.subsingleton :=
λ x hx y hy, ht (hst hx) (hst hy)
lemma subsingleton.image (hs : s.subsingleton) (f : α → β) : (f '' s).subsingleton :=
λ _ ⟨x, hx, Hx⟩ _ ⟨y, hy, Hy⟩, Hx ▸ Hy ▸ congr_arg f (hs hx hy)
lemma subsingleton.eq_singleton_of_mem (hs : s.subsingleton) {x:α} (hx : x ∈ s) :
s = {x} :=
ext $ λ y, ⟨λ hy, (hs hx hy) ▸ mem_singleton _, λ hy, (eq_of_mem_singleton hy).symm ▸ hx⟩
lemma subsingleton_empty : (∅ : set α).subsingleton := λ x, false.elim
lemma subsingleton_singleton {a} : ({a} : set α).subsingleton :=
λ x hx y hy, (eq_of_mem_singleton hx).symm ▸ (eq_of_mem_singleton hy).symm ▸ rfl
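-- Illustrative usage sketch (added example, not from the original file): any subset of
-- a singleton is a subsingleton, combining `subsingleton_singleton` and `subsingleton.mono`.
example {s : set ℕ} (h : s ⊆ {0}) : s.subsingleton := subsingleton_singleton.mono h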
lemma subsingleton.eq_empty_or_singleton (hs : s.subsingleton) :
s = ∅ ∨ ∃ x, s = {x} :=
s.eq_empty_or_nonempty.elim or.inl (λ ⟨x, hx⟩, or.inr ⟨x, hs.eq_singleton_of_mem hx⟩)
lemma subsingleton_univ [subsingleton α] : (univ : set α).subsingleton :=
λ x hx y hy, subsingleton.elim x y
/-- `s`, coerced to a type, is a subsingleton type if and only if `s`
is a subsingleton set. -/
@[simp, norm_cast] lemma subsingleton_coe (s : set α) : subsingleton s ↔ s.subsingleton :=
begin
split,
{ refine λ h, (λ a ha b hb, _),
exact set_coe.ext_iff.2 (@subsingleton.elim s h ⟨a, ha⟩ ⟨b, hb⟩) },
{ exact λ h, subsingleton.intro (λ a b, set_coe.ext (h a.property b.property)) }
end
theorem univ_eq_true_false : univ = ({true, false} : set Prop) :=
eq.symm $ eq_univ_of_forall $ classical.cases (by simp) (by simp)
/-! ### Lemmas about range of a function. -/
section range
variables {f : ι → α}
open function
/-- Range of a function.
This is more flexible than `f '' univ`, because `image` requires the domain to live in `Type`,
while `range` also accepts functions whose domain is an arbitrary `Sort`. -/
def range (f : ι → α) : set α := {x | ∃y, f y = x}
@[simp] theorem mem_range {x : α} : x ∈ range f ↔ ∃ y, f y = x := iff.rfl
@[simp] theorem mem_range_self (i : ι) : f i ∈ range f := ⟨i, rfl⟩
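-- Illustrative usage sketch (added example, not from the original file): `0` is not a
-- successor, so it does not lie in the range of `nat.succ`.
example : (0 : ℕ) ∉ range nat.succ := λ ⟨n, hn⟩, nat.succ_ne_zero n hn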
theorem forall_range_iff {p : α → Prop} : (∀ a ∈ range f, p a) ↔ (∀ i, p (f i)) :=
by simp
theorem exists_range_iff {p : α → Prop} : (∃ a ∈ range f, p a) ↔ (∃ i, p (f i)) :=
by simp
lemma exists_range_iff' {p : α → Prop} :
(∃ a, a ∈ range f ∧ p a) ↔ ∃ i, p (f i) :=
by simpa only [exists_prop] using exists_range_iff
theorem range_iff_surjective : range f = univ ↔ surjective f :=
eq_univ_iff_forall
alias range_iff_surjective ↔ _ function.surjective.range_eq
@[simp] theorem range_id : range (@id α) = univ := range_iff_surjective.2 surjective_id
theorem is_compl_range_inl_range_inr : is_compl (range $ @sum.inl α β) (range sum.inr) :=
⟨by { rintro y ⟨⟨x₁, rfl⟩, ⟨x₂, _⟩⟩, cc },
by { rintro (x|y) -; [left, right]; exact mem_range_self _ }⟩
theorem range_inl_union_range_inr : range (@sum.inl α β) ∪ range sum.inr = univ :=
is_compl_range_inl_range_inr.sup_eq_top
@[simp] theorem range_quot_mk (r : α → α → Prop) : range (quot.mk r) = univ :=
(surjective_quot_mk r).range_eq
@[simp] theorem image_univ {ι : Type*} {f : ι → β} : f '' univ = range f :=
by { ext, simp [image, range] }
theorem image_subset_range {ι : Type*} (f : ι → β) (s : set ι) : f '' s ⊆ range f :=
by rw ← image_univ; exact image_subset _ (subset_univ _)
theorem range_comp (g : α → β) (f : ι → α) : range (g ∘ f) = g '' range f :=
subset.antisymm
(forall_range_iff.mpr $ assume i, mem_image_of_mem g (mem_range_self _))
(ball_image_iff.mpr $ forall_range_iff.mpr mem_range_self)
theorem range_subset_iff {s : set α} : range f ⊆ s ↔ ∀ y, f y ∈ s :=
forall_range_iff
lemma range_comp_subset_range (f : α → β) (g : β → γ) : range (g ∘ f) ⊆ range g :=
by rw range_comp; apply image_subset_range
lemma range_nonempty_iff_nonempty : (range f).nonempty ↔ nonempty ι :=
⟨λ ⟨y, x, hxy⟩, ⟨x⟩, λ ⟨x⟩, ⟨f x, mem_range_self x⟩⟩
lemma range_nonempty [h : nonempty ι] (f : ι → α) : (range f).nonempty :=
range_nonempty_iff_nonempty.2 h
@[simp] lemma range_eq_empty {f : ι → α} : range f = ∅ ↔ ¬ nonempty ι :=
not_nonempty_iff_eq_empty.symm.trans $ not_congr range_nonempty_iff_nonempty
@[simp] lemma image_union_image_compl_eq_range (f : α → β) :
(f '' s) ∪ (f '' sᶜ) = range f :=
by rw [← image_union, ← image_univ, ← union_compl_self]
theorem image_preimage_eq_inter_range {f : α → β} {t : set β} :
f '' (f ⁻¹' t) = t ∩ range f :=
ext $ assume x, ⟨assume ⟨x, hx, heq⟩, heq ▸ ⟨hx, mem_range_self _⟩,
assume ⟨hx, ⟨y, h_eq⟩⟩, h_eq ▸ mem_image_of_mem f $
show y ∈ f ⁻¹' t, by simp [preimage, h_eq, hx]⟩
lemma image_preimage_eq_of_subset {f : α → β} {s : set β} (hs : s ⊆ range f) :
f '' (f ⁻¹' s) = s :=
by rw [image_preimage_eq_inter_range, inter_eq_self_of_subset_left hs]
lemma image_preimage_eq_iff {f : α → β} {s : set β} : f '' (f ⁻¹' s) = s ↔ s ⊆ range f :=
⟨by { intro h, rw [← h], apply image_subset_range }, image_preimage_eq_of_subset⟩
lemma preimage_subset_preimage_iff {s t : set α} {f : β → α} (hs : s ⊆ range f) :
f ⁻¹' s ⊆ f ⁻¹' t ↔ s ⊆ t :=
begin
split,
{ intros h x hx, rcases hs hx with ⟨y, rfl⟩, exact h hx },
intros h x, apply h
end
lemma preimage_eq_preimage' {s t : set α} {f : β → α} (hs : s ⊆ range f) (ht : t ⊆ range f) :
f ⁻¹' s = f ⁻¹' t ↔ s = t :=
begin
split,
{ intro h, apply subset.antisymm, rw [←preimage_subset_preimage_iff hs, h],
rw [←preimage_subset_preimage_iff ht, h] },
rintro rfl, refl
end
@[simp] theorem preimage_inter_range {f : α → β} {s : set β} : f ⁻¹' (s ∩ range f) = f ⁻¹' s :=
set.ext $ λ x, and_iff_left ⟨x, rfl⟩
@[simp] theorem preimage_range_inter {f : α → β} {s : set β} : f ⁻¹' (range f ∩ s) = f ⁻¹' s :=
by rw [inter_comm, preimage_inter_range]
theorem preimage_image_preimage {f : α → β} {s : set β} :
f ⁻¹' (f '' (f ⁻¹' s)) = f ⁻¹' s :=
by rw [image_preimage_eq_inter_range, preimage_inter_range]
@[simp] theorem quot_mk_range_eq [setoid α] : range (λx : α, ⟦x⟧) = univ :=
range_iff_surjective.2 quot.exists_rep
lemma range_const_subset {c : α} : range (λx:ι, c) ⊆ {c} :=
range_subset_iff.2 $ λ x, rfl
@[simp] lemma range_const : ∀ [nonempty ι] {c : α}, range (λx:ι, c) = {c}
| ⟨x⟩ c := subset.antisymm range_const_subset $
assume y hy, (mem_singleton_iff.1 hy).symm ▸ mem_range_self x
lemma diagonal_eq_range {α : Type*} : diagonal α = range (λ x, (x, x)) :=
by { ext ⟨x, y⟩, simp [diagonal, eq_comm] }
theorem preimage_singleton_nonempty {f : α → β} {y : β} :
(f ⁻¹' {y}).nonempty ↔ y ∈ range f :=
iff.rfl
theorem preimage_singleton_eq_empty {f : α → β} {y : β} :
f ⁻¹' {y} = ∅ ↔ y ∉ range f :=
not_nonempty_iff_eq_empty.symm.trans $ not_congr preimage_singleton_nonempty
lemma range_subset_singleton {f : ι → α} {x : α} : range f ⊆ {x} ↔ f = const ι x :=
by simp [range_subset_iff, funext_iff, mem_singleton]
lemma image_compl_preimage {f : α → β} {s : set β} : f '' ((f ⁻¹' s)ᶜ) = range f \ s :=
by rw [compl_eq_univ_diff, image_diff_preimage, image_univ]
@[simp] theorem range_sigma_mk {β : α → Type*} (a : α) :
range (sigma.mk a : β a → Σ a, β a) = sigma.fst ⁻¹' {a} :=
begin
apply subset.antisymm,
{ rintros _ ⟨b, rfl⟩, simp },
{ rintros ⟨x, y⟩ (rfl|_),
exact mem_range_self y }
end
/-- Any map `f : ι → β` factors through a map `range_factorization f : ι → range f`. -/
def range_factorization (f : ι → β) : ι → range f :=
λ i, ⟨f i, mem_range_self i⟩
lemma range_factorization_eq {f : ι → β} :
subtype.val ∘ range_factorization f = f :=
funext $ λ i, rfl
lemma surjective_onto_range : surjective (range_factorization f) :=
λ ⟨_, ⟨i, rfl⟩⟩, ⟨i, rfl⟩
lemma image_eq_range (f : α → β) (s : set α) : f '' s = range (λ(x : s), f x) :=
by { ext, split, rintro ⟨x, h1, h2⟩, exact ⟨⟨x, h1⟩, h2⟩, rintro ⟨⟨x, h1⟩, h2⟩, exact ⟨x, h1, h2⟩ }
@[simp] lemma sum.elim_range {α β γ : Type*} (f : α → γ) (g : β → γ) :
range (sum.elim f g) = range f ∪ range g :=
by simp [set.ext_iff, mem_range]
lemma range_ite_subset' {p : Prop} [decidable p] {f g : α → β} :
range (if p then f else g) ⊆ range f ∪ range g :=
begin
by_cases h : p, {rw if_pos h, exact subset_union_left _ _},
{rw if_neg h, exact subset_union_right _ _}
end
lemma range_ite_subset {p : α → Prop} [decidable_pred p] {f g : α → β} :
range (λ x, if p x then f x else g x) ⊆ range f ∪ range g :=
begin
rw range_subset_iff, intro x, by_cases h : p x,
simp [if_pos h, mem_union, mem_range_self],
simp [if_neg h, mem_union, mem_range_self]
end
@[simp] lemma preimage_range (f : α → β) : f ⁻¹' (range f) = univ :=
eq_univ_of_forall mem_range_self
/-- The range of a function from a `unique` type contains just the
function applied to its single value. -/
lemma range_unique [h : unique ι] : range f = {f $ default ι} :=
begin
ext x,
rw mem_range,
split,
{ rintros ⟨i, hi⟩,
rw h.uniq i at hi,
exact hi ▸ mem_singleton _ },
{ exact λ h, ⟨default ι, h.symm⟩ }
end
end range
/-- The set `s` is pairwise `r` if `r x y` for all *distinct* `x y ∈ s`. -/
def pairwise_on (s : set α) (r : α → α → Prop) := ∀ x ∈ s, ∀ y ∈ s, x ≠ y → r x y
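-- Illustrative usage sketch (added example, not from the original file): every set is
-- pairwise `≠`, directly from the definition.
example (s : set ℕ) : pairwise_on s (λ x y, x ≠ y) := λ x hx y hy hxy, hxy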
theorem pairwise_on.mono {s t : set α} {r}
(h : t ⊆ s) (hp : pairwise_on s r) : pairwise_on t r :=
λ x xt y yt, hp x (h xt) y (h yt)
theorem pairwise_on.mono' {s : set α} {r r' : α → α → Prop}
(H : ∀ a b, r a b → r' a b) (hp : pairwise_on s r) : pairwise_on s r' :=
λ x xs y ys h, H _ _ (hp x xs y ys h)
/-- The function `f` takes pairwise equal values on `s` if and only if there is some single
value that it takes everywhere on `s`. -/
lemma pairwise_on_eq_iff_exists_eq [nonempty β] (s : set α) (f : α → β) :
(pairwise_on s (λ x y, f x = f y)) ↔ ∃ z, ∀ x ∈ s, f x = z :=
begin
split,
{ intro h,
rcases eq_empty_or_nonempty s with rfl | ⟨x, hx⟩,
{ exact ⟨classical.arbitrary β, λ x hx, false.elim hx⟩ },
{ use f x,
intros y hy,
by_cases hyx : y = x,
{ rw hyx },
{ exact h y hy x hx hyx } } },
{ rintros ⟨z, hz⟩ x hx y hy hne,
rw [hz x hx, hz y hy] }
end
end set
open set
namespace function
variables {ι : Sort*} {α : Type*} {β : Type*} {f : α → β}
lemma surjective.preimage_injective (hf : surjective f) : injective (preimage f) :=
assume s t, (preimage_eq_preimage hf).1
lemma injective.preimage_surjective (hf : injective f) : surjective (preimage f) :=
by { intro s, use f '' s, rw preimage_image_eq _ hf }
lemma surjective.image_surjective (hf : surjective f) : surjective (image f) :=
by { intro s, use f ⁻¹' s, rw image_preimage_eq hf }
lemma injective.image_injective (hf : injective f) : injective (image f) :=
by { intros s t h, rw [←preimage_image_eq s hf, ←preimage_image_eq t hf, h] }
lemma surjective.preimage_subset_preimage_iff {s t : set β} (hf : surjective f) :
f ⁻¹' s ⊆ f ⁻¹' t ↔ s ⊆ t :=
by { apply preimage_subset_preimage_iff, rw [hf.range_eq], apply subset_univ }
lemma surjective.range_comp {ι' : Sort*} {f : ι → ι'} (hf : surjective f) (g : ι' → α) :
range (g ∘ f) = range g :=
ext $ λ y, (@surjective.exists _ _ _ hf (λ x, g x = y)).symm
lemma injective.nonempty_apply_iff {f : set α → set β} (hf : injective f)
(h2 : f ∅ = ∅) {s : set α} : (f s).nonempty ↔ s.nonempty :=
by rw [← ne_empty_iff_nonempty, ← h2, ← ne_empty_iff_nonempty, hf.ne_iff]
end function
open function
/-! ### Image and preimage on subtypes -/
namespace subtype
variable {α : Type*}
lemma coe_image {p : α → Prop} {s : set (subtype p)} :
coe '' s = {x | ∃h : p x, (⟨x, h⟩ : subtype p) ∈ s} :=
set.ext $ assume a,
⟨assume ⟨⟨a', ha'⟩, in_s, h_eq⟩, h_eq ▸ ⟨ha', in_s⟩,
assume ⟨ha, in_s⟩, ⟨⟨a, ha⟩, in_s, rfl⟩⟩
lemma range_coe {s : set α} :
range (coe : s → α) = s :=
by { rw ← set.image_univ, simp [-set.image_univ, coe_image] }
/-- A variant of `range_coe`. Try to use `range_coe` if possible.
This version is useful when defining a new type as a subtype of an existing one;
in that case the coercion to the ambient type does not fire anymore. -/
lemma range_val {s : set α} :
range (subtype.val : s → α) = s :=
range_coe
/-- We make this the simp lemma instead of `range_coe`. The reason is that if for `s : set α`
we write the function `coe : s → α`, then the inferred implicit arguments of `coe` are
`coe α (λ x, x ∈ s)`. -/
@[simp] lemma range_coe_subtype {p : α → Prop} :
range (coe : subtype p → α) = {x | p x} :=
range_coe
@[simp] lemma coe_preimage_self (s : set α) : (coe : s → α) ⁻¹' s = univ :=
by rw [← preimage_range (coe : s → α), range_coe]
lemma range_val_subtype {p : α → Prop} :
range (subtype.val : subtype p → α) = {x | p x} :=
range_coe
theorem coe_image_subset (s : set α) (t : set s) : coe '' t ⊆ s :=
λ x ⟨y, yt, yvaleq⟩, by rw ←yvaleq; exact y.property
theorem coe_image_univ (s : set α) : (coe : s → α) '' set.univ = s :=
image_univ.trans range_coe
@[simp] theorem image_preimage_coe (s t : set α) :
(coe : s → α) '' (coe ⁻¹' t) = t ∩ s :=
image_preimage_eq_inter_range.trans $ congr_arg _ range_coe
theorem image_preimage_val (s t : set α) :
(subtype.val : s → α) '' (subtype.val ⁻¹' t) = t ∩ s :=
image_preimage_coe s t
theorem preimage_coe_eq_preimage_coe_iff {s t u : set α} :
((coe : s → α) ⁻¹' t = coe ⁻¹' u) ↔ t ∩ s = u ∩ s :=
begin
rw [←image_preimage_coe, ←image_preimage_coe],
split, { intro h, rw h },
intro h, exact coe_injective.image_injective h
end
theorem preimage_val_eq_preimage_val_iff (s t u : set α) :
((subtype.val : s → α) ⁻¹' t = subtype.val ⁻¹' u) ↔ (t ∩ s = u ∩ s) :=
preimage_coe_eq_preimage_coe_iff
lemma exists_set_subtype {t : set α} (p : set α → Prop) :
(∃(s : set t), p (coe '' s)) ↔ ∃(s : set α), s ⊆ t ∧ p s :=
begin
split,
{ rintro ⟨s, hs⟩, refine ⟨coe '' s, _, hs⟩,
convert image_subset_range _ _, rw [range_coe] },
rintro ⟨s, hs₁, hs₂⟩, refine ⟨coe ⁻¹' s, _⟩,
rw [image_preimage_eq_of_subset], exact hs₂, rw [range_coe], exact hs₁
end
lemma preimage_coe_nonempty {s t : set α} : ((coe : s → α) ⁻¹' t).nonempty ↔ (s ∩ t).nonempty :=
by rw [inter_comm, ← image_preimage_coe, nonempty_image_iff]
end subtype
namespace set
/-! ### Lemmas about cartesian product of sets -/
section prod
variables {α : Type*} {β : Type*} {γ : Type*} {δ : Type*}
variables {s s₁ s₂ : set α} {t t₁ t₂ : set β}
/-- The cartesian product `prod s t` is the set of `(a, b)`
such that `a ∈ s` and `b ∈ t`. -/
protected def prod (s : set α) (t : set β) : set (α × β) :=
{p | p.1 ∈ s ∧ p.2 ∈ t}
lemma prod_eq (s : set α) (t : set β) : s.prod t = prod.fst ⁻¹' s ∩ prod.snd ⁻¹' t := rfl
theorem mem_prod_eq {p : α × β} : p ∈ s.prod t = (p.1 ∈ s ∧ p.2 ∈ t) := rfl
@[simp] theorem mem_prod {p : α × β} : p ∈ s.prod t ↔ p.1 ∈ s ∧ p.2 ∈ t := iff.rfl
@[simp] theorem prod_mk_mem_set_prod_eq {a : α} {b : β} :
(a, b) ∈ s.prod t = (a ∈ s ∧ b ∈ t) := rfl
lemma mk_mem_prod {a : α} {b : β} (a_in : a ∈ s) (b_in : b ∈ t) : (a, b) ∈ s.prod t :=
⟨a_in, b_in⟩
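-- Illustrative usage sketch (added example, not from the original file): `mk_mem_prod`
-- applied to concrete membership hypotheses.
example {s t : set ℕ} {a b : ℕ} (ha : a ∈ s) (hb : b ∈ t) : (a, b) ∈ s.prod t :=
mk_mem_prod ha hb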
theorem prod_mono {s₁ s₂ : set α} {t₁ t₂ : set β} (hs : s₁ ⊆ s₂) (ht : t₁ ⊆ t₂) :
s₁.prod t₁ ⊆ s₂.prod t₂ :=
assume x ⟨h₁, h₂⟩, ⟨hs h₁, ht h₂⟩
lemma prod_subset_iff {P : set (α × β)} :
(s.prod t ⊆ P) ↔ ∀ (x ∈ s) (y ∈ t), (x, y) ∈ P :=
⟨λ h _ xin _ yin, h (mk_mem_prod xin yin), λ h ⟨_, _⟩ pin, h _ pin.1 _ pin.2⟩
lemma forall_prod_set {p : α × β → Prop} :
(∀ x ∈ s.prod t, p x) ↔ ∀ (x ∈ s) (y ∈ t), p (x, y) :=
prod_subset_iff
@[simp] theorem prod_empty : s.prod ∅ = (∅ : set (α × β)) :=
by { ext, simp }
@[simp] theorem empty_prod : set.prod ∅ t = (∅ : set (α × β)) :=
by { ext, simp }
@[simp] theorem univ_prod_univ : (@univ α).prod (@univ β) = univ :=
by { ext ⟨x, y⟩, simp }
lemma univ_prod {t : set β} : set.prod (univ : set α) t = prod.snd ⁻¹' t :=
by simp [prod_eq]
lemma prod_univ {s : set α} : set.prod s (univ : set β) = prod.fst ⁻¹' s :=
by simp [prod_eq]
@[simp] theorem singleton_prod {a : α} : set.prod {a} t = prod.mk a '' t :=
by { ext ⟨x, y⟩, simp [and.left_comm, eq_comm] }
@[simp] theorem prod_singleton {b : β} : s.prod {b} = (λ a, (a, b)) '' s :=
by { ext ⟨x, y⟩, simp [and.left_comm, eq_comm] }
theorem singleton_prod_singleton {a : α} {b : β} : set.prod {a} {b} = ({(a, b)} : set (α × β)) :=
by simp
@[simp] theorem union_prod : (s₁ ∪ s₂).prod t = s₁.prod t ∪ s₂.prod t :=
by { ext ⟨x, y⟩, simp [or_and_distrib_right] }
@[simp] theorem prod_union : s.prod (t₁ ∪ t₂) = s.prod t₁ ∪ s.prod t₂ :=
by { ext ⟨x, y⟩, simp [and_or_distrib_left] }
theorem prod_inter_prod : s₁.prod t₁ ∩ s₂.prod t₂ = (s₁ ∩ s₂).prod (t₁ ∩ t₂) :=
by { ext ⟨x, y⟩, simp [and_assoc, and.left_comm] }
theorem insert_prod {a : α} : (insert a s).prod t = (prod.mk a '' t) ∪ s.prod t :=
by { ext ⟨x, y⟩, simp [image, iff_def, or_imp_distrib, imp.swap] {contextual := tt} }
theorem prod_insert {b : β} : s.prod (insert b t) = ((λa, (a, b)) '' s) ∪ s.prod t :=
by { ext ⟨x, y⟩, simp [image, iff_def, or_imp_distrib, imp.swap] {contextual := tt} }
theorem prod_preimage_eq {f : γ → α} {g : δ → β} :
(preimage f s).prod (preimage g t) = preimage (λp, (f p.1, g p.2)) (s.prod t) := rfl
@[simp] lemma mk_preimage_prod_left {y : β} (h : y ∈ t) : (λ x, (x, y)) ⁻¹' s.prod t = s :=
by { ext x, simp [h] }
@[simp] lemma mk_preimage_prod_right {x : α} (h : x ∈ s) : prod.mk x ⁻¹' s.prod t = t :=
by { ext y, simp [h] }
@[simp] lemma mk_preimage_prod_left_eq_empty {y : β} (hy : y ∉ t) :
(λ x, (x, y)) ⁻¹' s.prod t = ∅ :=
by { ext z, simp [hy] }
@[simp] lemma mk_preimage_prod_right_eq_empty {x : α} (hx : x ∉ s) :
prod.mk x ⁻¹' s.prod t = ∅ :=
by { ext z, simp [hx] }
lemma mk_preimage_prod_left_eq_if {y : β} [decidable_pred (∈ t)] :
(λ x, (x, y)) ⁻¹' s.prod t = if y ∈ t then s else ∅ :=
by { split_ifs; simp [h] }
lemma mk_preimage_prod_right_eq_if {x : α} [decidable_pred (∈ s)] :
prod.mk x ⁻¹' s.prod t = if x ∈ s then t else ∅ :=
by { split_ifs; simp [h] }
theorem image_swap_eq_preimage_swap : image (@prod.swap α β) = preimage prod.swap :=
image_eq_preimage_of_inverse prod.swap_left_inverse prod.swap_right_inverse
theorem preimage_swap_prod {s : set α} {t : set β} : prod.swap ⁻¹' t.prod s = s.prod t :=
by { ext ⟨x, y⟩, simp [and_comm] }
theorem image_swap_prod : prod.swap '' t.prod s = s.prod t :=
by rw [image_swap_eq_preimage_swap, preimage_swap_prod]
theorem prod_image_image_eq {m₁ : α → γ} {m₂ : β → δ} :
(image m₁ s).prod (image m₂ t) = image (λp:α×β, (m₁ p.1, m₂ p.2)) (s.prod t) :=
ext $ by simp [-exists_and_distrib_right, exists_and_distrib_right.symm, and.left_comm,
and.assoc, and.comm]
theorem prod_range_range_eq {α β γ δ} {m₁ : α → γ} {m₂ : β → δ} :
(range m₁).prod (range m₂) = range (λp:α×β, (m₁ p.1, m₂ p.2)) :=
ext $ by simp [range]
theorem prod_range_univ_eq {α β γ} {m₁ : α → γ} :
(range m₁).prod (univ : set β) = range (λp:α×β, (m₁ p.1, p.2)) :=
ext $ by simp [range]
theorem prod_univ_range_eq {α β δ} {m₂ : β → δ} :
(univ : set α).prod (range m₂) = range (λp:α×β, (p.1, m₂ p.2)) :=
ext $ by simp [range]
theorem nonempty.prod : s.nonempty → t.nonempty → (s.prod t).nonempty
| ⟨x, hx⟩ ⟨y, hy⟩ := ⟨(x, y), ⟨hx, hy⟩⟩
theorem nonempty.fst : (s.prod t).nonempty → s.nonempty
| ⟨p, hp⟩ := ⟨p.1, hp.1⟩
theorem nonempty.snd : (s.prod t).nonempty → t.nonempty
| ⟨p, hp⟩ := ⟨p.2, hp.2⟩
theorem prod_nonempty_iff : (s.prod t).nonempty ↔ s.nonempty ∧ t.nonempty :=
⟨λ h, ⟨h.fst, h.snd⟩, λ h, nonempty.prod h.1 h.2⟩
theorem prod_eq_empty_iff :
s.prod t = ∅ ↔ (s = ∅ ∨ t = ∅) :=
by simp only [not_nonempty_iff_eq_empty.symm, prod_nonempty_iff, not_and_distrib]
lemma prod_sub_preimage_iff {W : set γ} {f : α × β → γ} :
s.prod t ⊆ f ⁻¹' W ↔ ∀ a b, a ∈ s → b ∈ t → f (a, b) ∈ W :=
by simp [subset_def]
lemma fst_image_prod_subset (s : set α) (t : set β) :
prod.fst '' (s.prod t) ⊆ s :=
λ _ h, let ⟨_, ⟨h₂, _⟩, h₁⟩ := (set.mem_image _ _ _).1 h in h₁ ▸ h₂
lemma prod_subset_preimage_fst (s : set α) (t : set β) :
s.prod t ⊆ prod.fst ⁻¹' s :=
image_subset_iff.1 (fst_image_prod_subset s t)
lemma fst_image_prod (s : set β) {t : set α} (ht : t.nonempty) :
prod.fst '' (s.prod t) = s :=
set.subset.antisymm (fst_image_prod_subset _ _)
$ λ y y_in, let ⟨x, x_in⟩ := ht in
⟨(y, x), ⟨y_in, x_in⟩, rfl⟩
lemma snd_image_prod_subset (s : set α) (t : set β) :
prod.snd '' (s.prod t) ⊆ t :=
λ _ h, let ⟨_, ⟨_, h₂⟩, h₁⟩ := (set.mem_image _ _ _).1 h in h₁ ▸ h₂
lemma prod_subset_preimage_snd (s : set α) (t : set β) :
s.prod t ⊆ prod.snd ⁻¹' t :=
image_subset_iff.1 (snd_image_prod_subset s t)
lemma snd_image_prod {s : set α} (hs : s.nonempty) (t : set β) :
prod.snd '' (s.prod t) = t :=
set.subset.antisymm (snd_image_prod_subset _ _)
$ λ y y_in, let ⟨x, x_in⟩ := hs in
⟨(x, y), ⟨x_in, y_in⟩, rfl⟩
/-- A product set is included in a product set if and only if each factor is included in the
corresponding factor, or one of the factors of the first set is empty. -/
lemma prod_subset_prod_iff :
(s.prod t ⊆ s₁.prod t₁) ↔ (s ⊆ s₁ ∧ t ⊆ t₁) ∨ (s = ∅) ∨ (t = ∅) :=
begin
classical,
cases (s.prod t).eq_empty_or_nonempty with h h,
{ simp [h, prod_eq_empty_iff.1 h] },
{ have st : s.nonempty ∧ t.nonempty, by rwa [prod_nonempty_iff] at h,
split,
{ assume H : s.prod t ⊆ s₁.prod t₁,
have h' : s₁.nonempty ∧ t₁.nonempty := prod_nonempty_iff.1 (h.mono H),
refine or.inl ⟨_, _⟩,
show s ⊆ s₁,
{ have := image_subset (prod.fst : α × β → α) H,
rwa [fst_image_prod _ st.2, fst_image_prod _ h'.2] at this },
show t ⊆ t₁,
{ have := image_subset (prod.snd : α × β → β) H,
rwa [snd_image_prod st.1, snd_image_prod h'.1] at this } },
{ assume H,
simp only [st.1.ne_empty, st.2.ne_empty, or_false] at H,
exact prod_mono H.1 H.2 } }
end
end prod
/-! ### Lemmas about set-indexed products of sets -/
section pi
variables {ι : Type*} {α : ι → Type*} {s : set ι} {t t₁ t₂ : Π i, set (α i)}
/-- Given an index set `s : set ι` and a family of sets `t : Π i, set (α i)`, `pi s t`
is the set of dependent functions `f : Π i, α i` such that `f i` belongs to `t i`
whenever `i ∈ s`. -/
def pi (s : set ι) (t : Π i, set (α i)) : set (Π i, α i) := { f | ∀i ∈ s, f i ∈ t i }
@[simp] lemma mem_pi {f : Π i, α i} : f ∈ s.pi t ↔ ∀ i ∈ s, f i ∈ t i :=
by refl
@[simp] lemma mem_univ_pi {f : Π i, α i} : f ∈ pi univ t ↔ ∀ i, f i ∈ t i :=
by simp
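-- Illustrative usage sketch (added example, not from the original file): membership in
-- `pi univ` is just a pointwise condition, so it can be provided directly.
example {γ : ℕ → Type*} {u : Π i, set (γ i)} {f : Π i, γ i} (h : ∀ i, f i ∈ u i) :
  f ∈ pi univ u := λ i _, h i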
@[simp] lemma empty_pi (s : Π i, set (α i)) : pi ∅ s = univ := by { ext, simp [pi] }
lemma pi_eq_empty {i : ι} (hs : i ∈ s) (ht : t i = ∅) : s.pi t = ∅ :=
by { ext f, simp only [mem_empty_eq, not_forall, iff_false, mem_pi, not_imp],
exact ⟨i, hs, by simp [ht]⟩ }
lemma univ_pi_eq_empty {i : ι} (ht : t i = ∅) : pi univ t = ∅ :=
pi_eq_empty (mem_univ i) ht
lemma pi_nonempty_iff : (s.pi t).nonempty ↔ ∀ i, ∃ x, i ∈ s → x ∈ t i :=
by simp [classical.skolem, set.nonempty]
lemma univ_pi_nonempty_iff : (pi univ t).nonempty ↔ ∀ i, (t i).nonempty :=
by simp [classical.skolem, set.nonempty]
lemma pi_eq_empty_iff : s.pi t = ∅ ↔ ∃ i, (α i → false) ∨ (i ∈ s ∧ t i = ∅) :=
begin
rw [← not_nonempty_iff_eq_empty, pi_nonempty_iff], push_neg, apply exists_congr, intro i,
split,
{ intro h, by_cases hα : nonempty (α i),
{ cases hα with x, refine or.inr ⟨(h x).1, by simp [eq_empty_iff_forall_not_mem, h]⟩ },
{ exact or.inl (λ x, hα ⟨x⟩) }},
{ rintro (h|h) x, exfalso, exact h x, simp [h] }
end
lemma univ_pi_eq_empty_iff : pi univ t = ∅ ↔ ∃ i, t i = ∅ :=
by simp [← not_nonempty_iff_eq_empty, univ_pi_nonempty_iff]
@[simp] lemma insert_pi (i : ι) (s : set ι) (t : Π i, set (α i)) :
pi (insert i s) t = (eval i ⁻¹' t i) ∩ pi s t :=
by { ext, simp [pi, or_imp_distrib, forall_and_distrib] }
@[simp] lemma singleton_pi (i : ι) (t : Π i, set (α i)) :
pi {i} t = (eval i ⁻¹' t i) :=
by { ext, simp [pi] }
lemma pi_if {p : ι → Prop} [h : decidable_pred p] (s : set ι) (t₁ t₂ : Π i, set (α i)) :
pi s (λ i, if p i then t₁ i else t₂ i) = pi {i ∈ s | p i} t₁ ∩ pi {i ∈ s | ¬ p i} t₂ :=
begin
ext f,
split,
{ assume h, split; { rintros i ⟨his, hpi⟩, simpa [*] using h i } },
{ rintros ⟨ht₁, ht₂⟩ i his,
by_cases p i; simp * at * }
end
open_locale classical
lemma eval_image_pi {i : ι} (hs : i ∈ s) (ht : (s.pi t).nonempty) : eval i '' s.pi t = t i :=
begin
ext x, rcases ht with ⟨f, hf⟩, split,
{ rintro ⟨g, hg, rfl⟩, exact hg i hs },
{ intro hg, refine ⟨update f i x, _, by simp⟩,
intros j hj, by_cases hji : j = i,
{ subst hji, simp [hg] },
{ rw [mem_pi] at hf, simp [hji, hf, hj] }},
end
@[simp] lemma eval_image_univ_pi {i : ι} (ht : (pi univ t).nonempty) :
(λ f : Π i, α i, f i) '' pi univ t = t i :=
eval_image_pi (mem_univ i) ht
lemma update_preimage_pi {i : ι} {f : Π i, α i} (hi : i ∈ s)
(hf : ∀ j ∈ s, j ≠ i → f j ∈ t j) : (update f i) ⁻¹' s.pi t = t i :=
begin
ext x, split,
{ intro h, convert h i hi, simp },
{ intros hx j hj, by_cases h : j = i,
{ cases h, simpa },
{ rw [update_noteq h], exact hf j hj h }}
end
lemma update_preimage_univ_pi {i : ι} {f : Π i, α i} (hf : ∀ j ≠ i, f j ∈ t j) :
(update f i) ⁻¹' pi univ t = t i :=
update_preimage_pi (mem_univ i) (λ j _, hf j)
lemma subset_pi_eval_image (s : set ι) (u : set (Π i, α i)) : u ⊆ pi s (λ i, eval i '' u) :=
λ f hf i hi, ⟨f, hf, rfl⟩
end pi
/-! ### Lemmas about `inclusion`, the injection of subtypes induced by `⊆` -/
section inclusion
variable {α : Type*}
/-- `inclusion` is the "identity" function between two subsets `s` and `t`, where `s ⊆ t` -/
def inclusion {s t : set α} (h : s ⊆ t) : s → t :=
λ x : s, (⟨x, h x.2⟩ : t)
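-- Illustrative usage sketch (added example, not from the original file): `inclusion`
-- only repackages the underlying element, so coercing back to `α` is definitional.
example {s t : set ℕ} (h : s ⊆ t) (x : s) : (inclusion h x : ℕ) = (x : ℕ) := rfl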
@[simp] lemma inclusion_self {s : set α} (x : s) :
inclusion (set.subset.refl _) x = x :=
by { cases x, refl }
@[simp] lemma inclusion_right {s t : set α} (h : s ⊆ t) (x : t) (m : (x : α) ∈ s) :
inclusion h ⟨x, m⟩ = x :=
by { cases x, refl }
@[simp] lemma inclusion_inclusion {s t u : set α} (hst : s ⊆ t) (htu : t ⊆ u)
(x : s) : inclusion htu (inclusion hst x) = inclusion (set.subset.trans hst htu) x :=
by { cases x, refl }
@[simp] lemma coe_inclusion {s t : set α} (h : s ⊆ t) (x : s) :
(inclusion h x : α) = (x : α) := rfl
lemma inclusion_injective {s t : set α} (h : s ⊆ t) :
function.injective (inclusion h)
| ⟨_, _⟩ ⟨_, _⟩ := subtype.ext_iff_val.2 ∘ subtype.ext_iff_val.1
lemma range_inclusion {s t : set α} (h : s ⊆ t) : range (inclusion h) = {x : t | (x:α) ∈ s} :=
by { ext ⟨x, hx⟩, simp [inclusion] }
end inclusion
/-! ### Injectivity and surjectivity lemmas for image and preimage -/
section image_preimage
variables {α : Type u} {β : Type v} {f : α → β}
@[simp]
lemma preimage_injective : injective (preimage f) ↔ surjective f :=
begin
refine ⟨λ h y, _, surjective.preimage_injective⟩,
obtain ⟨x, hx⟩ : (f ⁻¹' {y}).nonempty,
{ rw [h.nonempty_apply_iff preimage_empty], apply singleton_nonempty },
exact ⟨x, hx⟩
end
@[simp]
lemma preimage_surjective : surjective (preimage f) ↔ injective f :=
begin
refine ⟨λ h x x' hx, _, injective.preimage_surjective⟩,
cases h {x} with s hs, have := mem_singleton x,
rwa [← hs, mem_preimage, hx, ← mem_preimage, hs, mem_singleton_iff, eq_comm] at this
end
@[simp] lemma image_surjective : surjective (image f) ↔ surjective f :=
begin
refine ⟨λ h y, _, surjective.image_surjective⟩,
cases h {y} with s hs,
have := mem_singleton y, rw [← hs] at this, rcases this with ⟨x, h1x, h2x⟩,
exact ⟨x, h2x⟩
end
@[simp] lemma image_injective : injective (image f) ↔ injective f :=
begin
refine ⟨λ h x x' hx, _, injective.image_injective⟩,
rw [← singleton_eq_singleton_iff], apply h,
rw [image_singleton, image_singleton, hx]
end
end image_preimage
/-! ### Lemmas about images of binary and ternary functions -/
section n_ary_image
variables {α β γ δ ε : Type*} {f f' : α → β → γ} {g g' : α → β → γ → δ}
variables {s s' : set α} {t t' : set β} {u u' : set γ} {a a' : α} {b b' : β} {c c' : γ} {d d' : δ}
/-- The image of a binary function `f : α → β → γ` as a function `set α → set β → set γ`.
Mathematically this should be thought of as the image of the corresponding function `α × β → γ`.
-/
def image2 (f : α → β → γ) (s : set α) (t : set β) : set γ :=
{c | ∃ a b, a ∈ s ∧ b ∈ t ∧ f a b = c }
lemma mem_image2_eq : c ∈ image2 f s t = ∃ a b, a ∈ s ∧ b ∈ t ∧ f a b = c := rfl
@[simp] lemma mem_image2 : c ∈ image2 f s t ↔ ∃ a b, a ∈ s ∧ b ∈ t ∧ f a b = c := iff.rfl
lemma mem_image2_of_mem (h1 : a ∈ s) (h2 : b ∈ t) : f a b ∈ image2 f s t :=
⟨a, b, h1, h2, rfl⟩
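-- Illustrative usage sketch (added example, not from the original file): sums of
-- elements of `s` and `t` lie in the `image2` of addition.
example {s t : set ℕ} {a b : ℕ} (ha : a ∈ s) (hb : b ∈ t) : a + b ∈ image2 (+) s t :=
mem_image2_of_mem ha hb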
lemma mem_image2_iff (hf : injective2 f) : f a b ∈ image2 f s t ↔ a ∈ s ∧ b ∈ t :=
⟨ by { rintro ⟨a', b', ha', hb', h⟩, rcases hf h with ⟨rfl, rfl⟩, exact ⟨ha', hb'⟩ },
λ ⟨ha, hb⟩, mem_image2_of_mem ha hb⟩
/-- image2 is monotone with respect to `⊆`. -/
lemma image2_subset (hs : s ⊆ s') (ht : t ⊆ t') : image2 f s t ⊆ image2 f s' t' :=
by { rintro _ ⟨a, b, ha, hb, rfl⟩, exact mem_image2_of_mem (hs ha) (ht hb) }
lemma image2_union_left : image2 f (s ∪ s') t = image2 f s t ∪ image2 f s' t :=
begin
ext c, split,
{ rintros ⟨a, b, h1a|h2a, hb, rfl⟩;[left, right]; exact ⟨_, _, ‹_›, ‹_›, rfl⟩ },
{ rintro (⟨_, _, _, _, rfl⟩|⟨_, _, _, _, rfl⟩); refine ⟨_, _, _, ‹_›, rfl⟩; simp [mem_union, *] }
end
lemma image2_union_right : image2 f s (t ∪ t') = image2 f s t ∪ image2 f s t' :=
begin
ext c, split,
{ rintros ⟨a, b, ha, h1b|h2b, rfl⟩;[left, right]; exact ⟨_, _, ‹_›, ‹_›, rfl⟩ },
{ rintro (⟨_, _, _, _, rfl⟩|⟨_, _, _, _, rfl⟩); refine ⟨_, _, ‹_›, _, rfl⟩; simp [mem_union, *] }
end
@[simp] lemma image2_empty_left : image2 f ∅ t = ∅ := ext $ by simp
@[simp] lemma image2_empty_right : image2 f s ∅ = ∅ := ext $ by simp
lemma image2_inter_subset_left : image2 f (s ∩ s') t ⊆ image2 f s t ∩ image2 f s' t :=
by { rintro _ ⟨a, b, ⟨h1a, h2a⟩, hb, rfl⟩, split; exact ⟨_, _, ‹_›, ‹_›, rfl⟩ }
lemma image2_inter_subset_right : image2 f s (t ∩ t') ⊆ image2 f s t ∩ image2 f s t' :=
by { rintro _ ⟨a, b, ha, ⟨h1b, h2b⟩, rfl⟩, split; exact ⟨_, _, ‹_›, ‹_›, rfl⟩ }
@[simp] lemma image2_singleton : image2 f {a} {b} = {f a b} :=
ext $ λ x, by simp [eq_comm]
lemma image2_singleton_left : image2 f {a} t = f a '' t :=
ext $ λ x, by simp
lemma image2_singleton_right : image2 f s {b} = (λ a, f a b) '' s :=
ext $ λ x, by simp
@[congr] lemma image2_congr (h : ∀ (a ∈ s) (b ∈ t), f a b = f' a b) :
image2 f s t = image2 f' s t :=
by { ext, split; rintro ⟨a, b, ha, hb, rfl⟩; refine ⟨a, b, ha, hb, by rw h a ha b hb⟩ }
/-- A common special case of `image2_congr` -/
lemma image2_congr' (h : ∀ a b, f a b = f' a b) : image2 f s t = image2 f' s t :=
image2_congr (λ a _ b _, h a b)
/-- The image of a ternary function `f : α → β → γ → δ` as a function
`set α → set β → set γ → set δ`. Mathematically this should be thought of as the image of the
corresponding function `α × β × γ → δ`.
-/
def image3 (g : α → β → γ → δ) (s : set α) (t : set β) (u : set γ) : set δ :=
{d | ∃ a b c, a ∈ s ∧ b ∈ t ∧ c ∈ u ∧ g a b c = d }
@[simp] lemma mem_image3 : d ∈ image3 g s t u ↔ ∃ a b c, a ∈ s ∧ b ∈ t ∧ c ∈ u ∧ g a b c = d :=
iff.rfl
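-- Illustrative usage sketch (added example, not from the original file): membership in
-- `image3` is witnessed by the three arguments together with a final `rfl`.
example {s t u : set ℕ} {a b c : ℕ} (ha : a ∈ s) (hb : b ∈ t) (hc : c ∈ u) :
  a + b + c ∈ image3 (λ x y z, x + y + z) s t u :=
⟨a, b, c, ha, hb, hc, rfl⟩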
@[congr] lemma image3_congr (h : ∀ (a ∈ s) (b ∈ t) (c ∈ u), g a b c = g' a b c) :
image3 g s t u = image3 g' s t u :=
by { ext x,
split; rintro ⟨a, b, c, ha, hb, hc, rfl⟩; refine ⟨a, b, c, ha, hb, hc, by rw h a ha b hb c hc⟩ }
/-- A common special case of `image3_congr` -/
lemma image3_congr' (h : ∀ a b c, g a b c = g' a b c) : image3 g s t u = image3 g' s t u :=
image3_congr (λ a _ b _ c _, h a b c)
lemma image2_image2_left (f : δ → γ → ε) (g : α → β → δ) :
image2 f (image2 g s t) u = image3 (λ a b c, f (g a b) c) s t u :=
begin
ext, split,
{ rintro ⟨_, c, ⟨a, b, ha, hb, rfl⟩, hc, rfl⟩, refine ⟨a, b, c, ha, hb, hc, rfl⟩ },
{ rintro ⟨a, b, c, ha, hb, hc, rfl⟩, refine ⟨_, c, ⟨a, b, ha, hb, rfl⟩, hc, rfl⟩ }
end
lemma image2_image2_right (f : α → δ → ε) (g : β → γ → δ) :
image2 f s (image2 g t u) = image3 (λ a b c, f a (g b c)) s t u :=
begin
ext, split,
{ rintro ⟨a, _, ha, ⟨b, c, hb, hc, rfl⟩, rfl⟩, refine ⟨a, b, c, ha, hb, hc, rfl⟩ },
{ rintro ⟨a, b, c, ha, hb, hc, rfl⟩, refine ⟨a, _, ha, ⟨b, c, hb, hc, rfl⟩, rfl⟩ }
end
lemma image_image2 (f : α → β → γ) (g : γ → δ) :
g '' image2 f s t = image2 (λ a b, g (f a b)) s t :=
begin
ext, split,
{ rintro ⟨_, ⟨a, b, ha, hb, rfl⟩, rfl⟩, refine ⟨a, b, ha, hb, rfl⟩ },
{ rintro ⟨a, b, ha, hb, rfl⟩, refine ⟨_, ⟨a, b, ha, hb, rfl⟩, rfl⟩ }
end
lemma image2_image_left (f : γ → β → δ) (g : α → γ) :
image2 f (g '' s) t = image2 (λ a b, f (g a) b) s t :=
begin
ext, split,
{ rintro ⟨_, b, ⟨a, ha, rfl⟩, hb, rfl⟩, refine ⟨a, b, ha, hb, rfl⟩ },
{ rintro ⟨a, b, ha, hb, rfl⟩, refine ⟨_, b, ⟨a, ha, rfl⟩, hb, rfl⟩ }
end
lemma image2_image_right (f : α → γ → δ) (g : β → γ) :
image2 f s (g '' t) = image2 (λ a b, f a (g b)) s t :=
begin
ext, split,
{ rintro ⟨a, _, ha, ⟨b, hb, rfl⟩, rfl⟩, refine ⟨a, b, ha, hb, rfl⟩ },
{ rintro ⟨a, b, ha, hb, rfl⟩, refine ⟨a, _, ha, ⟨b, hb, rfl⟩, rfl⟩ }
end
lemma image2_swap (f : α → β → γ) (s : set α) (t : set β) :
image2 f s t = image2 (λ a b, f b a) t s :=
by { ext, split; rintro ⟨a, b, ha, hb, rfl⟩; refine ⟨b, a, hb, ha, rfl⟩ }
@[simp] lemma image2_left (h : t.nonempty) : image2 (λ x y, x) s t = s :=
by simp [nonempty_def.mp h, ext_iff]
@[simp] lemma image2_right (h : s.nonempty) : image2 (λ x y, y) s t = t :=
by simp [nonempty_def.mp h, ext_iff]
@[simp] lemma image_prod (f : α → β → γ) : (λ x : α × β, f x.1 x.2) '' s.prod t = image2 f s t :=
set.ext $ λ a,
⟨ by { rintros ⟨_, _, rfl⟩, exact ⟨_, _, (mem_prod.mp ‹_›).1, (mem_prod.mp ‹_›).2, rfl⟩ },
by { rintros ⟨_, _, _, _, rfl⟩, exact ⟨(_, _), mem_prod.mpr ⟨‹_›, ‹_›⟩, rfl⟩ }⟩
lemma nonempty.image2 (hs : s.nonempty) (ht : t.nonempty) : (image2 f s t).nonempty :=
by { cases hs with a ha, cases ht with b hb, exact ⟨f a b, ⟨a, b, ha, hb, rfl⟩⟩ }
end n_ary_image
end set
namespace subsingleton
variables {α : Type*} [subsingleton α]
lemma eq_univ_of_nonempty {s : set α} : s.nonempty → s = univ :=
λ ⟨x, hx⟩, eq_univ_of_forall $ λ y, subsingleton.elim x y ▸ hx
@[elab_as_eliminator]
lemma set_cases {p : set α → Prop} (h0 : p ∅) (h1 : p univ) (s) : p s :=
s.eq_empty_or_nonempty.elim (λ h, h.symm ▸ h0) $ λ h, (eq_univ_of_nonempty h).symm ▸ h1
end subsingleton
|
c3f5bbae6c0486eacaf899f62fad4402c68f40c9
|
55c7fc2bf55d496ace18cd6f3376e12bb14c8cc5
|
/src/topology/metric_space/emetric_space.lean
|
5c276c970edc72ba7f649489075ecac9adcd0d78
|
[
"Apache-2.0"
] |
permissive
|
dupuisf/mathlib
|
62de4ec6544bf3b79086afd27b6529acfaf2c1bb
|
8582b06b0a5d06c33ee07d0bdf7c646cae22cf36
|
refs/heads/master
| 1,669,494,854,016
| 1,595,692,409,000
| 1,595,692,409,000
| 272,046,630
| 0
| 0
|
Apache-2.0
| 1,592,066,143,000
| 1,592,066,142,000
| null |
UTF-8
|
Lean
| false
| false
| 40,094
|
lean
|
/-
Copyright (c) 2015, 2017 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Robert Y. Lewis, Johannes Hölzl, Mario Carneiro, Sébastien Gouëzel
-/
import data.real.ennreal
import data.finset.intervals
import topology.uniform_space.uniform_embedding
import topology.uniform_space.pi
import topology.uniform_space.uniform_convergence
/-!
# Extended metric spaces
This file is devoted to the definition and study of `emetric_space`, i.e., metric
spaces in which the distance is allowed to take the value ∞. This extended distance is
called `edist`, and takes values in `ennreal`.
Many definitions and theorems expected on emetric spaces are already introduced on uniform
spaces and topological spaces: for example open and closed sets, compactness, completeness,
continuity and uniform continuity.
The class `emetric_space` therefore extends `uniform_space` (and `topological_space`).
-/
open set filter classical
noncomputable theory
open_locale uniformity topological_space big_operators filter
universes u v w
variables {α : Type u} {β : Type v} {γ : Type w}
/-- Characterizing uniformities associated to a (generalized) distance function `D`
in terms of the elements of the uniformity. -/
@[nolint ge_or_gt] -- see Note [nolint_ge]
theorem uniformity_dist_of_mem_uniformity [linear_order β] {U : filter (α × α)} (z : β) (D : α → α → β)
(H : ∀ s, s ∈ U ↔ ∃ε>z, ∀{a b:α}, D a b < ε → (a, b) ∈ s) :
U = ⨅ ε>z, 𝓟 {p:α×α | D p.1 p.2 < ε} :=
le_antisymm
(le_infi $ λ ε, le_infi $ λ ε0, le_principal_iff.2 $ (H _).2 ⟨ε, ε0, λ a b, id⟩)
(λ r ur, let ⟨ε, ε0, h⟩ := (H _).1 ur in
mem_infi_sets ε $ mem_infi_sets ε0 $ mem_principal_sets.2 $ λ ⟨a, b⟩, h)
class has_edist (α : Type*) := (edist : α → α → ennreal)
export has_edist (edist)
/-- Creating a uniform space from an extended distance. -/
def uniform_space_of_edist
(edist : α → α → ennreal)
(edist_self : ∀ x : α, edist x x = 0)
(edist_comm : ∀ x y : α, edist x y = edist y x)
(edist_triangle : ∀ x y z : α, edist x z ≤ edist x y + edist y z) : uniform_space α :=
uniform_space.of_core {
uniformity := (⨅ ε>0, 𝓟 {p:α×α | edist p.1 p.2 < ε}),
refl := le_infi $ assume ε, le_infi $
by simp [set.subset_def, id_rel, edist_self, (>)] {contextual := tt},
comp :=
le_infi $ assume ε, le_infi $ assume h,
have (2 : ennreal) = (2 : ℕ) := by simp,
have A : 0 < ε / 2 := ennreal.div_pos_iff.2
⟨ne_of_gt h, by { convert ennreal.nat_ne_top 2 }⟩,
lift'_le
(mem_infi_sets (ε / 2) $ mem_infi_sets A (subset.refl _)) $
have ∀ (a b c : α), edist a c < ε / 2 → edist c b < ε / 2 → edist a b < ε,
from assume a b c hac hcb,
calc edist a b ≤ edist a c + edist c b : edist_triangle _ _ _
... < ε / 2 + ε / 2 : ennreal.add_lt_add hac hcb
... = ε : by rw [ennreal.add_halves],
by simpa [comp_rel],
symm := tendsto_infi.2 $ assume ε, tendsto_infi.2 $ assume h,
tendsto_infi' ε $ tendsto_infi' h $ tendsto_principal_principal.2 $ by simp [edist_comm] }
section prio
set_option default_priority 100 -- see Note [default priority]
-- the uniform structure is embedded in the emetric space structure
-- to avoid instance diamond issues. See Note [forgetful inheritance].
/-- Extended metric spaces, with an extended distance `edist` possibly taking the
value ∞.
Each emetric space induces a canonical `uniform_space` and hence a canonical `topological_space`.
This is enforced in the type class definition, by extending the `uniform_space` structure. When
instantiating an `emetric_space` structure, the uniformity fields are not necessary: they will be
filled in by default. The default uniformity can be replaced in cases of interest, for instance
when instantiating an `emetric_space` structure on a product.
Continuity of `edist` is proved in `topology.instances.ennreal`
-/
@[nolint ge_or_gt] -- see Note [nolint_ge]
class emetric_space (α : Type u) extends has_edist α : Type u :=
(edist_self : ∀ x : α, edist x x = 0)
(eq_of_edist_eq_zero : ∀ {x y : α}, edist x y = 0 → x = y)
(edist_comm : ∀ x y : α, edist x y = edist y x)
(edist_triangle : ∀ x y z : α, edist x z ≤ edist x y + edist y z)
(to_uniform_space : uniform_space α := uniform_space_of_edist edist edist_self edist_comm edist_triangle)
(uniformity_edist : 𝓤 α = ⨅ ε>0, 𝓟 {p:α×α | edist p.1 p.2 < ε} . control_laws_tac)
end prio
/- emetric spaces are less common than metric spaces. Therefore, we work in a dedicated
namespace, while notions associated to metric spaces are mostly in the root namespace. -/
variables [emetric_space α]
@[priority 100] -- see Note [lower instance priority]
instance emetric_space.to_uniform_space' : uniform_space α :=
emetric_space.to_uniform_space
export emetric_space (edist_self eq_of_edist_eq_zero edist_comm edist_triangle)
attribute [simp] edist_self
/-- Characterize the equality of points by the vanishing of their extended distance -/
@[simp] theorem edist_eq_zero {x y : α} : edist x y = 0 ↔ x = y :=
iff.intro eq_of_edist_eq_zero (assume : x = y, this ▸ edist_self _)
@[simp] theorem zero_eq_edist {x y : α} : 0 = edist x y ↔ x = y :=
iff.intro (assume h, eq_of_edist_eq_zero (h.symm))
(assume : x = y, this ▸ (edist_self _).symm)
theorem edist_le_zero {x y : α} : (edist x y ≤ 0) ↔ x = y :=
le_zero_iff_eq.trans edist_eq_zero
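-- Illustrative usage sketch (added example, not from the original file): points at
-- extended distance `0` (or less) coincide.
example {x y : α} (h : edist x y ≤ 0) : x = y := edist_le_zero.1 h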
/-- Triangle inequality for the extended distance -/
theorem edist_triangle_left (x y z : α) : edist x y ≤ edist z x + edist z y :=
by rw edist_comm z; apply edist_triangle
theorem edist_triangle_right (x y z : α) : edist x y ≤ edist x z + edist y z :=
by rw edist_comm y; apply edist_triangle
lemma edist_triangle4 (x y z t : α) :
edist x t ≤ edist x y + edist y z + edist z t :=
calc
edist x t ≤ edist x z + edist z t : edist_triangle x z t
... ≤ (edist x y + edist y z) + edist z t : add_le_add_right (edist_triangle x y z) _
/-- The triangle (polygon) inequality for sequences of points; `finset.Ico` version. -/
lemma edist_le_Ico_sum_edist (f : ℕ → α) {m n} (h : m ≤ n) :
edist (f m) (f n) ≤ ∑ i in finset.Ico m n, edist (f i) (f (i + 1)) :=
begin
revert n,
refine nat.le_induction _ _,
{ simp only [finset.sum_empty, finset.Ico.self_eq_empty, edist_self],
-- TODO: Why doesn't Lean close this goal automatically? `apply le_refl` fails too.
exact le_refl (0:ennreal) },
{ assume n hn hrec,
calc edist (f m) (f (n+1)) ≤ edist (f m) (f n) + edist (f n) (f (n+1)) : edist_triangle _ _ _
... ≤ ∑ i in finset.Ico m n, _ + _ : add_le_add hrec (le_refl _)
... = ∑ i in finset.Ico m (n+1), _ :
by rw [finset.Ico.succ_top hn, finset.sum_insert, add_comm]; simp }
end
/-- The triangle (polygon) inequality for sequences of points; `finset.range` version. -/
lemma edist_le_range_sum_edist (f : ℕ → α) (n : ℕ) :
edist (f 0) (f n) ≤ ∑ i in finset.range n, edist (f i) (f (i + 1)) :=
finset.Ico.zero_bot n ▸ edist_le_Ico_sum_edist f (nat.zero_le n)
/-- A version of `edist_le_Ico_sum_edist` with each intermediate distance replaced
with an upper estimate. -/
lemma edist_le_Ico_sum_of_edist_le {f : ℕ → α} {m n} (hmn : m ≤ n)
{d : ℕ → ennreal} (hd : ∀ {k}, m ≤ k → k < n → edist (f k) (f (k + 1)) ≤ d k) :
edist (f m) (f n) ≤ ∑ i in finset.Ico m n, d i :=
le_trans (edist_le_Ico_sum_edist f hmn) $
finset.sum_le_sum $ λ k hk, hd (finset.Ico.mem.1 hk).1 (finset.Ico.mem.1 hk).2
/-- A version of `edist_le_range_sum_edist` with each intermediate distance replaced
with an upper estimate. -/
lemma edist_le_range_sum_of_edist_le {f : ℕ → α} (n : ℕ)
{d : ℕ → ennreal} (hd : ∀ {k}, k < n → edist (f k) (f (k + 1)) ≤ d k) :
edist (f 0) (f n) ≤ ∑ i in finset.range n, d i :=
finset.Ico.zero_bot n ▸ edist_le_Ico_sum_of_edist_le (zero_le n) (λ _ _, hd)
/-- Two points coincide if their distance is `< ε` for all positive ε -/
theorem eq_of_forall_edist_le {x y : α} (h : ∀ε > 0, edist x y ≤ ε) : x = y :=
eq_of_edist_eq_zero (eq_of_le_of_forall_le_of_dense bot_le h)
/-- Reformulation of the uniform structure in terms of the extended distance -/
theorem uniformity_edist :
𝓤 α = ⨅ ε>0, 𝓟 {p:α×α | edist p.1 p.2 < ε} :=
emetric_space.uniformity_edist
theorem uniformity_basis_edist :
(𝓤 α).has_basis (λ ε : ennreal, 0 < ε) (λ ε, {p:α×α | edist p.1 p.2 < ε}) :=
(@uniformity_edist α _).symm ▸ has_basis_binfi_principal
(λ r hr p hp, ⟨min r p, lt_min hr hp,
λ x hx, lt_of_lt_of_le hx (min_le_left _ _),
λ x hx, lt_of_lt_of_le hx (min_le_right _ _)⟩)
⟨1, ennreal.zero_lt_one⟩
/-- Characterization of the elements of the uniformity in terms of the extended distance -/
@[nolint ge_or_gt] -- see Note [nolint_ge]
theorem mem_uniformity_edist {s : set (α×α)} :
s ∈ 𝓤 α ↔ (∃ε>0, ∀{a b:α}, edist a b < ε → (a, b) ∈ s) :=
uniformity_basis_edist.mem_uniformity_iff
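-- Illustrative usage sketch (added example, not from the original file): the
-- `1`-neighborhood of the diagonal belongs to the uniformity, by `mem_uniformity_edist`.
example : {p : α × α | edist p.1 p.2 < 1} ∈ 𝓤 α :=
mem_uniformity_edist.2 ⟨1, ennreal.zero_lt_one, λ a b h, h⟩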
/-- Given `f : β → ennreal`, if `f` sends `{i | p i}` to a set of positive numbers
accumulating to zero, then `f i`-neighborhoods of the diagonal form a basis of `𝓤 α`.
For specific bases see `uniformity_basis_edist`, `uniformity_basis_edist'`,
`uniformity_basis_edist_nnreal`, and `uniformity_basis_edist_inv_nat`. -/
protected theorem emetric.mk_uniformity_basis {β : Type*} {p : β → Prop} {f : β → ennreal}
(hf₀ : ∀ x, p x → 0 < f x) (hf : ∀ ε, 0 < ε → ∃ x (hx : p x), f x ≤ ε) :
(𝓤 α).has_basis p (λ x, {p:α×α | edist p.1 p.2 < f x}) :=
begin
refine ⟨λ s, uniformity_basis_edist.mem_iff.trans _⟩,
split,
{ rintros ⟨ε, ε₀, hε⟩,
rcases hf ε ε₀ with ⟨i, hi, H⟩,
exact ⟨i, hi, λ x hx, hε $ lt_of_lt_of_le hx H⟩ },
{ exact λ ⟨i, hi, H⟩, ⟨f i, hf₀ i hi, H⟩ }
end
/-- Given `f : β → ennreal`, if `f` sends `{i | p i}` to a set of positive numbers
accumulating to zero, then closed `f i`-neighborhoods of the diagonal form a basis of `𝓤 α`.
For specific bases see `uniformity_basis_edist_le` and `uniformity_basis_edist_le'`. -/
protected theorem emetric.mk_uniformity_basis_le {β : Type*} {p : β → Prop} {f : β → ennreal}
(hf₀ : ∀ x, p x → 0 < f x) (hf : ∀ ε, 0 < ε → ∃ x (hx : p x), f x ≤ ε) :
(𝓤 α).has_basis p (λ x, {p:α×α | edist p.1 p.2 ≤ f x}) :=
begin
refine ⟨λ s, uniformity_basis_edist.mem_iff.trans _⟩,
split,
{ rintros ⟨ε, ε₀, hε⟩,
rcases dense ε₀ with ⟨ε', hε'⟩,
rcases hf ε' hε'.1 with ⟨i, hi, H⟩,
exact ⟨i, hi, λ x hx, hε $ lt_of_le_of_lt (le_trans hx H) hε'.2⟩ },
{ exact λ ⟨i, hi, H⟩, ⟨f i, hf₀ i hi, λ x hx, H (le_of_lt hx)⟩ }
end
theorem uniformity_basis_edist_le :
(𝓤 α).has_basis (λ ε : ennreal, 0 < ε) (λ ε, {p:α×α | edist p.1 p.2 ≤ ε}) :=
emetric.mk_uniformity_basis_le (λ _, id) (λ ε ε₀, ⟨ε, ε₀, le_refl ε⟩)
theorem uniformity_basis_edist' (ε' : ennreal) (hε' : 0 < ε') :
(𝓤 α).has_basis (λ ε : ennreal, ε ∈ Ioo 0 ε') (λ ε, {p:α×α | edist p.1 p.2 < ε}) :=
emetric.mk_uniformity_basis (λ _, and.left)
(λ ε ε₀, let ⟨δ, hδ⟩ := dense hε' in
⟨min ε δ, ⟨lt_min ε₀ hδ.1, lt_of_le_of_lt (min_le_right _ _) hδ.2⟩, min_le_left _ _⟩)
theorem uniformity_basis_edist_le' (ε' : ennreal) (hε' : 0 < ε') :
(𝓤 α).has_basis (λ ε : ennreal, ε ∈ Ioo 0 ε') (λ ε, {p:α×α | edist p.1 p.2 ≤ ε}) :=
emetric.mk_uniformity_basis_le (λ _, and.left)
(λ ε ε₀, let ⟨δ, hδ⟩ := dense hε' in
⟨min ε δ, ⟨lt_min ε₀ hδ.1, lt_of_le_of_lt (min_le_right _ _) hδ.2⟩, min_le_left _ _⟩)
theorem uniformity_basis_edist_nnreal :
(𝓤 α).has_basis (λ ε : nnreal, 0 < ε) (λ ε, {p:α×α | edist p.1 p.2 < ε}) :=
emetric.mk_uniformity_basis (λ _, ennreal.coe_pos.2)
(λ ε ε₀, let ⟨δ, hδ⟩ := ennreal.lt_iff_exists_nnreal_btwn.1 ε₀ in
⟨δ, ennreal.coe_pos.1 hδ.1, le_of_lt hδ.2⟩)
theorem uniformity_basis_edist_inv_nat :
(𝓤 α).has_basis (λ _, true) (λ n:ℕ, {p:α×α | edist p.1 p.2 < (↑n)⁻¹}) :=
emetric.mk_uniformity_basis
(λ n _, ennreal.inv_pos.2 $ ennreal.nat_ne_top n)
(λ ε ε₀, let ⟨n, hn⟩ := ennreal.exists_inv_nat_lt (ne_of_gt ε₀) in ⟨n, trivial, le_of_lt hn⟩)
/-- Fixed-size neighborhoods of the diagonal belong to the uniform structure -/
theorem edist_mem_uniformity {ε:ennreal} (ε0 : 0 < ε) :
{p:α×α | edist p.1 p.2 < ε} ∈ 𝓤 α :=
mem_uniformity_edist.2 ⟨ε, ε0, λ a b, id⟩
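-- Illustrative usage (not part of the original file): specializing `edist_mem_uniformity` to the
-- radius `1` shows that the corresponding neighborhood of the diagonal is an entourage.
example : {p : α × α | edist p.1 p.2 < 1} ∈ 𝓤 α :=
edist_mem_uniformity ennreal.zero_lt_one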
namespace emetric
theorem uniformity_has_countable_basis : is_countably_generated (𝓤 α) :=
is_countably_generated_of_seq ⟨_, uniformity_basis_edist_inv_nat.eq_infi⟩
/-- ε-δ characterization of uniform continuity on emetric spaces -/
@[nolint ge_or_gt] -- see Note [nolint_ge]
theorem uniform_continuous_iff [emetric_space β] {f : α → β} :
uniform_continuous f ↔ ∀ ε > 0, ∃ δ > 0,
∀{a b:α}, edist a b < δ → edist (f a) (f b) < ε :=
uniformity_basis_edist.uniform_continuous_iff uniformity_basis_edist
/-- ε-δ characterization of uniform embeddings on emetric spaces -/
@[nolint ge_or_gt] -- see Note [nolint_ge]
theorem uniform_embedding_iff [emetric_space β] {f : α → β} :
uniform_embedding f ↔ function.injective f ∧ uniform_continuous f ∧
∀ δ > 0, ∃ ε > 0, ∀ {a b : α}, edist (f a) (f b) < ε → edist a b < δ :=
uniform_embedding_def'.trans $ and_congr iff.rfl $ and_congr iff.rfl
⟨λ H δ δ0, let ⟨t, tu, ht⟩ := H _ (edist_mem_uniformity δ0),
⟨ε, ε0, hε⟩ := mem_uniformity_edist.1 tu in
⟨ε, ε0, λ a b h, ht _ _ (hε h)⟩,
λ H s su, let ⟨δ, δ0, hδ⟩ := mem_uniformity_edist.1 su, ⟨ε, ε0, hε⟩ := H _ δ0 in
⟨_, edist_mem_uniformity ε0, λ a b h, hδ (hε h)⟩⟩
/-- A map between emetric spaces is a uniform embedding if and only if the edistance between `f x`
and `f y` is controlled in terms of the distance between `x` and `y` and conversely. -/
@[nolint ge_or_gt] -- see Note [nolint_ge]
theorem uniform_embedding_iff' [emetric_space β] {f : α → β} :
uniform_embedding f ↔
(∀ ε > 0, ∃ δ > 0, ∀ {a b : α}, edist a b < δ → edist (f a) (f b) < ε) ∧
(∀ δ > 0, ∃ ε > 0, ∀ {a b : α}, edist (f a) (f b) < ε → edist a b < δ) :=
begin
split,
{ assume h,
exact ⟨uniform_continuous_iff.1 (uniform_embedding_iff.1 h).2.1,
(uniform_embedding_iff.1 h).2.2⟩ },
{ rintros ⟨h₁, h₂⟩,
refine uniform_embedding_iff.2 ⟨_, uniform_continuous_iff.2 h₁, h₂⟩,
assume x y hxy,
have : edist x y ≤ 0,
{ refine le_of_forall_lt' (λδ δpos, _),
rcases h₂ δ δpos with ⟨ε, εpos, hε⟩,
have : edist (f x) (f y) < ε, by simpa [hxy],
exact hε this },
simpa using this }
end
/-- ε-δ characterization of Cauchy sequences on emetric spaces -/
@[nolint ge_or_gt] -- see Note [nolint_ge]
protected lemma cauchy_iff {f : filter α} :
cauchy f ↔ f ≠ ⊥ ∧ ∀ ε > 0, ∃ t ∈ f, ∀ x y ∈ t, edist x y < ε :=
uniformity_basis_edist.cauchy_iff
/-- A very useful criterion to show that a space is complete is to show that all sequences
which satisfy a bound of the form `edist (u n) (u m) < B N` for all `n, m ≥ N` converge.
This is often applied with `B N = 2^{-N}`, i.e., with a very fast convergence to `0`, which
makes it possible to use arguments based on convergent series, something that is impossible
in general for arbitrary Cauchy sequences. -/
theorem complete_of_convergent_controlled_sequences (B : ℕ → ennreal) (hB : ∀n, 0 < B n)
(H : ∀u : ℕ → α, (∀N n m : ℕ, N ≤ n → N ≤ m → edist (u n) (u m) < B N) → ∃x, tendsto u at_top (𝓝 x)) :
complete_space α :=
uniform_space.complete_of_convergent_controlled_sequences
uniformity_has_countable_basis
(λ n, {p:α×α | edist p.1 p.2 < B n}) (λ n, edist_mem_uniformity $ hB n) H
/-- A sequentially complete emetric space is complete. -/
theorem complete_of_cauchy_seq_tendsto :
(∀ u : ℕ → α, cauchy_seq u → ∃a, tendsto u at_top (𝓝 a)) → complete_space α :=
uniform_space.complete_of_cauchy_seq_tendsto uniformity_has_countable_basis
/-- Expressing locally uniform convergence on a set using `edist`. -/
@[nolint ge_or_gt] -- see Note [nolint_ge]
lemma tendsto_locally_uniformly_on_iff {ι : Type*} [topological_space β]
{F : ι → β → α} {f : β → α} {p : filter ι} {s : set β} :
tendsto_locally_uniformly_on F f p s ↔
∀ ε > 0, ∀ x ∈ s, ∃ t ∈ nhds_within x s, ∀ᶠ n in p, ∀ y ∈ t, edist (f y) (F n y) < ε :=
begin
refine ⟨λ H ε hε, H _ (edist_mem_uniformity hε), λ H u hu x hx, _⟩,
rcases mem_uniformity_edist.1 hu with ⟨ε, εpos, hε⟩,
rcases H ε εpos x hx with ⟨t, ht, Ht⟩,
exact ⟨t, ht, Ht.mono (λ n hs x hx, hε (hs x hx))⟩
end
/-- Expressing uniform convergence on a set using `edist`. -/
@[nolint ge_or_gt] -- see Note [nolint_ge]
lemma tendsto_uniformly_on_iff {ι : Type*}
{F : ι → β → α} {f : β → α} {p : filter ι} {s : set β} :
tendsto_uniformly_on F f p s ↔ ∀ ε > 0, ∀ᶠ n in p, ∀ x ∈ s, edist (f x) (F n x) < ε :=
begin
refine ⟨λ H ε hε, H _ (edist_mem_uniformity hε), λ H u hu, _⟩,
rcases mem_uniformity_edist.1 hu with ⟨ε, εpos, hε⟩,
exact (H ε εpos).mono (λ n hs x hx, hε (hs x hx))
end
/-- Expressing locally uniform convergence using `edist`. -/
@[nolint ge_or_gt] -- see Note [nolint_ge]
lemma tendsto_locally_uniformly_iff {ι : Type*} [topological_space β]
{F : ι → β → α} {f : β → α} {p : filter ι} :
tendsto_locally_uniformly F f p ↔
∀ ε > 0, ∀ (x : β), ∃ t ∈ 𝓝 x, ∀ᶠ n in p, ∀ y ∈ t, edist (f y) (F n y) < ε :=
by simp [← nhds_within_univ, ← tendsto_locally_uniformly_on_univ, tendsto_locally_uniformly_on_iff]
/-- Expressing uniform convergence using `edist`. -/
@[nolint ge_or_gt] -- see Note [nolint_ge]
lemma tendsto_uniformly_iff {ι : Type*}
{F : ι → β → α} {f : β → α} {p : filter ι} :
tendsto_uniformly F f p ↔ ∀ ε > 0, ∀ᶠ n in p, ∀ x, edist (f x) (F n x) < ε :=
by { rw [← tendsto_uniformly_on_univ, tendsto_uniformly_on_iff], simp }
end emetric
open emetric
/-- An emetric space is separated -/
@[priority 100] -- see Note [lower instance priority]
instance to_separated : separated_space α :=
separated_def.2 $ λ x y h, eq_of_forall_edist_le $
λ ε ε0, le_of_lt (h _ (edist_mem_uniformity ε0))
/-- Auxiliary function to replace the uniformity on an emetric space with
a uniformity which is equal to the original one, but maybe not defeq.
This is useful if one wants to construct an emetric space with a
specified uniformity. See Note [forgetful inheritance] explaining why having definitionally
the right uniformity is often important.
-/
def emetric_space.replace_uniformity {α} [U : uniform_space α] (m : emetric_space α)
(H : @uniformity _ U = @uniformity _ emetric_space.to_uniform_space) :
emetric_space α :=
{ edist := @edist _ m.to_has_edist,
edist_self := edist_self,
eq_of_edist_eq_zero := @eq_of_edist_eq_zero _ _,
edist_comm := edist_comm,
edist_triangle := edist_triangle,
to_uniform_space := U,
uniformity_edist := H.trans (@emetric_space.uniformity_edist α _) }
/-- The extended metric induced by an injective function taking values in an emetric space. -/
def emetric_space.induced {α β} (f : α → β) (hf : function.injective f)
(m : emetric_space β) : emetric_space α :=
{ edist := λ x y, edist (f x) (f y),
edist_self := λ x, edist_self _,
eq_of_edist_eq_zero := λ x y h, hf (edist_eq_zero.1 h),
edist_comm := λ x y, edist_comm _ _,
edist_triangle := λ x y z, edist_triangle _ _ _,
to_uniform_space := uniform_space.comap f m.to_uniform_space,
uniformity_edist := begin
apply @uniformity_dist_of_mem_uniformity _ _ _ _ _ (λ x y, edist (f x) (f y)),
refine λ s, mem_comap_sets.trans _,
split; intro H,
{ rcases H with ⟨r, ru, rs⟩,
rcases mem_uniformity_edist.1 ru with ⟨ε, ε0, hε⟩,
refine ⟨ε, ε0, λ a b h, rs (hε _)⟩, exact h },
{ rcases H with ⟨ε, ε0, hε⟩,
exact ⟨_, edist_mem_uniformity ε0, λ ⟨a, b⟩, hε⟩ }
end }
/-- Emetric space instance on subsets of emetric spaces -/
instance {α : Type*} {p : α → Prop} [t : emetric_space α] : emetric_space (subtype p) :=
t.induced coe (λ x y, subtype.ext_iff_val.2)
/-- The extended distance on a subset of an emetric space is the restriction of
the original distance, by definition -/
theorem subtype.edist_eq {p : α → Prop} (x y : subtype p) : edist x y = edist (x : α) y := rfl
/-- The product of two emetric spaces, with the max distance, is an extended
metric space. We make sure that the uniform structure thus constructed is the one
corresponding to the product of uniform spaces, to avoid diamond problems. -/
instance prod.emetric_space_max [emetric_space β] : emetric_space (α × β) :=
{ edist := λ x y, max (edist x.1 y.1) (edist x.2 y.2),
edist_self := λ x, by simp,
eq_of_edist_eq_zero := λ x y h, begin
cases max_le_iff.1 (le_of_eq h) with h₁ h₂,
have A : x.fst = y.fst := edist_le_zero.1 h₁,
have B : x.snd = y.snd := edist_le_zero.1 h₂,
exact prod.ext_iff.2 ⟨A, B⟩
end,
edist_comm := λ x y, by simp [edist_comm],
edist_triangle := λ x y z, max_le
(le_trans (edist_triangle _ _ _) (add_le_add (le_max_left _ _) (le_max_left _ _)))
(le_trans (edist_triangle _ _ _) (add_le_add (le_max_right _ _) (le_max_right _ _))),
uniformity_edist := begin
refine uniformity_prod.trans _,
simp [emetric_space.uniformity_edist, comap_infi],
rw ← infi_inf_eq, congr, funext,
rw ← infi_inf_eq, congr, funext,
simp [inf_principal, ext_iff, max_lt_iff]
end,
to_uniform_space := prod.uniform_space }
lemma prod.edist_eq [emetric_space β] (x y : α × β) :
edist x y = max (edist x.1 y.1) (edist x.2 y.2) :=
rfl
section pi
open finset
variables {π : β → Type*} [fintype β]
/-- The product of a finite number of emetric spaces, with the max distance, is still
an emetric space.
This construction would also work for infinite products, but it would not give rise
to the product topology. Hence, we only formalize it in the case of finitely many
spaces. -/
instance emetric_space_pi [∀b, emetric_space (π b)] : emetric_space (Πb, π b) :=
{ edist := λ f g, finset.sup univ (λb, edist (f b) (g b)),
edist_self := assume f, bot_unique $ finset.sup_le $ by simp,
edist_comm := assume f g, by unfold edist; congr; funext a; exact edist_comm _ _,
edist_triangle := assume f g h,
begin
simp only [finset.sup_le_iff],
assume b hb,
exact le_trans (edist_triangle _ (g b) _) (add_le_add (le_sup hb) (le_sup hb))
end,
eq_of_edist_eq_zero := assume f g eq0,
begin
have eq1 : sup univ (λ (b : β), edist (f b) (g b)) ≤ 0 := le_of_eq eq0,
simp only [finset.sup_le_iff] at eq1,
exact (funext $ assume b, edist_le_zero.1 $ eq1 b $ mem_univ b),
end,
to_uniform_space := Pi.uniform_space _,
uniformity_edist := begin
simp only [Pi.uniformity, emetric_space.uniformity_edist, comap_infi, gt_iff_lt,
preimage_set_of_eq, comap_principal],
rw infi_comm, congr, funext ε,
rw infi_comm, congr, funext εpos,
change 0 < ε at εpos,
simp [set.ext_iff, εpos]
end }
end pi
namespace emetric
variables {x y z : α} {ε ε₁ ε₂ : ennreal} {s : set α}
/-- `emetric.ball x ε` is the set of all points `y` with `edist y x < ε` -/
def ball (x : α) (ε : ennreal) : set α := {y | edist y x < ε}
@[simp] theorem mem_ball : y ∈ ball x ε ↔ edist y x < ε := iff.rfl
theorem mem_ball' : y ∈ ball x ε ↔ edist x y < ε := by rw edist_comm; refl
/-- `emetric.closed_ball x ε` is the set of all points `y` with `edist y x ≤ ε` -/
def closed_ball (x : α) (ε : ennreal) := {y | edist y x ≤ ε}
@[simp] theorem mem_closed_ball : y ∈ closed_ball x ε ↔ edist y x ≤ ε := iff.rfl
theorem ball_subset_closed_ball : ball x ε ⊆ closed_ball x ε :=
assume y, by simp; intros h; apply le_of_lt h
theorem pos_of_mem_ball (hy : y ∈ ball x ε) : 0 < ε :=
lt_of_le_of_lt (zero_le _) hy
theorem mem_ball_self (h : 0 < ε) : x ∈ ball x ε :=
show edist x x < ε, by rw edist_self; assumption
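-- Illustrative consequence (not part of the original file): a ball of positive radius is
-- nonempty, since it contains its center.
example (h : 0 < ε) : (ball x ε).nonempty := ⟨x, mem_ball_self h⟩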
theorem mem_closed_ball_self : x ∈ closed_ball x ε :=
show edist x x ≤ ε, by rw edist_self; exact bot_le
theorem mem_ball_comm : x ∈ ball y ε ↔ y ∈ ball x ε :=
by simp [edist_comm]
theorem ball_subset_ball (h : ε₁ ≤ ε₂) : ball x ε₁ ⊆ ball x ε₂ :=
λ y (yx : _ < ε₁), lt_of_lt_of_le yx h
theorem closed_ball_subset_closed_ball (h : ε₁ ≤ ε₂) :
closed_ball x ε₁ ⊆ closed_ball x ε₂ :=
λ y (yx : _ ≤ ε₁), le_trans yx h
theorem ball_disjoint (h : ε₁ + ε₂ ≤ edist x y) : ball x ε₁ ∩ ball y ε₂ = ∅ :=
eq_empty_iff_forall_not_mem.2 $ λ z ⟨h₁, h₂⟩,
not_lt_of_le (edist_triangle_left x y z)
(lt_of_lt_of_le (ennreal.add_lt_add h₁ h₂) h)
theorem ball_subset (h : edist x y + ε₁ ≤ ε₂) (h' : edist x y < ⊤) : ball x ε₁ ⊆ ball y ε₂ :=
λ z zx, calc
edist z y ≤ edist z x + edist x y : edist_triangle _ _ _
... = edist x y + edist z x : add_comm _ _
... < edist x y + ε₁ : (ennreal.add_lt_add_iff_left h').2 zx
... ≤ ε₂ : h
@[nolint ge_or_gt] -- see Note [nolint_ge]
theorem exists_ball_subset_ball (h : y ∈ ball x ε) : ∃ ε' > 0, ball y ε' ⊆ ball x ε :=
begin
have : 0 < ε - edist y x := by simpa using h,
refine ⟨ε - edist y x, this, ball_subset _ _⟩,
{ rw ennreal.add_sub_cancel_of_le (le_of_lt h), apply le_refl _},
{ have : edist y x ≠ ⊤ := ne_top_of_lt h, apply lt_top_iff_ne_top.2 this }
end
theorem ball_eq_empty_iff : ball x ε = ∅ ↔ ε = 0 :=
eq_empty_iff_forall_not_mem.trans
⟨λh, le_bot_iff.1 (le_of_not_gt (λ ε0, h _ (mem_ball_self ε0))),
λε0 y h, not_lt_of_le (le_of_eq ε0) (pos_of_mem_ball h)⟩
/-- The relation “two points are at a finite edistance” is an equivalence relation. -/
def edist_lt_top_setoid : setoid α :=
{ r := λ x y, edist x y < ⊤,
iseqv := ⟨λ x, by { rw edist_self, exact ennreal.coe_lt_top },
λ x y h, by rwa edist_comm,
λ x y z hxy hyz, lt_of_le_of_lt (edist_triangle x y z) (ennreal.add_lt_top.2 ⟨hxy, hyz⟩)⟩ }
@[simp] lemma ball_zero : ball x 0 = ∅ :=
by rw [emetric.ball_eq_empty_iff]
theorem nhds_basis_eball : (𝓝 x).has_basis (λ ε:ennreal, 0 < ε) (ball x) :=
nhds_basis_uniformity uniformity_basis_edist
theorem nhds_basis_closed_eball : (𝓝 x).has_basis (λ ε:ennreal, 0 < ε) (closed_ball x) :=
nhds_basis_uniformity uniformity_basis_edist_le
@[nolint ge_or_gt] -- see Note [nolint_ge]
theorem nhds_eq : 𝓝 x = (⨅ε>0, 𝓟 (ball x ε)) :=
nhds_basis_eball.eq_binfi
@[nolint ge_or_gt] -- see Note [nolint_ge]
theorem mem_nhds_iff : s ∈ 𝓝 x ↔ ∃ε>0, ball x ε ⊆ s := nhds_basis_eball.mem_iff
@[nolint ge_or_gt] -- see Note [nolint_ge]
theorem is_open_iff : is_open s ↔ ∀x∈s, ∃ε>0, ball x ε ⊆ s :=
by simp [is_open_iff_nhds, mem_nhds_iff]
theorem is_open_ball : is_open (ball x ε) :=
is_open_iff.2 $ λ y, exists_ball_subset_ball
theorem is_closed_ball_top : is_closed (ball x ⊤) :=
is_open_iff.2 $ λ y hy, ⟨⊤, ennreal.coe_lt_top, subset_compl_iff_disjoint.2 $
ball_disjoint $ by { rw ennreal.top_add, exact le_of_not_lt hy }⟩
theorem ball_mem_nhds (x : α) {ε : ennreal} (ε0 : 0 < ε) : ball x ε ∈ 𝓝 x :=
mem_nhds_sets is_open_ball (mem_ball_self ε0)
/-- ε-characterization of the closure in emetric spaces -/
@[nolint ge_or_gt] -- see Note [nolint_ge]
theorem mem_closure_iff :
x ∈ closure s ↔ ∀ε>0, ∃y ∈ s, edist x y < ε :=
(mem_closure_iff_nhds_basis nhds_basis_eball).trans $
by simp only [mem_ball, edist_comm x]
@[nolint ge_or_gt] -- see Note [nolint_ge]
theorem tendsto_nhds {f : filter β} {u : β → α} {a : α} :
tendsto u f (𝓝 a) ↔ ∀ ε > 0, ∀ᶠ x in f, edist (u x) a < ε :=
nhds_basis_eball.tendsto_right_iff
@[nolint ge_or_gt] -- see Note [nolint_ge]
theorem tendsto_at_top [nonempty β] [semilattice_sup β] (u : β → α) {a : α} :
tendsto u at_top (𝓝 a) ↔ ∀ε>0, ∃N, ∀n≥N, edist (u n) a < ε :=
(at_top_basis.tendsto_iff nhds_basis_eball).trans $
by simp only [exists_prop, true_and, mem_Ici, mem_ball]
/-- In an emetric space, Cauchy sequences are characterized by the fact that, eventually,
the edistance between their elements is arbitrarily small -/
@[nolint ge_or_gt] -- see Note [nolint_ge]
theorem cauchy_seq_iff [nonempty β] [semilattice_sup β] {u : β → α} :
cauchy_seq u ↔ ∀ε>0, ∃N, ∀m n≥N, edist (u m) (u n) < ε :=
uniformity_basis_edist.cauchy_seq_iff
/-- A variation on the emetric characterization of Cauchy sequences -/
@[nolint ge_or_gt] -- see Note [nolint_ge]
theorem cauchy_seq_iff' [nonempty β] [semilattice_sup β] {u : β → α} :
cauchy_seq u ↔ ∀ε>(0 : ennreal), ∃N, ∀n≥N, edist (u n) (u N) < ε :=
uniformity_basis_edist.cauchy_seq_iff'
/-- A variation of the emetric characterization of Cauchy sequences that deals with
`nnreal` upper bounds. -/
theorem cauchy_seq_iff_nnreal [nonempty β] [semilattice_sup β] {u : β → α} :
cauchy_seq u ↔ ∀ ε : nnreal, 0 < ε → ∃ N, ∀ n, N ≤ n → edist (u n) (u N) < ε :=
uniformity_basis_edist_nnreal.cauchy_seq_iff'
@[nolint ge_or_gt] -- see Note [nolint_ge]
theorem totally_bounded_iff {s : set α} :
totally_bounded s ↔ ∀ ε > 0, ∃t : set α, finite t ∧ s ⊆ ⋃y∈t, ball y ε :=
⟨λ H ε ε0, H _ (edist_mem_uniformity ε0),
λ H r ru, let ⟨ε, ε0, hε⟩ := mem_uniformity_edist.1 ru,
⟨t, ft, h⟩ := H ε ε0 in
⟨t, ft, subset.trans h $ Union_subset_Union $ λ y, Union_subset_Union $ λ yt z, hε⟩⟩
@[nolint ge_or_gt] -- see Note [nolint_ge]
theorem totally_bounded_iff' {s : set α} :
totally_bounded s ↔ ∀ ε > 0, ∃t⊆s, finite t ∧ s ⊆ ⋃y∈t, ball y ε :=
⟨λ H ε ε0, (totally_bounded_iff_subset.1 H) _ (edist_mem_uniformity ε0),
λ H r ru, let ⟨ε, ε0, hε⟩ := mem_uniformity_edist.1 ru,
⟨t, _, ft, h⟩ := H ε ε0 in
⟨t, ft, subset.trans h $ Union_subset_Union $ λ y, Union_subset_Union $ λ yt z, hε⟩⟩
section compact
/-- A compact set in an emetric space is separable, i.e., it is the closure of a countable set -/
lemma countable_closure_of_compact {α : Type u} [emetric_space α] {s : set α} (hs : is_compact s) :
∃ t ⊆ s, (countable t ∧ s = closure t) :=
begin
have A : ∀ (e:ennreal), e > 0 → ∃ t ⊆ s, (finite t ∧ s ⊆ (⋃x∈t, ball x e)) :=
totally_bounded_iff'.1 (compact_iff_totally_bounded_complete.1 hs).1,
-- assume e, finite_cover_balls_of_compact hs,
have B : ∀ (e:ennreal), ∃ t ⊆ s, finite t ∧ (e > 0 → s ⊆ (⋃x∈t, ball x e)),
{ intro e,
cases le_or_gt e 0 with h,
{ exact ⟨∅, by finish⟩ },
{ rcases A e h with ⟨s, ⟨finite_s, closure_s⟩⟩, existsi s, finish }},
/- The desired countable set is obtained by taking for each `n` the centers of a finite cover
by balls of radius `1/n`, and then the union over `n`. -/
choose T T_in_s finite_T using B,
let t := ⋃n:ℕ, T n⁻¹,
have T₁ : t ⊆ s := begin apply Union_subset, assume n, apply T_in_s end,
have T₂ : countable t := by finish [countable_Union, finite.countable],
have T₃ : s ⊆ closure t,
{ intros x x_in_s,
apply mem_closure_iff.2,
intros ε εpos,
rcases ennreal.exists_inv_nat_lt (bot_lt_iff_ne_bot.1 εpos) with ⟨n, hn⟩,
have inv_n_pos : (0 : ennreal) < (n : ℕ)⁻¹ := by simp [ennreal.bot_lt_iff_ne_bot],
have C : x ∈ (⋃y∈ T (n : ℕ)⁻¹, ball y (n : ℕ)⁻¹) :=
mem_of_mem_of_subset x_in_s ((finite_T (n : ℕ)⁻¹).2 inv_n_pos),
rcases mem_Union.1 C with ⟨y, _, ⟨y_in_T, rfl⟩, Dxy⟩,
simp at Dxy, -- Dxy : edist x y < 1 / ↑n
have : y ∈ t := mem_of_mem_of_subset y_in_T (by apply subset_Union (λ (n:ℕ), T (n : ℕ)⁻¹)),
have : edist x y < ε := lt_trans Dxy hn,
exact ⟨y, ‹y ∈ t›, ‹edist x y < ε›⟩ },
have T₄ : closure t ⊆ s := calc
closure t ⊆ closure s : closure_mono T₁
... = s : hs.is_closed.closure_eq,
exact ⟨t, ⟨T₁, T₂, subset.antisymm T₃ T₄⟩⟩
end
end compact
section first_countable
@[priority 100] -- see Note [lower instance priority]
instance (α : Type u) [emetric_space α] :
topological_space.first_countable_topology α :=
uniform_space.first_countable_topology uniformity_has_countable_basis
end first_countable
section second_countable
open topological_space
/-- A separable emetric space is second countable: one obtains a countable basis by taking
the balls centered at points in a dense subset, and with radii of the form `n⁻¹`. We do not register
this as an instance, as there is already an instance going in the other direction
from second countable spaces to separable spaces, and we want to avoid loops. -/
lemma second_countable_of_separable (α : Type u) [emetric_space α] [separable_space α] :
second_countable_topology α :=
let ⟨S, ⟨S_countable, S_dense⟩⟩ := separable_space.exists_countable_closure_eq_univ in
⟨⟨⋃x ∈ S, ⋃ (n : nat), {ball x (n⁻¹)},
⟨show countable ⋃x ∈ S, ⋃ (n : nat), {ball x (n⁻¹)},
{ apply S_countable.bUnion,
intros a aS,
apply countable_Union,
simp },
show uniform_space.to_topological_space = generate_from (⋃x ∈ S, ⋃ (n : nat), {ball x (n⁻¹)}),
{ have A : ∀ (u : set α), (u ∈ ⋃x ∈ S, ⋃ (n : nat), ({ball x ((n : ennreal)⁻¹)} : set (set α))) → is_open u,
{ simp only [and_imp, exists_prop, set.mem_Union, set.mem_singleton_iff, exists_imp_distrib],
intros u x hx i u_ball,
rw [u_ball],
exact is_open_ball },
have B : is_topological_basis (⋃x ∈ S, ⋃ (n : nat), ({ball x (n⁻¹)} : set (set α))),
{ refine is_topological_basis_of_open_of_nhds A (λa u au open_u, _),
rcases is_open_iff.1 open_u a au with ⟨ε, εpos, εball⟩,
have : ε / 2 > 0 := ennreal.half_pos εpos,
/- The ball `ball a ε` is included in `u`. We need to find one of our balls `ball x (n⁻¹)`
containing `a` and contained in `ball a ε`. For this, we take `n` larger than `2/ε`, and
then `x` in `S` at distance less than `n⁻¹` from `a` -/
rcases ennreal.exists_inv_nat_lt (bot_lt_iff_ne_bot.1 (ennreal.half_pos εpos)) with ⟨n, εn⟩,
have : (0 : ennreal) < n⁻¹ := by simp [ennreal.bot_lt_iff_ne_bot],
have : (a : α) ∈ closure (S : set α) := by rw [S_dense]; simp,
rcases mem_closure_iff.1 this _ ‹(0 : ennreal) < n⁻¹› with ⟨x, xS, xdist⟩,
existsi ball x (↑n)⁻¹,
have I : ball x (n⁻¹) ⊆ ball a ε := λy ydist, calc
edist y a = edist a y : edist_comm _ _
... ≤ edist a x + edist y x : edist_triangle_right _ _ _
... < n⁻¹ + n⁻¹ : ennreal.add_lt_add xdist ydist
... < ε/2 + ε/2 : ennreal.add_lt_add εn εn
... = ε : ennreal.add_halves _,
simp only [emetric.mem_ball, exists_prop, set.mem_Union, set.mem_singleton_iff],
exact ⟨⟨x, ⟨xS, ⟨n, rfl⟩⟩⟩, ⟨by simpa, subset.trans I εball⟩⟩ },
exact B.2.2 }⟩⟩⟩
end second_countable
section diam
/-- The diameter of a set in an emetric space, named `emetric.diam` -/
def diam (s : set α) := ⨆ (x ∈ s) (y ∈ s), edist x y
lemma diam_le_iff_forall_edist_le {d : ennreal} :
diam s ≤ d ↔ ∀ (x ∈ s) (y ∈ s), edist x y ≤ d :=
by simp only [diam, supr_le_iff]
/-- If two points belong to some set, their edistance is bounded by the diameter of the set -/
lemma edist_le_diam_of_mem (hx : x ∈ s) (hy : y ∈ s) : edist x y ≤ diam s :=
diam_le_iff_forall_edist_le.1 (le_refl _) x hx y hy
/-- If the distance between any two points in a set is bounded by some constant, this constant
bounds the diameter. -/
lemma diam_le_of_forall_edist_le {d : ennreal} (h : ∀ (x ∈ s) (y ∈ s), edist x y ≤ d) :
diam s ≤ d :=
diam_le_iff_forall_edist_le.2 h
/-- The diameter of a subsingleton vanishes. -/
lemma diam_subsingleton (hs : s.subsingleton) : diam s = 0 :=
le_zero_iff_eq.1 $ diam_le_of_forall_edist_le $
λ x hx y hy, (hs hx hy).symm ▸ edist_self y ▸ le_refl _
/-- The diameter of the empty set vanishes -/
@[simp] lemma diam_empty : diam (∅ : set α) = 0 :=
diam_subsingleton subsingleton_empty
/-- The diameter of a singleton vanishes -/
@[simp] lemma diam_singleton : diam ({x} : set α) = 0 :=
diam_subsingleton subsingleton_singleton
lemma diam_eq_zero_iff : diam s = 0 ↔ s.subsingleton :=
⟨λ h x hx y hy, edist_le_zero.1 $ h ▸ edist_le_diam_of_mem hx hy, diam_subsingleton⟩
lemma diam_pos_iff : 0 < diam s ↔ ∃ (x ∈ s) (y ∈ s), x ≠ y :=
begin
have := not_congr (@diam_eq_zero_iff _ _ s),
dunfold set.subsingleton at this,
push_neg at this,
simpa only [zero_lt_iff_ne_zero, exists_prop] using this
end
lemma diam_insert : diam (insert x s) = max (⨆ y ∈ s, edist x y) (diam s) :=
eq_of_forall_ge_iff $ λ d, by simp only [diam_le_iff_forall_edist_le, ball_insert_iff,
edist_self, edist_comm x, max_le_iff, supr_le_iff, zero_le, true_and,
forall_and_distrib, and_self, ← and_assoc]
lemma diam_pair : diam ({x, y} : set α) = edist x y :=
by simp only [supr_singleton, diam_insert, diam_singleton, ennreal.max_zero_right]
lemma diam_triple :
diam ({x, y, z} : set α) = max (max (edist x y) (edist x z)) (edist y z) :=
by simp only [diam_insert, supr_insert, supr_singleton, diam_singleton,
ennreal.max_zero_right, ennreal.sup_eq_max]
/-- The diameter is monotone with respect to inclusion -/
lemma diam_mono {s t : set α} (h : s ⊆ t) : diam s ≤ diam t :=
diam_le_of_forall_edist_le $ λ x hx y hy, edist_le_diam_of_mem (h hx) (h hy)
/-- The diameter of a union is controlled by the diameters of the two sets and the edistance
between a point of the first set and a point of the second one. -/
lemma diam_union {t : set α} (xs : x ∈ s) (yt : y ∈ t) : diam (s ∪ t) ≤ diam s + edist x y + diam t :=
begin
have A : ∀a ∈ s, ∀b ∈ t, edist a b ≤ diam s + edist x y + diam t := λa ha b hb, calc
edist a b ≤ edist a x + edist x y + edist y b : edist_triangle4 _ _ _ _
... ≤ diam s + edist x y + diam t :
add_le_add (add_le_add (edist_le_diam_of_mem ha xs) (le_refl _)) (edist_le_diam_of_mem yt hb),
refine diam_le_of_forall_edist_le (λa ha b hb, _),
cases (mem_union _ _ _).1 ha with h'a h'a; cases (mem_union _ _ _).1 hb with h'b h'b,
{ calc edist a b ≤ diam s : edist_le_diam_of_mem h'a h'b
... ≤ diam s + (edist x y + diam t) : le_add_right (le_refl _)
... = diam s + edist x y + diam t : by simp only [add_comm, eq_self_iff_true, add_left_comm] },
{ exact A a h'a b h'b },
{ have Z := A b h'b a h'a, rwa [edist_comm] at Z },
{ calc edist a b ≤ diam t : edist_le_diam_of_mem h'a h'b
... ≤ (diam s + edist x y) + diam t : le_add_left (le_refl _) }
end
lemma diam_union' {t : set α} (h : (s ∩ t).nonempty) : diam (s ∪ t) ≤ diam s + diam t :=
let ⟨x, ⟨xs, xt⟩⟩ := h in by simpa using diam_union xs xt
lemma diam_closed_ball {r : ennreal} : diam (closed_ball x r) ≤ 2 * r :=
diam_le_of_forall_edist_le $ λa ha b hb, calc
edist a b ≤ edist a x + edist b x : edist_triangle_right _ _ _
... ≤ r + r : add_le_add ha hb
... = 2 * r : by simp [mul_two, mul_comm]
lemma diam_ball {r : ennreal} : diam (ball x r) ≤ 2 * r :=
le_trans (diam_mono ball_subset_closed_ball) diam_closed_ball
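-- Illustrative corollary (not part of the original file): any set contained in a closed ball
-- of radius `r` has diameter at most `2 * r`, combining `diam_mono` with `diam_closed_ball`.
example {r : ennreal} (h : s ⊆ closed_ball x r) : diam s ≤ 2 * r :=
le_trans (diam_mono h) diam_closed_ball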
end diam
end emetric --namespace
|
402ad99e3ab8f3cd0e664cdae86664a5c3bf18e0
|
d406927ab5617694ec9ea7001f101b7c9e3d9702
|
/src/algebra/big_operators/multiset/basic.lean
|
8f44040888cbacee6a2fe94305673654dd0556a2
|
[
"Apache-2.0"
] |
permissive
|
alreadydone/mathlib
|
dc0be621c6c8208c581f5170a8216c5ba6721927
|
c982179ec21091d3e102d8a5d9f5fe06c8fafb73
|
refs/heads/master
| 1,685,523,275,196
| 1,670,184,141,000
| 1,670,184,141,000
| 287,574,545
| 0
| 0
|
Apache-2.0
| 1,670,290,714,000
| 1,597,421,623,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 16,065
|
lean
|
/-
Copyright (c) 2015 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
import data.list.big_operators.basic
import data.multiset.basic
/-!
# Sums and products over multisets
In this file we define products and sums indexed by multisets. This is later used to define products
and sums indexed by finite sets.
## Main declarations
* `multiset.prod`: `s.prod` is the product of all the elements of `s`. Not to be confused with
  the cartesian product `multiset.product`.
* `multiset.sum`: `s.sum` is the sum of all the elements of `s`.
## Implementation notes
Nov 2022: To speed up the Lean 4 port, lemmas requiring extra algebra imports
(`data.list.big_operators.lemmas` rather than `.basic`) have been moved to a separate file,
`algebra.big_operators.multiset.lemmas`. This split does not need to be permanent.
-/
variables {ι α β γ : Type*}
namespace multiset
section comm_monoid
variables [comm_monoid α] {s t : multiset α} {a : α} {m : multiset ι} {f g : ι → α}
/-- Product of a multiset given a commutative monoid structure on `α`.
`prod {a, b, c} = a * b * c` -/
@[to_additive "Sum of a multiset given a commutative additive monoid structure on `α`.
`sum {a, b, c} = a + b + c`"]
def prod : multiset α → α := foldr (*) (λ x y z, by simp [mul_left_comm]) 1
@[to_additive]
lemma prod_eq_foldr (s : multiset α) : prod s = foldr (*) (λ x y z, by simp [mul_left_comm]) 1 s :=
rfl
@[to_additive]
lemma prod_eq_foldl (s : multiset α) : prod s = foldl (*) (λ x y z, by simp [mul_right_comm]) 1 s :=
(foldr_swap _ _ _ _).trans (by simp [mul_comm])
@[simp, norm_cast, to_additive] lemma coe_prod (l : list α) : prod ↑l = l.prod := prod_eq_foldl _
@[simp, to_additive]
lemma prod_to_list (s : multiset α) : s.to_list.prod = s.prod :=
begin
conv_rhs { rw ←coe_to_list s },
rw coe_prod,
end
@[simp, to_additive] lemma prod_zero : @prod α _ 0 = 1 := rfl
@[simp, to_additive]
lemma prod_cons (a : α) (s) : prod (a ::ₘ s) = a * prod s := foldr_cons _ _ _ _ _
@[simp, to_additive]
lemma prod_erase [decidable_eq α] (h : a ∈ s) : a * (s.erase a).prod = s.prod :=
by rw [← s.coe_to_list, coe_erase, coe_prod, coe_prod, list.prod_erase (mem_to_list.2 h)]
@[simp, to_additive]
lemma prod_map_erase [decidable_eq ι] {a : ι} (h : a ∈ m) :
f a * ((m.erase a).map f).prod = (m.map f).prod :=
by rw [← m.coe_to_list, coe_erase, coe_map, coe_map, coe_prod, coe_prod,
list.prod_map_erase f (mem_to_list.2 h)]
@[simp, to_additive]
lemma prod_singleton (a : α) : prod {a} = a :=
by simp only [mul_one, prod_cons, ←cons_zero, eq_self_iff_true, prod_zero]
@[to_additive]
lemma prod_pair (a b : α) : ({a, b} : multiset α).prod = a * b :=
by rw [insert_eq_cons, prod_cons, prod_singleton]
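-- Illustrative computation (not part of the original file): unfolding the product of a
-- three-element multiset, in the same way `prod_pair` is proved just above.
example (x y z : α) : ({x, y, z} : multiset α).prod = x * (y * z) :=
by rw [insert_eq_cons, prod_cons, prod_pair]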
@[simp, to_additive]
lemma prod_add (s t : multiset α) : prod (s + t) = prod s * prod t :=
quotient.induction_on₂ s t $ λ l₁ l₂, by simp
lemma prod_nsmul (m : multiset α) : ∀ (n : ℕ), (n • m).prod = m.prod ^ n
| 0 := by { rw [zero_nsmul, pow_zero], refl }
| (n + 1) :=
by rw [add_nsmul, one_nsmul, pow_add, pow_one, prod_add, prod_nsmul n]
@[simp, to_additive] lemma prod_repeat (a : α) (n : ℕ) : (repeat a n).prod = a ^ n :=
by simp [repeat, list.prod_repeat]
@[to_additive]
lemma prod_map_eq_pow_single [decidable_eq ι] (i : ι) (hf : ∀ i' ≠ i, i' ∈ m → f i' = 1) :
(m.map f).prod = f i ^ m.count i :=
begin
induction m using quotient.induction_on with l,
simp [list.prod_map_eq_pow_single i f hf],
end
@[to_additive]
lemma prod_eq_pow_single [decidable_eq α] (a : α) (h : ∀ a' ≠ a, a' ∈ s → a' = 1) :
s.prod = a ^ (s.count a) :=
begin
induction s using quotient.induction_on with l,
simp [list.prod_eq_pow_single a h],
end
@[to_additive]
lemma pow_count [decidable_eq α] (a : α) : a ^ s.count a = (s.filter (eq a)).prod :=
by rw [filter_eq, prod_repeat]
@[to_additive]
lemma prod_hom [comm_monoid β] (s : multiset α) {F : Type*} [monoid_hom_class F α β] (f : F) :
(s.map f).prod = f s.prod :=
quotient.induction_on s $ λ l, by simp only [l.prod_hom f, quot_mk_to_coe, coe_map, coe_prod]
@[to_additive]
lemma prod_hom' [comm_monoid β] (s : multiset ι) {F : Type*} [monoid_hom_class F α β] (f : F)
(g : ι → α) : (s.map $ λ i, f $ g i).prod = f (s.map g).prod :=
by { convert (s.map g).prod_hom f, exact (map_map _ _ _).symm }
@[to_additive]
lemma prod_hom₂ [comm_monoid β] [comm_monoid γ] (s : multiset ι) (f : α → β → γ)
(hf : ∀ a b c d, f (a * b) (c * d) = f a c * f b d) (hf' : f 1 1 = 1) (f₁ : ι → α) (f₂ : ι → β) :
(s.map $ λ i, f (f₁ i) (f₂ i)).prod = f (s.map f₁).prod (s.map f₂).prod :=
quotient.induction_on s $ λ l,
by simp only [l.prod_hom₂ f hf hf', quot_mk_to_coe, coe_map, coe_prod]
@[to_additive]
lemma prod_hom_rel [comm_monoid β] (s : multiset ι) {r : α → β → Prop} {f : ι → α} {g : ι → β}
(h₁ : r 1 1) (h₂ : ∀ ⦃a b c⦄, r b c → r (f a * b) (g a * c)) :
r (s.map f).prod (s.map g).prod :=
quotient.induction_on s $ λ l,
by simp only [l.prod_hom_rel h₁ h₂, quot_mk_to_coe, coe_map, coe_prod]
@[to_additive]
lemma prod_map_one : prod (m.map (λ i, (1 : α))) = 1 := by rw [map_const, prod_repeat, one_pow]
@[simp, to_additive]
lemma prod_map_mul : (m.map $ λ i, f i * g i).prod = (m.map f).prod * (m.map g).prod :=
m.prod_hom₂ (*) mul_mul_mul_comm (mul_one _) _ _
@[simp]
lemma prod_map_neg [has_distrib_neg α] (s : multiset α) :
(s.map has_neg.neg).prod = (-1) ^ s.card * s.prod :=
by { refine quotient.ind _ s, simp }
@[to_additive]
lemma prod_map_pow {n : ℕ} : (m.map $ λ i, f i ^ n).prod = (m.map f).prod ^ n :=
m.prod_hom' (pow_monoid_hom n : α →* α) f
@[to_additive]
lemma prod_map_prod_map (m : multiset β) (n : multiset γ) {f : β → γ → α} :
prod (m.map $ λ a, prod $ n.map $ λ b, f a b) = prod (n.map $ λ b, prod $ m.map $ λ a, f a b) :=
multiset.induction_on m (by simp) (λ a m ih, by simp [ih])
@[to_additive]
lemma prod_induction (p : α → Prop) (s : multiset α) (p_mul : ∀ a b, p a → p b → p (a * b))
(p_one : p 1) (p_s : ∀ a ∈ s, p a) :
p s.prod :=
begin
rw prod_eq_foldr,
exact foldr_induction (*) (λ x y z, by simp [mul_left_comm]) 1 p s p_mul p_one p_s,
end
@[to_additive]
lemma prod_induction_nonempty (p : α → Prop) (p_mul : ∀ a b, p a → p b → p (a * b))
(hs : s ≠ ∅) (p_s : ∀ a ∈ s, p a) :
p s.prod :=
begin
revert s,
refine multiset.induction _ _,
{ intro h,
exfalso,
simpa using h },
intros a s hs hsa hpsa,
rw prod_cons,
by_cases hs_empty : s = ∅,
{ simp [hs_empty, hpsa a] },
have hps : ∀ x, x ∈ s → p x, from λ x hxs, hpsa x (mem_cons_of_mem hxs),
exact p_mul a s.prod (hpsa a (mem_cons_self a s)) (hs hs_empty hps),
end
lemma prod_dvd_prod_of_le (h : s ≤ t) : s.prod ∣ t.prod :=
by { obtain ⟨z, rfl⟩ := exists_add_of_le h, simp only [prod_add, dvd_mul_right] }
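-- Illustrative special case (not part of the original file): the product over `u` divides the
-- product over `x ::ₘ u`, via `prod_dvd_prod_of_le` and the standard `le_cons_self` from
-- `data.multiset.basic`.
example (x : α) (u : multiset α) : u.prod ∣ (x ::ₘ u).prod :=
prod_dvd_prod_of_le (le_cons_self _ _)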
end comm_monoid
lemma prod_dvd_prod_of_dvd [comm_monoid β] {S : multiset α} (g1 g2 : α → β)
(h : ∀ a ∈ S, g1 a ∣ g2 a) :
(multiset.map g1 S).prod ∣ (multiset.map g2 S).prod :=
begin
apply multiset.induction_on' S, { simp },
intros a T haS _ IH,
simp [mul_dvd_mul (h a haS) IH]
end
section add_comm_monoid
variables [add_comm_monoid α]
/-- `multiset.sum`, the sum of the elements of a multiset, promoted to a morphism of
`add_comm_monoid`s. -/
def sum_add_monoid_hom : multiset α →+ α :=
{ to_fun := sum,
map_zero' := sum_zero,
map_add' := sum_add }
@[simp] lemma coe_sum_add_monoid_hom : (sum_add_monoid_hom : multiset α → α) = sum := rfl
end add_comm_monoid
section comm_monoid_with_zero
variables [comm_monoid_with_zero α]
lemma prod_eq_zero {s : multiset α} (h : (0 : α) ∈ s) : s.prod = 0 :=
begin
rcases multiset.exists_cons_of_mem h with ⟨s', hs'⟩,
simp [hs', multiset.prod_cons]
end
variables [no_zero_divisors α] [nontrivial α] {s : multiset α}
lemma prod_eq_zero_iff : s.prod = 0 ↔ (0 : α) ∈ s :=
quotient.induction_on s $ λ l, by { rw [quot_mk_to_coe, coe_prod], exact list.prod_eq_zero_iff }
lemma prod_ne_zero (h : (0 : α) ∉ s) : s.prod ≠ 0 := mt prod_eq_zero_iff.1 h
end comm_monoid_with_zero
section division_comm_monoid
variables [division_comm_monoid α] {m : multiset ι} {f g : ι → α}
@[to_additive] lemma prod_map_inv' (m : multiset α) : (m.map has_inv.inv).prod = m.prod⁻¹ :=
m.prod_hom (inv_monoid_hom : α →* α)
@[simp, to_additive] lemma prod_map_inv : (m.map $ λ i, (f i)⁻¹).prod = (m.map f).prod ⁻¹ :=
by { convert (m.map f).prod_map_inv', rw map_map }
@[simp, to_additive]
lemma prod_map_div : (m.map $ λ i, f i / g i).prod = (m.map f).prod / (m.map g).prod :=
m.prod_hom₂ (/) mul_div_mul_comm (div_one _) _ _
@[to_additive]
lemma prod_map_zpow {n : ℤ} : (m.map $ λ i, f i ^ n).prod = (m.map f).prod ^ n :=
by { convert (m.map f).prod_hom (zpow_group_hom _ : α →* α), rw map_map, refl }
end division_comm_monoid
section non_unital_non_assoc_semiring
variables [non_unital_non_assoc_semiring α] {a : α} {s : multiset ι} {f : ι → α}
lemma sum_map_mul_left : sum (s.map (λ i, a * f i)) = a * sum (s.map f) :=
multiset.induction_on s (by simp) (λ i s ih, by simp [ih, mul_add])
lemma sum_map_mul_right : sum (s.map (λ i, f i * a)) = sum (s.map f) * a :=
multiset.induction_on s (by simp) (λ a s ih, by simp [ih, add_mul])
end non_unital_non_assoc_semiring
section semiring
variables [semiring α]
lemma dvd_sum {a : α} {s : multiset α} : (∀ x ∈ s, a ∣ x) → a ∣ s.sum :=
multiset.induction_on s (λ _, dvd_zero _)
(λ x s ih h, by { rw sum_cons, exact dvd_add
(h _ (mem_cons_self _ _)) (ih $ λ y hy, h _ $ mem_cons.2 $ or.inr hy) })
end semiring
/-! ### Order -/
section ordered_comm_monoid
variables [ordered_comm_monoid α] {s t : multiset α} {a : α}
@[to_additive sum_nonneg]
lemma one_le_prod_of_one_le : (∀ x ∈ s, (1 : α) ≤ x) → 1 ≤ s.prod :=
quotient.induction_on s $ λ l hl, by simpa using list.one_le_prod_of_one_le hl
@[to_additive]
lemma single_le_prod : (∀ x ∈ s, (1 : α) ≤ x) → ∀ x ∈ s, x ≤ s.prod :=
quotient.induction_on s $ λ l hl x hx, by simpa using list.single_le_prod hl x hx
@[to_additive sum_le_card_nsmul]
lemma prod_le_pow_card (s : multiset α) (n : α) (h : ∀ x ∈ s, x ≤ n) : s.prod ≤ n ^ s.card :=
begin
induction s using quotient.induction_on,
simpa using list.prod_le_pow_card _ _ h,
end
@[to_additive all_zero_of_le_zero_le_of_sum_eq_zero]
lemma all_one_of_le_one_le_of_prod_eq_one :
(∀ x ∈ s, (1 : α) ≤ x) → s.prod = 1 → ∀ x ∈ s, x = (1 : α) :=
begin
apply quotient.induction_on s,
simp only [quot_mk_to_coe, coe_prod, mem_coe],
exact λ l, list.all_one_of_le_one_le_of_prod_eq_one,
end
@[to_additive]
lemma prod_le_prod_of_rel_le (h : s.rel (≤) t) : s.prod ≤ t.prod :=
begin
induction h with _ _ _ _ rh _ rt,
{ refl },
{ rw [prod_cons, prod_cons],
exact mul_le_mul' rh rt }
end
@[to_additive]
lemma prod_map_le_prod_map {s : multiset ι} (f : ι → α) (g : ι → α) (h : ∀ i, i ∈ s → f i ≤ g i) :
(s.map f).prod ≤ (s.map g).prod :=
prod_le_prod_of_rel_le $ rel_map.2 $ rel_refl_of_refl_on h
@[to_additive]
lemma prod_map_le_prod (f : α → α) (h : ∀ x, x ∈ s → f x ≤ x) : (s.map f).prod ≤ s.prod :=
prod_le_prod_of_rel_le $ rel_map_left.2 $ rel_refl_of_refl_on h
@[to_additive]
lemma prod_le_prod_map (f : α → α) (h : ∀ x, x ∈ s → x ≤ f x) : s.prod ≤ (s.map f).prod :=
@prod_map_le_prod αᵒᵈ _ _ f h
@[to_additive card_nsmul_le_sum]
lemma pow_card_le_prod (h : ∀ x ∈ s, a ≤ x) : a ^ s.card ≤ s.prod :=
by { rw [←multiset.prod_repeat, ←multiset.map_const], exact prod_map_le_prod _ h }
end ordered_comm_monoid
lemma prod_nonneg [ordered_comm_semiring α] {m : multiset α} (h : ∀ a ∈ m, (0 : α) ≤ a) :
0 ≤ m.prod :=
begin
revert h,
refine m.induction_on _ _,
{ rintro -, rw prod_zero, exact zero_le_one },
intros a s hs ih,
rw prod_cons,
exact mul_nonneg (ih _ $ mem_cons_self _ _) (hs $ λ a ha, ih _ $ mem_cons_of_mem ha),
end
/-- Slightly more general version of `multiset.prod_eq_one_iff` for a non-ordered `comm_monoid` -/
@[to_additive "Slightly more general version of `multiset.sum_eq_zero_iff`
for a non-ordered `add_comm_monoid`"]
lemma prod_eq_one [comm_monoid α] {m : multiset α} (h : ∀ x ∈ m, x = (1 : α)) : m.prod = 1 :=
begin
induction m using quotient.induction_on with l,
simp [list.prod_eq_one h],
end
@[to_additive]
lemma le_prod_of_mem [canonically_ordered_monoid α] {m : multiset α} {a : α} (h : a ∈ m) :
a ≤ m.prod :=
begin
obtain ⟨m', rfl⟩ := exists_cons_of_mem h,
rw [prod_cons],
exact _root_.le_mul_right (le_refl a),
end
@[to_additive le_sum_of_subadditive_on_pred]
lemma le_prod_of_submultiplicative_on_pred [comm_monoid α] [ordered_comm_monoid β]
(f : α → β) (p : α → Prop) (h_one : f 1 = 1) (hp_one : p 1)
(h_mul : ∀ a b, p a → p b → f (a * b) ≤ f a * f b)
(hp_mul : ∀ a b, p a → p b → p (a * b)) (s : multiset α) (hps : ∀ a, a ∈ s → p a) :
f s.prod ≤ (s.map f).prod :=
begin
revert s,
refine multiset.induction _ _,
{ simp [le_of_eq h_one] },
intros a s hs hpsa,
have hps : ∀ x, x ∈ s → p x, from λ x hx, hpsa x (mem_cons_of_mem hx),
have hp_prod : p s.prod, from prod_induction p s hp_mul hp_one hps,
rw [prod_cons, map_cons, prod_cons],
exact (h_mul a s.prod (hpsa a (mem_cons_self a s)) hp_prod).trans (mul_le_mul_left' (hs hps) _),
end
@[to_additive le_sum_of_subadditive]
lemma le_prod_of_submultiplicative [comm_monoid α] [ordered_comm_monoid β]
(f : α → β) (h_one : f 1 = 1) (h_mul : ∀ a b, f (a * b) ≤ f a * f b) (s : multiset α) :
f s.prod ≤ (s.map f).prod :=
le_prod_of_submultiplicative_on_pred f (λ i, true) h_one trivial (λ x y _ _ , h_mul x y) (by simp)
s (by simp)
@[to_additive le_sum_nonempty_of_subadditive_on_pred]
lemma le_prod_nonempty_of_submultiplicative_on_pred [comm_monoid α] [ordered_comm_monoid β]
(f : α → β) (p : α → Prop) (h_mul : ∀ a b, p a → p b → f (a * b) ≤ f a * f b)
(hp_mul : ∀ a b, p a → p b → p (a * b)) (s : multiset α) (hs_nonempty : s ≠ ∅)
(hs : ∀ a, a ∈ s → p a) :
f s.prod ≤ (s.map f).prod :=
begin
revert s,
refine multiset.induction _ _,
{ intro h,
exfalso,
exact h rfl },
rintros a s hs hsa_nonempty hsa_prop,
rw [prod_cons, map_cons, prod_cons],
by_cases hs_empty : s = ∅,
{ simp [hs_empty] },
have hsa_restrict : (∀ x, x ∈ s → p x), from λ x hx, hsa_prop x (mem_cons_of_mem hx),
have hp_sup : p s.prod,
from prod_induction_nonempty p hp_mul hs_empty hsa_restrict,
have hp_a : p a, from hsa_prop a (mem_cons_self a s),
exact (h_mul a _ hp_a hp_sup).trans (mul_le_mul_left' (hs hs_empty hsa_restrict) _),
end
@[to_additive le_sum_nonempty_of_subadditive]
lemma le_prod_nonempty_of_submultiplicative [comm_monoid α] [ordered_comm_monoid β]
(f : α → β) (h_mul : ∀ a b, f (a * b) ≤ f a * f b) (s : multiset α) (hs_nonempty : s ≠ ∅) :
f s.prod ≤ (s.map f).prod :=
le_prod_nonempty_of_submultiplicative_on_pred f (λ i, true) (by simp [h_mul]) (by simp) s
hs_nonempty (by simp)
@[simp] lemma sum_map_singleton (s : multiset α) : (s.map (λ a, ({a} : multiset α))).sum = s :=
multiset.induction_on s (by simp) (by simp)
lemma abs_sum_le_sum_abs [linear_ordered_add_comm_group α] {s : multiset α} :
abs s.sum ≤ (s.map abs).sum :=
le_sum_of_subadditive _ abs_zero abs_add s
end multiset
@[to_additive]
lemma map_multiset_prod [comm_monoid α] [comm_monoid β] {F : Type*} [monoid_hom_class F α β]
(f : F) (s : multiset α) : f s.prod = (s.map f).prod :=
(s.prod_hom f).symm
@[to_additive]
protected lemma monoid_hom.map_multiset_prod [comm_monoid α] [comm_monoid β] (f : α →* β)
(s : multiset α) : f s.prod = (s.map f).prod :=
(s.prod_hom f).symm
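-- Illustrative computation (not part of the original file): pushing a two-element product
-- through a monoid hom, using `multiset.prod_pair` together with the standard
-- `monoid_hom.map_mul`.
example [comm_monoid α] [comm_monoid β] (f : α →* β) (a b : α) :
  f (({a, b} : multiset α).prod) = ({f a, f b} : multiset β).prod :=
by rw [multiset.prod_pair, multiset.prod_pair, f.map_mul]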
|
a034925393ed4e4de719d5087538a7e6e206da44
|
b7f22e51856f4989b970961f794f1c435f9b8f78
|
/tests/lean/run/refine1.lean
|
bb4902c7272fc27862091e9898230991e44ce0df
|
[
"Apache-2.0"
] |
permissive
|
soonhokong/lean
|
cb8aa01055ffe2af0fb99a16b4cda8463b882cd1
|
38607e3eb57f57f77c0ac114ad169e9e4262e24f
|
refs/heads/master
| 1,611,187,284,081
| 1,450,766,737,000
| 1,476,122,547,000
| 11,513,992
| 2
| 0
| null | 1,401,763,102,000
| 1,374,182,235,000
|
C++
|
UTF-8
|
Lean
| false
| false
| 113
|
lean
|
example (a b : Prop) : a → b → a ∧ b :=
begin
intros [Ha, Hb],
refine (and.intro _ Hb),
exact Ha
end
|
37a5853c9f831efcc90ad836d5525cbca1248270
|
432d948a4d3d242fdfb44b81c9e1b1baacd58617
|
/src/algebra/lie/cartan_subalgebra.lean
|
861ccec6aa53c0ee9daa501b6be67d1f7eac5141
|
[
"Apache-2.0"
] |
permissive
|
JLimperg/aesop3
|
306cc6570c556568897ed2e508c8869667252e8a
|
a4a116f650cc7403428e72bd2e2c4cda300fe03f
|
refs/heads/master
| 1,682,884,916,368
| 1,620,320,033,000
| 1,620,320,033,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 3,375
|
lean
|
/-
Copyright (c) 2021 Oliver Nash. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Oliver Nash
-/
import algebra.lie.nilpotent
/-!
# Cartan subalgebras
Cartan subalgebras are one of the most important concepts in Lie theory. We define them here.
The standard example is the set of diagonal matrices in the Lie algebra of matrices.
## Main definitions
* `lie_subalgebra.normalizer`
* `lie_subalgebra.le_normalizer_of_ideal`
* `lie_subalgebra.is_cartan_subalgebra`
## Tags
lie subalgebra, normalizer, idealizer, cartan subalgebra
-/
universes u v w w₁ w₂
variables {R : Type u} {L : Type v}
variables [comm_ring R] [lie_ring L] [lie_algebra R L] (H : lie_subalgebra R L)
namespace lie_subalgebra
/-- The normalizer of a Lie subalgebra `H` is the set of elements of the Lie algebra whose bracket
with any element of `H` lies in `H`. It is the Lie algebra equivalent of the group-theoretic
normalizer (see `subgroup.normalizer`) and is an idealizer in the sense of abstract algebra. -/
def normalizer : lie_subalgebra R L :=
{ carrier := { x : L | ∀ (y : L), (y ∈ H) → ⁅x, y⁆ ∈ H },
zero_mem' := λ y hy, by { rw zero_lie y, exact H.zero_mem, },
add_mem' := λ z₁ z₂ h₁ h₂ y hy, by { rw add_lie, exact H.add_mem (h₁ y hy) (h₂ y hy), },
smul_mem' := λ t y hy z hz, by { rw smul_lie, exact H.smul_mem t (hy z hz), },
lie_mem' := λ z₁ z₂ h₁ h₂ y hy, by
{ rw lie_lie, exact H.sub_mem (h₁ _ (h₂ y hy)) (h₂ _ (h₁ y hy)), }, }
lemma mem_normalizer_iff (x : L) : x ∈ H.normalizer ↔ ∀ (y : L), (y ∈ H) → ⁅x, y⁆ ∈ H := iff.rfl
lemma le_normalizer : H ≤ H.normalizer :=
begin
rw le_def, intros x hx,
simp only [set_like.mem_coe, mem_coe_submodule, coe_coe, mem_normalizer_iff] at ⊢ hx,
intros y, exact H.lie_mem hx,
end
/-- A Lie subalgebra is an ideal of its normalizer. -/
lemma ideal_in_normalizer : ∀ (x y : L), x ∈ H.normalizer → y ∈ H → ⁅x,y⁆ ∈ H :=
begin
simp only [mem_normalizer_iff],
intros x y h, exact h y,
end
/-- The normalizer of a Lie subalgebra `H` is the maximal Lie subalgebra in which `H` is a Lie
ideal. -/
lemma le_normalizer_of_ideal {N : lie_subalgebra R L}
(h : ∀ (x y : L), x ∈ N → y ∈ H → ⁅x,y⁆ ∈ H) : N ≤ H.normalizer :=
begin
intros x hx,
rw mem_normalizer_iff,
exact λ y, h x y hx,
end
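-- Illustrative sanity check (not part of the original file): applying `le_normalizer_of_ideal`
-- to `H` itself recovers `le_normalizer`, since `H` is closed under its own bracket.
example : H ≤ H.normalizer :=
H.le_normalizer_of_ideal (λ x y hx hy, H.lie_mem hx hy)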
/-- A Cartan subalgebra is a nilpotent, self-normalizing subalgebra. -/
class is_cartan_subalgebra : Prop :=
(nilpotent : lie_algebra.is_nilpotent R H)
(self_normalizing : H.normalizer = H)
end lie_subalgebra
@[simp] lemma lie_ideal.normalizer_eq_top {R : Type u} {L : Type v}
[comm_ring R] [lie_ring L] [lie_algebra R L] (I : lie_ideal R L) :
(I : lie_subalgebra R L).normalizer = ⊤ :=
begin
ext x, simp only [lie_subalgebra.mem_normalizer_iff, lie_subalgebra.mem_top, iff_true],
intros y hy, exact I.lie_mem hy,
end
open lie_ideal
/-- A nilpotent Lie algebra is its own Cartan subalgebra. -/
instance lie_algebra.top_is_cartan_subalgebra_of_nilpotent [lie_algebra.is_nilpotent R L] :
lie_subalgebra.is_cartan_subalgebra ⊤ :=
{ nilpotent :=
by { rwa lie_algebra.nilpotent_iff_equiv_nilpotent lie_subalgebra.top_equiv_self, },
self_normalizing :=
by { rw [← top_coe_lie_subalgebra, normalizer_eq_top, top_coe_lie_subalgebra], }, }
|
d87403cd13b72a85e388d7b8d6ef6c009ce1e132
|
22e97a5d648fc451e25a06c668dc03ac7ed7bc25
|
/src/data/equiv/local_equiv.lean
|
c35aef05cfc3cddfe40242015f266a4e00d23ca8
|
[
"Apache-2.0"
] |
permissive
|
keeferrowan/mathlib
|
f2818da875dbc7780830d09bd4c526b0764a4e50
|
aad2dfc40e8e6a7e258287a7c1580318e865817e
|
refs/heads/master
| 1,661,736,426,952
| 1,590,438,032,000
| 1,590,438,032,000
| 266,892,663
| 0
| 0
|
Apache-2.0
| 1,590,445,835,000
| 1,590,445,835,000
| null |
UTF-8
|
Lean
| false
| false
| 22,335
|
lean
|
/-
Copyright (c) 2019 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import data.equiv.basic
/-!
# Local equivalences
This file defines equivalences between subsets of given types.
An element `e` of `local_equiv α β` is made of two maps `e.to_fun` and `e.inv_fun` respectively
from α to β and from β to α (just like equivs), which are inverse to each other on the subsets
`e.source` and `e.target` of α and β respectively.
They are designed in particular to define charts on manifolds.
The main functionality is `e.trans f`, which composes the two local equivalences by restricting
the source and target to the maximal set where the composition makes sense.
As for equivs, we register a coercion to functions and use it in our simp normal form: we write
`e x` and `e.symm y` instead of `e.to_fun x` and `e.inv_fun y`.
## Main definitions
`equiv.to_local_equiv`: associating a local equiv to an equiv, with source = target = univ
`local_equiv.symm` : the inverse of a local equiv
`local_equiv.trans` : the composition of two local equivs
`local_equiv.refl` : the identity local equiv
`local_equiv.of_set` : the identity on a set `s`
`eq_on_source` : equivalence relation describing the "right" notion of equality for local
equivs (see below in implementation notes)
## Implementation notes
There are at least three possible implementations of local equivalences:
* equivs on subtypes
* pairs of functions taking values in `option α` and `option β`, equal to none where the local
equivalence is not defined
* pairs of functions defined everywhere, keeping the source and target as additional data
Each of these implementations has pros and cons.
* When dealing with subtypes, one still needs to define additional API for composition and
restriction of domains. Checking that one always belongs to the right subtype makes things very
tedious, and leads quickly to DTT hell (as the subtype `u ∩ v` is not the "same" as `v ∩ u`, for
instance).
* With option-valued functions, the composition is very neat (it is just the usual composition, and
the domain is restricted automatically). These are implemented in `pequiv.lean`. For manifolds,
where one wants to discuss thoroughly the smoothness of the maps, this creates however a lot of
overhead as one would need to extend all classes of smoothness to option-valued maps.
* The local_equiv version as explained above is easier to use for manifolds. The drawback is that
there is extra useless data (the values of `to_fun` and `inv_fun` outside of `source` and `target`).
In particular, the equality notion between local equivs is not "the right one", i.e., coinciding
source and target and equality there. Moreover, there are no local equivs in this sense between
an empty type and a nonempty type. Since empty types are not that useful, and since one almost never
needs to talk about equal local equivs, this is not an issue in practice.
Still, we introduce an equivalence relation `eq_on_source` that captures this right notion of
equality, and show that many properties are invariant under this equivalence relation.
-/
open function set
variables {α : Type*} {β : Type*} {γ : Type*} {δ : Type*}
/-- Local equivalence between subsets `source` and `target` of α and β respectively. The (global)
maps `to_fun : α → β` and `inv_fun : β → α` map `source` to `target` and conversely, and are inverse
to each other there. The values of `to_fun` outside of `source` and of `inv_fun` outside of `target`
are irrelevant. -/
@[nolint has_inhabited_instance]
structure local_equiv (α : Type*) (β : Type*) :=
(to_fun : α → β)
(inv_fun : β → α)
(source : set α)
(target : set β)
(map_source' : ∀{x}, x ∈ source → to_fun x ∈ target)
(map_target' : ∀{x}, x ∈ target → inv_fun x ∈ source)
(left_inv' : ∀{x}, x ∈ source → inv_fun (to_fun x) = x)
(right_inv' : ∀{x}, x ∈ target → to_fun (inv_fun x) = x)
-- attribute [simp] local_equiv.left_inv local_equiv.right_inv local_equiv.map_source local_equiv.map_target
/-- Associating a local_equiv to an equiv -/
def equiv.to_local_equiv (e : equiv α β) : local_equiv α β :=
{ to_fun := e.to_fun,
inv_fun := e.inv_fun,
source := univ,
target := univ,
map_source' := λx hx, mem_univ _,
map_target' := λy hy, mem_univ _,
left_inv' := λx hx, e.left_inv x,
right_inv' := λx hx, e.right_inv x }
namespace local_equiv
variables (e : local_equiv α β) (e' : local_equiv β γ)
/-- The inverse of a local equiv -/
protected def symm : local_equiv β α :=
{ to_fun := e.inv_fun,
inv_fun := e.to_fun,
source := e.target,
target := e.source,
map_source' := e.map_target',
map_target' := e.map_source',
left_inv' := e.right_inv',
right_inv' := e.left_inv' }
instance : has_coe_to_fun (local_equiv α β) := ⟨_, local_equiv.to_fun⟩
@[simp] theorem coe_mk (f : α → β) (g s t ml mr il ir) :
(local_equiv.mk f g s t ml mr il ir : α → β) = f := rfl
@[simp] theorem coe_symm_mk (f : α → β) (g s t ml mr il ir) :
((local_equiv.mk f g s t ml mr il ir).symm : β → α) = g := rfl
@[simp] lemma to_fun_as_coe : e.to_fun = e := rfl
@[simp] lemma inv_fun_as_coe : e.inv_fun = e.symm := rfl
@[simp] lemma map_source {x : α} (h : x ∈ e.source) : e x ∈ e.target :=
e.map_source' h
@[simp] lemma map_target {x : β} (h : x ∈ e.target) : e.symm x ∈ e.source :=
e.map_target' h
@[simp] lemma left_inv {x : α} (h : x ∈ e.source) : e.symm (e x) = x :=
e.left_inv' h
@[simp] lemma right_inv {x : β} (h : x ∈ e.target) : e (e.symm x) = x :=
e.right_inv' h
/-- Associating to a local_equiv an equiv between the source and the target -/
protected def to_equiv : equiv (e.source) (e.target) :=
{ to_fun := λ x, ⟨e x, e.map_source x.mem⟩,
inv_fun := λ y, ⟨e.symm y, e.map_target y.mem⟩,
left_inv := λ⟨x, hx⟩, subtype.eq $ e.left_inv hx,
right_inv := λ⟨y, hy⟩, subtype.eq $ e.right_inv hy }
@[simp] lemma symm_source : e.symm.source = e.target := rfl
@[simp] lemma symm_target : e.symm.target = e.source := rfl
@[simp] lemma symm_symm : e.symm.symm = e := by { cases e, refl }
/-- A local equiv induces a bijection between its source and target -/
lemma bij_on_source : bij_on e e.source e.target :=
inv_on.bij_on ⟨λ x, e.left_inv, λ x, e.right_inv⟩
(λ x, e.map_source) (λ x, e.map_target)
lemma image_eq_target_inter_inv_preimage {s : set α} (h : s ⊆ e.source) :
e '' s = e.target ∩ e.symm ⁻¹' s :=
begin
refine subset.antisymm (λx hx, _) (λx hx, _),
{ rcases (mem_image _ _ _).1 hx with ⟨y, ys, hy⟩,
rw ← hy,
split,
{ apply e.map_source,
exact h ys },
{ rwa [mem_preimage, e.left_inv (h ys)] } },
{ rw ← e.right_inv hx.1,
exact mem_image_of_mem _ hx.2 }
end
lemma inv_image_eq_source_inter_preimage {s : set β} (h : s ⊆ e.target) :
e.symm '' s = e.source ∩ e ⁻¹' s :=
e.symm.image_eq_target_inter_inv_preimage h
lemma source_inter_preimage_inv_preimage (s : set α) :
e.source ∩ e ⁻¹' (e.symm ⁻¹' s) = e.source ∩ s :=
begin
ext, split,
{ rintros ⟨hx, xs⟩,
simp only [mem_preimage, hx, e.left_inv, mem_preimage] at xs,
exact ⟨hx, xs⟩ },
{ rintros ⟨hx, xs⟩,
simp [hx, xs] }
end
lemma target_inter_inv_preimage_preimage (s : set β) :
e.target ∩ e.symm ⁻¹' (e ⁻¹' s) = e.target ∩ s :=
e.symm.source_inter_preimage_inv_preimage _
lemma image_source_eq_target : e '' e.source = e.target :=
e.bij_on_source.image_eq
lemma source_subset_preimage_target : e.source ⊆ e ⁻¹' e.target :=
λx hx, e.map_source hx
lemma inv_image_target_eq_source : e.symm '' e.target = e.source :=
e.symm.bij_on_source.image_eq
lemma target_subset_preimage_source : e.target ⊆ e.symm ⁻¹' e.source :=
λx hx, e.map_target hx
/-- Two local equivs that have the same `source`, same `to_fun` and same `inv_fun`, coincide. -/
@[ext]
protected lemma ext {e e' : local_equiv α β} (h : ∀x, e x = e' x)
(hsymm : ∀x, e.symm x = e'.symm x) (hs : e.source = e'.source) : e = e' :=
begin
have A : (e : α → β) = e', by { ext x, exact h x },
have B : (e.symm : β → α) = e'.symm, by { ext x, exact hsymm x },
have I : e '' e.source = e.target := e.image_source_eq_target,
have I' : e' '' e'.source = e'.target := e'.image_source_eq_target,
rw [A, hs, I'] at I,
cases e; cases e',
simp * at *
end
/-- Restricting a local equivalence to e.source ∩ s -/
protected def restr (s : set α) : local_equiv α β :=
{ to_fun := e,
inv_fun := e.symm,
source := e.source ∩ s,
target := e.target ∩ e.symm⁻¹' s,
map_source' := λx hx, begin
apply mem_inter,
{ apply e.map_source,
exact hx.1 },
{ rw [mem_preimage, e.left_inv],
exact hx.2,
exact hx.1 },
end,
map_target' := λy hy, begin
apply mem_inter,
{ apply e.map_target,
exact hy.1 },
{ exact hy.2 },
end,
left_inv' := λx hx, e.left_inv hx.1,
right_inv' := λy hy, e.right_inv hy.1 }
@[simp] lemma restr_coe (s : set α) : (e.restr s : α → β) = e := rfl
@[simp] lemma restr_coe_symm (s : set α) : ((e.restr s).symm : β → α) = e.symm := rfl
@[simp] lemma restr_source (s : set α) : (e.restr s).source = e.source ∩ s := rfl
@[simp] lemma restr_target (s : set α) : (e.restr s).target = e.target ∩ e.symm ⁻¹' s := rfl
lemma restr_eq_of_source_subset {e : local_equiv α β} {s : set α} (h : e.source ⊆ s) :
e.restr s = e :=
local_equiv.ext (λ_, rfl) (λ_, rfl) (by simp [inter_eq_self_of_subset_left h])
@[simp] lemma restr_univ {e : local_equiv α β} : e.restr univ = e :=
restr_eq_of_source_subset (subset_univ _)
/-- The identity local equiv -/
protected def refl (α : Type*) : local_equiv α α := (equiv.refl α).to_local_equiv
@[simp] lemma refl_source : (local_equiv.refl α).source = univ := rfl
@[simp] lemma refl_target : (local_equiv.refl α).target = univ := rfl
@[simp] lemma refl_coe : (local_equiv.refl α : α → α) = id := rfl
@[simp] lemma refl_symm : (local_equiv.refl α).symm = local_equiv.refl α := rfl
@[simp] lemma refl_restr_source (s : set α) : ((local_equiv.refl α).restr s).source = s :=
by simp
@[simp] lemma refl_restr_target (s : set α) : ((local_equiv.refl α).restr s).target = s :=
by { change univ ∩ id⁻¹' s = s, simp }
/-- The identity local equiv on a set `s` -/
def of_set (s : set α) : local_equiv α α :=
{ to_fun := id,
inv_fun := id,
source := s,
target := s,
map_source' := λx hx, hx,
map_target' := λx hx, hx,
left_inv' := λx hx, rfl,
right_inv' := λx hx, rfl }
@[simp] lemma of_set_source (s : set α) : (local_equiv.of_set s).source = s := rfl
@[simp] lemma of_set_target (s : set α) : (local_equiv.of_set s).target = s := rfl
@[simp] lemma of_set_coe (s : set α) : (local_equiv.of_set s : α → α) = id := rfl
@[simp] lemma of_set_symm (s : set α) : (local_equiv.of_set s).symm = local_equiv.of_set s := rfl
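-- Illustrative computation (not part of the original file): the identity local equiv on a set
-- acts as the identity function, definitionally.
example (s : set α) (x : α) : local_equiv.of_set s x = x := rfl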
/-- Composing two local equivs if the target of the first coincides with the source of the
second. -/
protected def trans' (e' : local_equiv β γ) (h : e.target = e'.source) :
local_equiv α γ :=
{ to_fun := e' ∘ e,
inv_fun := e.symm ∘ e'.symm,
source := e.source,
target := e'.target,
map_source' := λx hx, by simp [h.symm, hx],
map_target' := λy hy, by simp [h, hy],
left_inv' := λx hx, by simp [hx, h.symm],
right_inv' := λy hy, by simp [hy, h] }
/-- Composing two local equivs, by restricting to the maximal domain where their composition
is well defined. -/
protected def trans : local_equiv α γ :=
local_equiv.trans' (e.symm.restr (e'.source)).symm (e'.restr (e.target)) (inter_comm _ _)
@[simp] lemma coe_trans : (e.trans e' : α → γ) = e' ∘ e := rfl
@[simp] lemma coe_trans_symm : ((e.trans e').symm : γ → α) = e.symm ∘ e'.symm := rfl
lemma trans_symm_eq_symm_trans_symm : (e.trans e').symm = e'.symm.trans e.symm :=
by cases e; cases e'; refl
/- This could be considered as a simp lemma, but there are many situations where it turns something
simple into something more complicated. -/
lemma trans_source : (e.trans e').source = e.source ∩ e ⁻¹' e'.source := rfl
lemma trans_source' : (e.trans e').source = e.source ∩ e ⁻¹' (e.target ∩ e'.source) :=
begin
symmetry, calc
e.source ∩ e ⁻¹' (e.target ∩ e'.source) =
(e.source ∩ e ⁻¹' (e.target)) ∩ e ⁻¹' (e'.source) :
by rw [preimage_inter, inter_assoc]
... = e.source ∩ e ⁻¹' (e'.source) :
by { congr' 1, apply inter_eq_self_of_subset_left e.source_subset_preimage_target }
... = (e.trans e').source : rfl
end
lemma trans_source'' : (e.trans e').source = e.symm '' (e.target ∩ e'.source) :=
begin
rw [e.trans_source', e.inv_image_eq_source_inter_preimage, inter_comm],
exact inter_subset_left _ _,
end
lemma image_trans_source : e '' (e.trans e').source = e.target ∩ e'.source :=
image_source_eq_target (local_equiv.symm (local_equiv.restr (local_equiv.symm e) (e'.source)))
lemma trans_target : (e.trans e').target = e'.target ∩ e'.symm ⁻¹' e.target := rfl
lemma trans_target' : (e.trans e').target = e'.target ∩ e'.symm ⁻¹' (e'.source ∩ e.target) :=
trans_source' e'.symm e.symm
lemma trans_target'' : (e.trans e').target = e' '' (e'.source ∩ e.target) :=
trans_source'' e'.symm e.symm
lemma inv_image_trans_target : e'.symm '' (e.trans e').target = e'.source ∩ e.target :=
image_trans_source e'.symm e.symm
lemma trans_assoc (e'' : local_equiv γ δ) : (e.trans e').trans e'' = e.trans (e'.trans e'') :=
local_equiv.ext (λx, rfl) (λx, rfl) (by simp [trans_source, @preimage_comp α β γ, inter_assoc])
@[simp] lemma trans_refl : e.trans (local_equiv.refl β) = e :=
local_equiv.ext (λx, rfl) (λx, rfl) (by simp [trans_source])
@[simp] lemma refl_trans : (local_equiv.refl α).trans e = e :=
local_equiv.ext (λx, rfl) (λx, rfl) (by simp [trans_source, preimage_id])
lemma trans_refl_restr (s : set β) :
e.trans ((local_equiv.refl β).restr s) = e.restr (e ⁻¹' s) :=
local_equiv.ext (λx, rfl) (λx, rfl) (by simp [trans_source])
lemma trans_refl_restr' (s : set β) :
e.trans ((local_equiv.refl β).restr s) = e.restr (e.source ∩ e ⁻¹' s) :=
local_equiv.ext (λx, rfl) (λx, rfl) $ by { simp [trans_source], rw [← inter_assoc, inter_self] }
lemma restr_trans (s : set α) :
(e.restr s).trans e' = (e.trans e').restr s :=
local_equiv.ext (λx, rfl) (λx, rfl) $ by { simp [trans_source, inter_comm], rwa inter_assoc }
/-- `eq_on_source e e'` means that `e` and `e'` have the same source, and coincide there. Then `e`
and `e'` should really be considered the same local equiv. -/
def eq_on_source (e e' : local_equiv α β) : Prop :=
e.source = e'.source ∧ (∀x ∈ e.source, e x = e' x)
/-- `eq_on_source` is an equivalence relation -/
instance eq_on_source_setoid : setoid (local_equiv α β) :=
{ r := eq_on_source,
iseqv := ⟨
λe, by simp [eq_on_source],
λe e' h, by { simp [eq_on_source, h.1.symm], exact λx hx, (h.2 x hx).symm },
λe e' e'' h h', ⟨by rwa [← h'.1, ← h.1], λx hx, by { rw [← h'.2 x, h.2 x hx], rwa ← h.1 }⟩⟩ }
lemma eq_on_source_refl : e ≈ e := setoid.refl _
/-- If two local equivs are equivalent, so are their inverses -/
lemma eq_on_source_symm {e e' : local_equiv α β} (h : e ≈ e') : e.symm ≈ e'.symm :=
begin
have T : e.target = e'.target,
{ have : set.bij_on e' e.source e.target := e.bij_on_source.congr h.2,
have A : e' '' e.source = e.target := this.image_eq,
rw [h.1, e'.bij_on_source.image_eq] at A,
exact A.symm },
refine ⟨T, λx hx, _⟩,
have xt : x ∈ e.target := hx,
rw T at xt,
have e's : e'.symm x ∈ e.source, by { rw h.1, apply e'.map_target xt },
have A : e (e.symm x) = x := e.right_inv hx,
have B : e (e'.symm x) = x,
by { rw h.2, exact e'.right_inv xt, exact e's },
apply e.bij_on_source.inj_on (e.map_target hx) e's,
rw [A, B]
end
/-- Two equivalent local equivs have the same source -/
lemma source_eq_of_eq_on_source {e e' : local_equiv α β} (h : e ≈ e') : e.source = e'.source :=
h.1
/-- Two equivalent local equivs have the same target -/
lemma target_eq_of_eq_on_source {e e' : local_equiv α β} (h : e ≈ e') : e.target = e'.target :=
(eq_on_source_symm h).1
/-- Two equivalent local equivs coincide on the source -/
lemma apply_eq_of_eq_on_source {e e' : local_equiv α β} (h : e ≈ e') {x : α} (hx : x ∈ e.source) :
e x = e' x :=
h.2 x hx
/-- Two equivalent local equivs have coinciding inverses on the target -/
lemma inv_apply_eq_of_eq_on_source {e e' : local_equiv α β} (h : e ≈ e') {x : β} (hx : x ∈ e.target) :
e.symm x = e'.symm x :=
(eq_on_source_symm h).2 x hx
/-- Composition of local equivs respects equivalence -/
lemma eq_on_source_trans {e e' : local_equiv α β} {f f' : local_equiv β γ}
(he : e ≈ e') (hf : f ≈ f') : e.trans f ≈ e'.trans f' :=
begin
split,
{ have : e.target = e'.target := (eq_on_source_symm he).1,
rw [trans_source'', trans_source'', ← this, ← hf.1],
exact eq_on.image_eq (λx hx, (eq_on_source_symm he).2 x hx.1) },
{ assume x hx,
rw trans_source at hx,
simp [(he.2 x hx.1).symm, hf.2 _ hx.2] }
end
/-- Restriction of local equivs respects equivalence -/
lemma eq_on_source_restr {e e' : local_equiv α β} (he : e ≈ e') (s : set α) :
e.restr s ≈ e'.restr s :=
begin
split,
{ simp [he.1] },
{ assume x hx,
simp only [mem_inter_eq, restr_source] at hx,
exact he.2 x hx.1 }
end
/-- Preimages are respected by equivalence -/
lemma eq_on_source_preimage {e e' : local_equiv α β} (he : e ≈ e') (s : set β) :
e.source ∩ e ⁻¹' s = e'.source ∩ e' ⁻¹' s :=
begin
ext x,
simp only [mem_inter_eq, mem_preimage],
split,
{ assume hx,
rwa [apply_eq_of_eq_on_source (setoid.symm he), source_eq_of_eq_on_source (setoid.symm he)],
rw source_eq_of_eq_on_source he at hx,
exact hx.1 },
{ assume hx,
rwa [apply_eq_of_eq_on_source he, source_eq_of_eq_on_source he],
rw source_eq_of_eq_on_source (setoid.symm he) at hx,
exact hx.1 },
end
/-- Composition of a local equiv and its inverse is equivalent to the restriction of the identity
to the source -/
lemma trans_self_symm :
e.trans e.symm ≈ local_equiv.of_set e.source :=
begin
have A : (e.trans e.symm).source = e.source,
by simp [trans_source, inter_eq_self_of_subset_left (source_subset_preimage_target _)],
refine ⟨by simp [A], λx hx, _⟩,
rw A at hx,
simp [hx]
end
/-- Composition of the inverse of a local equiv and this local equiv is equivalent to the
restriction of the identity to the target -/
lemma trans_symm_self :
e.symm.trans e ≈ local_equiv.of_set e.target :=
trans_self_symm (e.symm)
/-- Two equivalent local equivs are equal when the source and target are univ -/
lemma eq_of_eq_on_source_univ (e e' : local_equiv α β) (h : e ≈ e')
(s : e.source = univ) (t : e.target = univ) : e = e' :=
begin
apply local_equiv.ext (λx, _) (λx, _) h.1,
{ apply h.2 x,
rw s,
exact mem_univ _ },
{ apply (eq_on_source_symm h).2 x,
rw [symm_source, t],
exact mem_univ _ }
end
section prod
/-- The product of two local equivs, as a local equiv on the product. -/
def prod (e : local_equiv α β) (e' : local_equiv γ δ) : local_equiv (α × γ) (β × δ) :=
{ source := set.prod e.source e'.source,
target := set.prod e.target e'.target,
to_fun := λp, (e p.1, e' p.2),
inv_fun := λp, (e.symm p.1, e'.symm p.2),
map_source' := λp hp, by { simp at hp, simp [hp] },
map_target' := λp hp, by { simp at hp, simp [map_target, hp] },
left_inv' := λp hp, by { simp at hp, simp [hp] },
right_inv' := λp hp, by { simp at hp, simp [hp] } }
@[simp] lemma prod_source (e : local_equiv α β) (e' : local_equiv γ δ) :
(e.prod e').source = set.prod e.source e'.source := rfl
@[simp] lemma prod_target (e : local_equiv α β) (e' : local_equiv γ δ) :
(e.prod e').target = set.prod e.target e'.target := rfl
@[simp] lemma prod_coe (e : local_equiv α β) (e' : local_equiv γ δ) :
((e.prod e') : α × γ → β × δ) = (λp, (e p.1, e' p.2)) := rfl
@[simp] lemma prod_coe_symm (e : local_equiv α β) (e' : local_equiv γ δ) :
((e.prod e').symm : β × δ → α × γ) = (λp, (e.symm p.1, e'.symm p.2)) := rfl
end prod
end local_equiv
namespace set
-- All arguments are explicit to avoid missing information in the pretty printer output
/-- A bijection between two sets `s : set α` and `t : set β` provides a local equivalence
between `α` and `β`. -/
@[simps] noncomputable def bij_on.to_local_equiv [nonempty α] (f : α → β) (s : set α) (t : set β)
(hf : bij_on f s t) :
local_equiv α β :=
{ to_fun := f,
inv_fun := inv_fun_on f s,
source := s,
target := t,
map_source' := hf.maps_to,
map_target' := hf.surj_on.maps_to_inv_fun_on,
left_inv' := hf.inv_on_inv_fun_on.1,
right_inv' := hf.inv_on_inv_fun_on.2 }
/-- A map injective on a subset of its domain provides a local equivalence. -/
@[simp] noncomputable def inj_on.to_local_equiv [nonempty α] (f : α → β) (s : set α)
(hf : inj_on f s) :
local_equiv α β :=
hf.bij_on_image.to_local_equiv f s (f '' s)
end set
namespace equiv
/- equivs give rise to local_equiv. We set up simp lemmas to reduce most properties of the local
equiv to those of the equiv. -/
variables (e : equiv α β) (e' : equiv β γ)
@[simp] lemma to_local_equiv_coe : (e.to_local_equiv : α → β) = e := rfl
@[simp] lemma to_local_equiv_symm_coe : (e.to_local_equiv.symm : β → α) = e.symm := rfl
@[simp] lemma to_local_equiv_source : e.to_local_equiv.source = univ := rfl
@[simp] lemma to_local_equiv_target : e.to_local_equiv.target = univ := rfl
@[simp] lemma refl_to_local_equiv : (equiv.refl α).to_local_equiv = local_equiv.refl α := rfl
@[simp] lemma symm_to_local_equiv : e.symm.to_local_equiv = e.to_local_equiv.symm := rfl
@[simp] lemma trans_to_local_equiv :
(e.trans e').to_local_equiv = e.to_local_equiv.trans e'.to_local_equiv :=
local_equiv.ext (λx, rfl) (λx, rfl) (by simp [local_equiv.trans_source, equiv.to_local_equiv])
end equiv
|
7298fbaa824993be3afb74d8e4fa95ccfe226851
|
64874bd1010548c7f5a6e3e8902efa63baaff785
|
/tests/lean/run/finbug.lean
|
4a6430828cd05dfee149d2b04dd32921debe47df
|
[
"Apache-2.0"
] |
permissive
|
tjiaqi/lean
|
4634d729795c164664d10d093f3545287c76628f
|
d0ce4cf62f4246b0600c07e074d86e51f2195e30
|
refs/heads/master
| 1,622,323,796,480
| 1,422,643,069,000
| 1,422,643,069,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 913
|
lean
|
/-
Copyright (c) 2014 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Module: data.fin
Author: Leonardo de Moura
Finite ordinals.
-/
open nat
inductive fin : nat → Type :=
fz : Π n, fin (succ n),
fs : Π {n}, fin n → fin (succ n)
namespace fin
definition to_nat : Π {n}, fin n → nat,
to_nat (fz n) := zero,
to_nat (@fs n f) := succ (@to_nat n f)
definition lift : Π {n : nat}, fin n → Π (m : nat), fin (add m n),
lift (fz n) m := fz (add m n),
lift (@fs n f) m := fs (@lift n f m)
theorem to_nat_lift : ∀ {n : nat} (f : fin n) (m : nat), to_nat f = to_nat (lift f m),
to_nat_lift (fz n) m := rfl,
to_nat_lift (@fs n f) m := calc
to_nat (fs f) = (to_nat f) + 1 : rfl
... = (to_nat (lift f m)) + 1 : to_nat_lift f
... = to_nat (lift (fs f) m) : rfl
end fin
|
7b5101f3edcb33b866a7d42677b9c7ef8dad9f3c
|
94e33a31faa76775069b071adea97e86e218a8ee
|
/src/representation_theory/invariants.lean
|
d535445d622fbc2a5ad2f455f2f6b392ef3780f4
|
[
"Apache-2.0"
] |
permissive
|
urkud/mathlib
|
eab80095e1b9f1513bfb7f25b4fa82fa4fd02989
|
6379d39e6b5b279df9715f8011369a301b634e41
|
refs/heads/master
| 1,658,425,342,662
| 1,658,078,703,000
| 1,658,078,703,000
| 186,910,338
| 0
| 0
|
Apache-2.0
| 1,568,512,083,000
| 1,557,958,709,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 5,154
|
lean
|
/-
Copyright (c) 2022 Antoine Labelle. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Antoine Labelle
-/
import representation_theory.basic
import representation_theory.Rep
/-!
# Subspace of invariants a group representation
This file introduces the subspace of invariants of a group representation
and proves basic results about it.
The main tool used is the average of all elements of the group, seen as an element of
`monoid_algebra k G`. The action of this special element gives a projection onto the
subspace of invariants.
In order for the definition of the average element to make sense, we need to assume for most of the
results that the order of `G` is invertible in `k` (e.g. `k` has characteristic `0`).
-/
open_locale big_operators
open monoid_algebra
open representation
namespace group_algebra
variables (k G : Type*) [comm_semiring k] [group G]
variables [fintype G] [invertible (fintype.card G : k)]
/--
The average of all elements of the group `G`, considered as an element of `monoid_algebra k G`.
-/
noncomputable def average : monoid_algebra k G :=
⅟(fintype.card G : k) • ∑ g : G, of k G g
/--
`average k G` is invariant under left multiplication by elements of `G`.
-/
@[simp]
theorem mul_average_left (g : G) :
(finsupp.single g 1 * average k G : monoid_algebra k G) = average k G :=
begin
simp only [mul_one, finset.mul_sum, algebra.mul_smul_comm, average, monoid_algebra.of_apply,
finset.sum_congr, monoid_algebra.single_mul_single],
set f : G → monoid_algebra k G := λ x, finsupp.single x 1,
show ⅟ ↑(fintype.card G) • ∑ (x : G), f (g * x) = ⅟ ↑(fintype.card G) • ∑ (x : G), f x,
rw function.bijective.sum_comp (group.mul_left_bijective g) _,
end
/--
`average k G` is invariant under right multiplication by elements of `G`.
-/
@[simp]
theorem mul_average_right (g : G) :
average k G * finsupp.single g 1 = average k G :=
begin
simp only [mul_one, finset.sum_mul, algebra.smul_mul_assoc, average, monoid_algebra.of_apply,
finset.sum_congr, monoid_algebra.single_mul_single],
set f : G → monoid_algebra k G := λ x, finsupp.single x 1,
show ⅟ ↑(fintype.card G) • ∑ (x : G), f (x * g) = ⅟ ↑(fintype.card G) • ∑ (x : G), f x,
rw function.bijective.sum_comp (group.mul_right_bijective g) _,
end
end group_algebra
namespace representation
section invariants
open group_algebra
variables {k G V : Type*} [comm_semiring k] [group G] [add_comm_monoid V] [module k V]
variables (ρ : representation k G V)
/--
The subspace of invariants, consisting of the vectors fixed by all elements of `G`.
-/
def invariants : submodule k V :=
{ carrier := set_of (λ v, ∀ (g : G), ρ g v = v),
zero_mem' := λ g, by simp only [map_zero],
add_mem' := λ v w hv hw g, by simp only [hv g, hw g, map_add],
smul_mem' := λ r v hv g, by simp only [hv g, linear_map.map_smulₛₗ, ring_hom.id_apply]}
@[simp]
lemma mem_invariants (v : V) : v ∈ invariants ρ ↔ ∀ (g: G), ρ g v = v := by refl
lemma invariants_eq_inter :
(invariants ρ).carrier = ⋂ g : G, function.fixed_points (ρ g) :=
by {ext, simp [function.is_fixed_pt]}
variables [fintype G] [invertible (fintype.card G : k)]
/--
The action of `average k G` gives a projection map onto the subspace of invariants.
-/
@[simp]
noncomputable def average_map : V →ₗ[k] V := as_algebra_hom ρ (average k G)
/--
The `average_map` sends elements of `V` to the subspace of invariants.
-/
theorem average_map_invariant (v : V) : average_map ρ v ∈ invariants ρ :=
λ g, by rw [average_map, ←as_algebra_hom_single, ←linear_map.mul_apply, ←map_mul (as_algebra_hom ρ),
mul_average_left]
/--
The `average_map` acts as the identity on the subspace of invariants.
-/
theorem average_map_id (v : V) (hv : v ∈ invariants ρ) : average_map ρ v = v :=
begin
rw mem_invariants at hv,
simp [average, map_sum, hv, finset.card_univ, nsmul_eq_smul_cast k _ v, smul_smul],
end
theorem is_proj_average_map : linear_map.is_proj ρ.invariants ρ.average_map :=
⟨ρ.average_map_invariant, ρ.average_map_id⟩
end invariants
namespace lin_hom
universes u
open category_theory Action
variables {k : Type u} [comm_ring k] {G : Group.{u}}
lemma mem_invariants_iff_comm {X Y : Rep k G} (f : X.V →ₗ[k] Y.V) (g : G) :
(lin_hom X.ρ Y.ρ) g f = f ↔ X.ρ g ≫ f = f ≫ Y.ρ g :=
begin
rw [lin_hom_apply, ←ρ_Aut_apply_inv, ←linear_map.comp_assoc, ←Module.comp_def, ←Module.comp_def,
iso.inv_comp_eq, ρ_Aut_apply_hom], exact comm,
end
/-- The invariants of the representation `lin_hom X.ρ Y.ρ` correspond to the representation
homomorphisms from `X` to `Y` -/
@[simps]
def invariants_equiv_Rep_hom (X Y : Rep k G) : (lin_hom X.ρ Y.ρ).invariants ≃ₗ[k] (X ⟶ Y) :=
{ to_fun := λ f, ⟨f.val, λ g, (mem_invariants_iff_comm _ g).1 (f.property g)⟩,
map_add' := λ _ _, rfl,
map_smul' := λ _ _, rfl,
inv_fun := λ f, ⟨f.hom, λ g, (mem_invariants_iff_comm _ g).2 (f.comm g)⟩,
left_inv := λ _, by { ext, refl },
right_inv := λ _, by { ext, refl } }
end lin_hom
end representation
|
da0deca7be36be722357ff40dde895fd23efc4ef
|
947b78d97130d56365ae2ec264df196ce769371a
|
/tests/lean/run/termElab.lean
|
885635588eed2cbe80171596f6a1bb7675249edf
|
[
"Apache-2.0"
] |
permissive
|
shyamalschandra/lean4
|
27044812be8698f0c79147615b1d5090b9f4b037
|
6e7a883b21eaf62831e8111b251dc9b18f40e604
|
refs/heads/master
| 1,671,417,126,371
| 1,601,859,995,000
| 1,601,860,020,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 687
|
lean
|
import Lean
new_frontend
open Lean
open Lean.Elab
open Lean.Elab.Term
def tst1 : TermElabM Unit := do
let opt ← getOptions;
let stx ← `(forall (a b : Nat), Nat);
IO.println "message 1"; -- This message goes directly to stdout. It will be displayed before trace messages.
let e ← elabTermAndSynthesize stx none;
logDbgTrace (">>> " ++ e); -- display message when `trace.Elab.debug` is true
IO.println "message 2"
#eval tst1
def tst2 : TermElabM Unit := do
let opt ← getOptions;
let a := mkIdent `a;
let b := mkIdent `b;
let stx ← `((fun ($a : Type) (x : $a) => @id $a x) _ 1);
let e ← elabTermAndSynthesize stx none;
logDbgTrace (">>> " ++ e);
throwErrorIfErrors
#eval tst2
|
f6b05f55a4be65f15cb75940b43e1d8ad92ab212
|
6432ea7a083ff6ba21ea17af9ee47b9c371760f7
|
/tests/lean/diamond7.lean
|
df96abc8dca19e7b2507f0d0f12a6d4bbbe77ce0
|
[
"Apache-2.0",
"LLVM-exception",
"NCSA",
"LGPL-3.0-only",
"LicenseRef-scancode-inner-net-2.0",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"Spencer-94",
"LGPL-2.1-or-later",
"HPND",
"LicenseRef-scancode-pcre",
"ISC",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"SunPro",
"CMU-Mach"
] |
permissive
|
leanprover/lean4
|
4bdf9790294964627eb9be79f5e8f6157780b4cc
|
f1f9dc0f2f531af3312398999d8b8303fa5f096b
|
refs/heads/master
| 1,693,360,665,786
| 1,693,350,868,000
| 1,693,350,868,000
| 129,571,436
| 2,827
| 311
|
Apache-2.0
| 1,694,716,156,000
| 1,523,760,560,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 2,864
|
lean
|
class Semigroup (α : Type u) extends Mul α where
mul_assoc (a b c : α) : a * b * c = a * (b * c)
class CommSemigroup (α : Type u) extends Semigroup α where
mul_comm (a b : α) : a * b = b * a
class One (α : Type u) where
one : α
instance [One α] : OfNat α (nat_lit 1) where
ofNat := One.one
class Monoid (α : Type u) extends Semigroup α, One α where
one_mul (a : α) : 1 * a = a
mul_one (a : α) : a * 1 = a
class CommMonoid (α : Type u) extends Monoid α, CommSemigroup α
set_option pp.all true
#check CommMonoid.mk
#print CommMonoid.toCommSemigroup
class Inv (α : Type u) where
inv : α → α
postfix:100 "⁻¹" => Inv.inv
class Group (α : Type u) extends Monoid α, Inv α where
mul_left_inv (a : α) : a⁻¹ * a = 1
class CommGroup (α : Type u) extends Group α, CommMonoid α
#check CommGroup.mk
#print CommGroup.toCommMonoid
class AddSemigroup (α : Type u) extends Add α where
add_assoc (a b c : α) : a + b + c = a + (b + c)
class AddCommSemigroup (α : Type u) extends AddSemigroup α where
add_comm (a b : α) : a + b = b + a
class Zero (α : Type u) where
zero : α
instance [Zero α] : OfNat α (nat_lit 0) where
ofNat := Zero.zero
class AddMonoid (α : Type u) extends AddSemigroup α, Zero α where
zero_add (a : α) : 0 + a = a
add_zero (a : α) : a + 0 = a
class AddCommMonoid (α : Type u) extends AddMonoid α, AddCommSemigroup α
class AddGroup (α : Type u) extends AddMonoid α, Neg α where
add_left_neg (a : α) : -a + a = 0
class AddCommGroup (α : Type u) extends AddGroup α, AddCommMonoid α
class Distrib (α : Type u) extends Mul α, Add α where
left_distrib ( a b c : α) : a * (b + c) = (a * b) + (a * c)
right_distrib (a b c : α) : (a + b) * c = (a * c) + (b * c)
class MulZero (α : Type u) extends Mul α, Zero α where
zero_mul (a : α) : 0 * a = 0
mul_zero (a : α) : a * 0 = 0
class ZeroNeOne (α : Type u) extends Zero α, One α where
zero_ne_one : (0:α) ≠ 1
class Semiring (α : Type u) extends AddCommMonoid α, Monoid α, Distrib α, MulZero α
class CommSemiring (α : Type u) extends Semiring α, CommMonoid α
class Ring (α : Type u) extends AddCommGroup α, Monoid α, Distrib α
class CommRing (α : Type u) extends Ring α, CommSemigroup α
class NoZeroDivisors (α : Type u) extends Mul α, Zero α where
eq_zero_or_eq_zero_of_mul_eq_zero (a b : α) : a * b = 0 → a = 0 ∨ b = 0
class IntegralDomain (α : Type u) extends CommRing α, NoZeroDivisors α, ZeroNeOne α
class DivisionRing (α : Type u) extends Ring α, Inv α, ZeroNeOne α where
mul_inv_cancel {a : α} : a ≠ 0 → a * a⁻¹ = 1
inv_mul_cancel {a : α} : a ≠ 0 → a⁻¹ * a = 1
class Field (α : Type u) extends DivisionRing α, CommRing α
set_option pp.all false in
#check Field.mk
#print Field.toDivisionRing
#print Field.toCommRing
|
5ce5c52ae70a842bfd8e629611e461f6f82617ac
|
4efff1f47634ff19e2f786deadd394270a59ecd2
|
/src/category_theory/filtered.lean
|
0501b5c2637fb1ab3936ce86645fc20c27368f67
|
[
"Apache-2.0"
] |
permissive
|
agjftucker/mathlib
|
d634cd0d5256b6325e3c55bb7fb2403548371707
|
87fe50de17b00af533f72a102d0adefe4a2285e8
|
refs/heads/master
| 1,625,378,131,941
| 1,599,166,526,000
| 1,599,166,526,000
| 160,748,509
| 0
| 0
|
Apache-2.0
| 1,544,141,789,000
| 1,544,141,789,000
| null |
UTF-8
|
Lean
| false
| false
| 2,456
|
lean
|
/-
Copyright (c) 2019 Reid Barton. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Reid Barton
-/
import category_theory.category
import order.bounded_lattice
/-!
# Filtered categories
A category is filtered if every finite diagram admits a cocone.
We give a simple characterisation of this condition as
1. for every pair of objects there exists another object "to the right",
2. for every pair of parallel morphisms there exists a morphism to the right so the compositions
are equal, and
3. there exists some object.
Filtered colimits are often better behaved than arbitrary colimits.
See `category_theory/limits/types` for some details.
(We'll prove existence of cocones for finite diagrams in a subsequent PR.)
## Future work
* Finite limits commute with filtered colimits
* Forgetful functors for algebraic categories typically preserve filtered colimits.
-/
universes v u -- declare the `v`'s first; see `category_theory.category` for an explanation
namespace category_theory
variables (C : Type u) [category.{v} C]
/--
A category `is_filtered_or_empty` if
1. for every pair of objects there exists another object "to the right", and
2. for every pair of parallel morphisms there exists a morphism to the right so the compositions
are equal.
-/
class is_filtered_or_empty : Prop :=
(cocone_objs : ∀ (X Y : C), ∃ Z (f : X ⟶ Z) (g : Y ⟶ Z), true)
(cocone_maps : ∀ ⦃X Y : C⦄ (f g : X ⟶ Y), ∃ Z (h : Y ⟶ Z), f ≫ h = g ≫ h)
section prio
set_option default_priority 100 -- see Note [default priority]
/--
A category `is_filtered` if
1. for every pair of objects there exists another object "to the right",
2. for every pair of parallel morphisms there exists a morphism to the right so the compositions
are equal, and
3. there exists some object.
-/
class is_filtered extends is_filtered_or_empty C : Prop :=
[nonempty : nonempty C]
end prio
@[priority 100]
instance is_filtered_or_empty_of_semilattice_sup
(α : Type u) [semilattice_sup α] : is_filtered_or_empty α :=
{ cocone_objs := λ X Y, ⟨X ⊔ Y, hom_of_le le_sup_left, hom_of_le le_sup_right, trivial⟩,
cocone_maps := λ X Y f g, ⟨Y, 𝟙 _, (by ext)⟩, }
@[priority 100]
instance is_filtered_of_semilattice_sup_top
(α : Type u) [semilattice_sup_top α] : is_filtered α :=
{ nonempty := ⟨⊤⟩,
..category_theory.is_filtered_or_empty_of_semilattice_sup α }
end category_theory
|
35d971d7f67e8ca243ad724bf091f042d6207990
|
e0f9ba56b7fedc16ef8697f6caeef5898b435143
|
/src/tactic/monotonicity/interactive.lean
|
552676edb11ae6317993c5a9ba28d4ac76ed12e3
|
[
"Apache-2.0"
] |
permissive
|
anrddh/mathlib
|
6a374da53c7e3a35cb0298b0cd67824efef362b4
|
a4266a01d2dcb10de19369307c986d038c7bb6a6
|
refs/heads/master
| 1,656,710,827,909
| 1,589,560,456,000
| 1,589,560,456,000
| 264,271,800
| 0
| 0
|
Apache-2.0
| 1,589,568,062,000
| 1,589,568,061,000
| null |
UTF-8
|
Lean
| false
| false
| 23,482
|
lean
|
/-
Copyright (c) 2019 Simon Hudon. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Simon Hudon
-/
import tactic.monotonicity.basic
import control.traversable
import control.traversable.derive
import data.dlist
variables {a b c p : Prop}
namespace tactic.interactive
open lean lean.parser interactive
open interactive.types
open tactic
local postfix `?`:9001 := optional
local postfix *:9001 := many
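/-- The shape of the monotonic function under scrutiny: a function applied to fixed arguments on
both sides of the variable position (`non_assoc`), an associative operator with an optional fixed
prefix and suffix (`assoc`), or an associative-commutative operator with the fixed part collected
on one side (`assoc_comm`). -/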
meta inductive mono_function (elab : bool := tt)
| non_assoc : expr elab → list (expr elab) → list (expr elab) → mono_function
| assoc : expr elab → option (expr elab) → option (expr elab) → mono_function
| assoc_comm : expr elab → expr elab → mono_function
meta instance : decidable_eq mono_function :=
by mk_dec_eq_instance
meta def mono_function.to_tactic_format : mono_function → tactic format
| (mono_function.non_assoc fn xs ys) := do
fn' ← pp fn,
xs' ← mmap pp xs,
ys' ← mmap pp ys,
return format!"{fn'} {xs'} _ {ys'}"
| (mono_function.assoc fn xs ys) := do
fn' ← pp fn,
xs' ← pp xs,
ys' ← pp ys,
return format!"{fn'} {xs'} _ {ys'}"
| (mono_function.assoc_comm fn xs) := do
fn' ← pp fn,
xs' ← pp xs,
return format!"{fn'} _ {xs'}"
meta instance has_to_tactic_format_mono_function : has_to_tactic_format mono_function :=
{ to_tactic_format := mono_function.to_tactic_format }
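/-- Context gathered from a monotonicity goal: the relation, the shape of the monotonic function,
and the two subterms (`left` and `right`) being compared. -/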
@[derive traversable]
meta structure ac_mono_ctx' (rel : Type) :=
(to_rel : rel)
(function : mono_function)
(left right rel_def : expr)
@[reducible]
meta def ac_mono_ctx := ac_mono_ctx' (option (expr → expr → expr))
@[reducible]
meta def ac_mono_ctx_ne := ac_mono_ctx' (expr → expr → expr)
meta def ac_mono_ctx.to_tactic_format (ctx : ac_mono_ctx) : tactic format :=
do fn ← pp ctx.function,
l ← pp ctx.left,
r ← pp ctx.right,
rel ← pp ctx.rel_def,
return format!"{{ function := {fn}\n, left := {l}\n, right := {r}\n, rel_def := {rel} }"
meta instance has_to_tactic_format_mono_ctx : has_to_tactic_format ac_mono_ctx :=
{ to_tactic_format := ac_mono_ctx.to_tactic_format }
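/-- Run `tac` with `e` (usually a meta variable) as the only goal, restoring the original goals
afterwards. -/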
meta def as_goal (e : expr) (tac : tactic unit) : tactic unit :=
do gs ← get_goals,
set_goals [e],
tac,
set_goals gs
open list (hiding map) functor dlist
section config
parameter opt : mono_cfg
parameter asms : list expr
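/-- Try to discharge the side-condition meta variable `e` by type class resolution,
`opt_param` / `auto_param` resolution, `solve_by_elim` with the ambient assumptions, reflexivity
or `id`, leaving it untouched if everything fails. -/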
meta def unify_with_instance (e : expr) : tactic unit :=
as_goal e $
apply_instance
<|>
apply_opt_param
<|>
apply_auto_param
<|>
tactic.solve_by_elim { lemmas := some asms }
<|>
reflexivity
<|>
applyc ``id
<|>
return ()
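/-- Instantiate the leading binders of the rule `e : t` with fresh meta variables until its
conclusion unifies with the pattern `p`, then try to discharge the created meta variables with
`unify_with_instance`. -/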
private meta def match_rule_head (p : expr)
: list expr → expr → expr → tactic expr
| vs e t :=
(unify t p >> mmap' unify_with_instance vs >> instantiate_mvars e)
<|>
do (expr.pi _ _ d b) ← return t | failed,
v ← mk_meta_var d,
match_rule_head (v::vs) (expr.app e v) (b.instantiate_var v)
meta def pi_head : expr → tactic expr
| (expr.pi n _ t b) :=
do v ← mk_meta_var t,
pi_head (b.instantiate_var v)
| e := return e
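/-- Remove the first occurrence of `e` (up to `compare opt`) from the list, returning `none` if no
occurrence is found. -/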
meta def delete_expr (e : expr)
: list expr → tactic (option (list expr))
| [] := return none
| (x :: xs) :=
(compare opt e x >> return (some xs))
<|>
(map (cons x) <$> delete_expr xs)
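/-- `match_ac' es xs` splits the operands into the part common to `es` and `xs` and the leftovers
of each list, returning `(common, left only, right only)`. -/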
meta def match_ac'
: list expr → list expr → tactic (list expr × list expr × list expr)
| es (x :: xs) := do
es' ← delete_expr x es,
match es' with
| (some es') := do
(c,l,r) ← match_ac' es' xs, return (x::c,l,r)
| none := do
(c,l,r) ← match_ac' es xs, return (c,l,x::r)
end
| es [] := do
return ([],es,[])
meta def match_ac (unif : bool) (l : list expr) (r : list expr)
: tactic (list expr × list expr × list expr) :=
do (s',l',r') ← match_ac' l r,
s' ← mmap instantiate_mvars s',
l' ← mmap instantiate_mvars l',
r' ← mmap instantiate_mvars r',
return (s',l',r')
meta def match_prefix
: list expr → list expr → tactic (list expr × list expr × list expr)
| (x :: xs) (y :: ys) :=
(do compare opt x y,
prod.map ((::) x) id <$> match_prefix xs ys)
<|> return ([],x :: xs,y :: ys)
| xs ys := return ([],xs,ys)
/--
`(prefix,left,right,suffix) ← match_assoc l r` finds the
longest prefix and suffix common to `l` and `r` and
returns them along with the differences -/
meta def match_assoc (l : list expr) (r : list expr)
: tactic (list expr × list expr × list expr × list expr) :=
do (pre,l₁,r₁) ← match_prefix l r,
(suf,l₂,r₂) ← match_prefix (reverse l₁) (reverse r₁),
return (pre,reverse l₂,reverse r₂,reverse suf)
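/-- Inspect a binary application to decide whether its operator is associative and/or commutative
and whether it has a two-sided identity, returning the two flags, the optional
`(left_id, right_id, unit)` data and the operator itself. -/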
meta def check_ac : expr → tactic (bool × bool × option (expr × expr × expr) × expr)
| (expr.app (expr.app f x) y) :=
do t ← infer_type x,
a ← try_core $ to_expr ``(is_associative %%t %%f) >>= mk_instance,
c ← try_core $ to_expr ``(is_commutative %%t %%f) >>= mk_instance,
i ← try_core (do
v ← mk_meta_var t,
l_inst_p ← to_expr ``(is_left_id %%t %%f %%v),
r_inst_p ← to_expr ``(is_right_id %%t %%f %%v),
l_v ← mk_meta_var l_inst_p,
r_v ← mk_meta_var r_inst_p ,
l_id ← mk_mapp `is_left_id.left_id [some t,f,v,some l_v],
mk_instance l_inst_p >>= unify l_v,
r_id ← mk_mapp `is_right_id.right_id [none,f,v,some r_v],
mk_instance r_inst_p >>= unify r_v,
v' ← instantiate_mvars v,
return (l_id,r_id,v')),
return (a.is_some,c.is_some,i,f)
| _ := return (ff,ff,none,expr.var 1)
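/-- Flatten a chain of applications of the associative operator `f` into the list of its atomic
operands. -/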
meta def parse_assoc_chain' (f : expr) : expr → tactic (dlist expr)
| e :=
(do (expr.app (expr.app f' x) y) ← return e,
is_def_eq f f',
(++) <$> parse_assoc_chain' x <*> parse_assoc_chain' y)
<|> return (singleton e)
meta def parse_assoc_chain (f : expr) : expr → tactic (list expr) :=
map dlist.to_list ∘ parse_assoc_chain' f
meta def fold_assoc (op : expr) : option (expr × expr × expr) → list expr → option (expr × list expr)
| _ (x::xs) := some (foldl (expr.app ∘ expr.app op) x xs, [])
| none [] := none
| (some (l_id,r_id,x₀)) [] := some (x₀,[l_id,r_id])
meta def fold_assoc1 (op : expr) : list expr → option expr
| (x::xs) := some $ foldl (expr.app ∘ expr.app op) x xs
| [] := none
meta def same_function_aux
: list expr → list expr → expr → expr → tactic (expr × list expr × list expr)
| xs₀ xs₁ (expr.app f₀ a₀) (expr.app f₁ a₁) :=
same_function_aux (a₀ :: xs₀) (a₁ :: xs₁) f₀ f₁
| xs₀ xs₁ e₀ e₁ := is_def_eq e₀ e₁ >> return (e₀,xs₀,xs₁)
meta def same_function : expr → expr → tactic (expr × list expr × list expr) :=
same_function_aux [] []
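/-- Decompose the two sides `l` and `r` of a monotonicity goal using associativity (and
commutativity, when available) of their head operator, isolating the one subterm in which they
differ. Returns the two differing subterms, the identity lemmas needed when one side collapses to
the unit, and the `mono_function` skeleton surrounding the difference. -/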
meta def parse_ac_mono_function (l r : expr)
: tactic (expr × expr × list expr × mono_function) :=
do (full_f,ls,rs) ← same_function l r,
(a,c,i,f) ← check_ac l,
if a
then if c
then do
(s,ls,rs) ← monad.join (match_ac tt
<$> parse_assoc_chain f l
<*> parse_assoc_chain f r),
(l',l_id) ← fold_assoc f i ls,
(r',r_id) ← fold_assoc f i rs,
s' ← fold_assoc1 f s,
return (l',r',l_id ++ r_id,mono_function.assoc_comm f s')
else do -- a ∧ ¬ c
(pre,ls,rs,suff) ← monad.join (match_assoc
<$> parse_assoc_chain f l
<*> parse_assoc_chain f r),
(l',l_id) ← fold_assoc f i ls,
(r',r_id) ← fold_assoc f i rs,
let pre' := fold_assoc1 f pre,
let suff' := fold_assoc1 f suff,
return (l',r',l_id ++ r_id,mono_function.assoc f pre' suff')
else do -- ¬ a
(xs₀,x₀,x₁,xs₁) ← find_one_difference opt ls rs,
return (x₀,x₁,[],mono_function.non_assoc full_f xs₀ xs₁)
meta def parse_ac_mono_function' (l r : pexpr) :=
do l' ← to_expr l,
r' ← to_expr r,
parse_ac_mono_function l' r'
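/-- Parse a goal of the form `e₀ → e₁`, `e₀ = e₁` or `rel e₀ e₁`, returning its two sides, the
identity lemmas produced by the decomposition and an `ac_mono_ctx` recording the relation, the
differing subterms and the surrounding function. -/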
meta def ac_monotonicity_goal : expr → tactic (expr × expr × list expr × ac_mono_ctx)
| `(%%e₀ → %%e₁) :=
do (l,r,id_rs,f) ← parse_ac_mono_function e₀ e₁,
t₀ ← infer_type e₀,
t₁ ← infer_type e₁,
rel_def ← to_expr ``(λ x₀ x₁, (x₀ : %%t₀) → (x₁ : %%t₁)),
return (e₀, e₁, id_rs,
{ function := f
, left := l, right := r
, to_rel := some $ expr.pi `x binder_info.default
, rel_def := rel_def })
| `(%%e₀ = %%e₁) :=
do (l,r,id_rs,f) ← parse_ac_mono_function e₀ e₁,
t₀ ← infer_type e₀,
t₁ ← infer_type e₁,
rel_def ← to_expr ``(λ x₀ x₁, (x₀ : %%t₀) = (x₁ : %%t₁)),
return (e₀, e₁, id_rs,
{ function := f
, left := l, right := r
, to_rel := none
, rel_def := rel_def })
| (expr.app (expr.app rel e₀) e₁) :=
do (l,r,id_rs,f) ← parse_ac_mono_function e₀ e₁,
return (e₀, e₁, id_rs,
{ function := f
, left := l, right := r
, to_rel := expr.app ∘ expr.app rel
, rel_def := rel })
| _ := fail "invalid monotonicity goal"
meta def bin_op_left (f : expr) : option expr → expr → expr
| none e := e
| (some e₀) e₁ := f.mk_app [e₀,e₁]
meta def bin_op (f a b : expr) : expr :=
f.mk_app [a,b]
meta def bin_op_right (f : expr) : expr → option expr → expr
| e none := e
| e₀ (some e₁) := f.mk_app [e₀,e₁]
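/-- Rebuild the full application described by a `mono_function` skeleton, plugging `z` into the
variable position. -/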
meta def mk_fun_app : mono_function → expr → expr
| (mono_function.non_assoc f x y) z := f.mk_app (x ++ z :: y)
| (mono_function.assoc f x y) z := bin_op_left f x (bin_op_right f z y)
| (mono_function.assoc_comm f x) z := f.mk_app [z,x]
meta inductive mono_law
/- `assoc (l₀,r₀) (r₁,l₁)` gives first how to find rules to prove
x+(y₀+z) R x+(y₁+z);
if that fails, helps prove (x+y₀)+z R (x+y₁)+z -/
| assoc : expr × expr → expr × expr → mono_law
/- `congr r` gives the rule to prove `x = y → f x = f y` -/
| congr : expr → mono_law
| other : expr → mono_law
meta def mono_law.to_tactic_format : mono_law → tactic format
| (mono_law.other e) := do e ← pp e, return format!"other {e}"
| (mono_law.congr r) := do e ← pp r, return format!"congr {e}"
| (mono_law.assoc (x₀,x₁) (y₀,y₁)) :=
do x₀ ← pp x₀,
x₁ ← pp x₁,
y₀ ← pp y₀,
y₁ ← pp y₁,
return format!"assoc {x₀}; {x₁} | {y₀}; {y₁}"
meta instance has_to_tactic_format_mono_law : has_to_tactic_format mono_law :=
{ to_tactic_format := mono_law.to_tactic_format }
meta def mk_rel (ctx : ac_mono_ctx_ne) (f : expr → expr) : expr :=
ctx.to_rel (f ctx.left) (f ctx.right)
meta def mk_congr_args (fn : expr) (xs₀ xs₁ : list expr) (l r : expr) : tactic expr :=
do p ← mk_app `eq [fn.mk_app $ xs₀ ++ l :: xs₁,fn.mk_app $ xs₀ ++ r :: xs₁],
prod.snd <$> solve_aux p
(do iterate_exactly (xs₁.length) (applyc `congr_fun),
applyc `congr_arg)
meta def mk_congr_law (ctx : ac_mono_ctx) : tactic expr :=
match ctx.function with
| (mono_function.assoc f x₀ x₁) :=
if (x₀ <|> x₁).is_some
then mk_congr_args f x₀.to_monad x₁.to_monad ctx.left ctx.right
else failed
| (mono_function.assoc_comm f x₀) := mk_congr_args f [x₀] [] ctx.left ctx.right
| (mono_function.non_assoc f x₀ x₁) := mk_congr_args f x₀ x₁ ctx.left ctx.right
end
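/-- From the monotonicity context, build the `mono_law` describing which lemmas to search for: a
pair of associativity patterns, a plain relational pattern, or a congruence rule when the goal is
an equality. -/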
meta def mk_pattern (ctx : ac_mono_ctx) : tactic mono_law :=
match (sequence ctx : option (ac_mono_ctx' _)) with
| (some ctx) :=
match ctx.function with
| (mono_function.assoc f (some x) (some y)) :=
return $ mono_law.assoc
( mk_rel ctx (λ i, bin_op f x (bin_op f i y))
, mk_rel ctx (λ i, bin_op f i y))
( mk_rel ctx (λ i, bin_op f (bin_op f x i) y)
, mk_rel ctx (λ i, bin_op f x i))
| (mono_function.assoc f (some x) none) :=
return $ mono_law.other $
mk_rel ctx (λ e, mk_fun_app ctx.function e)
| (mono_function.assoc f none (some y)) :=
return $ mono_law.other $
mk_rel ctx (λ e, mk_fun_app ctx.function e)
| (mono_function.assoc f none none) :=
none
| _ :=
return $ mono_law.other $
mk_rel ctx (λ e, mk_fun_app ctx.function e)
end
| none := mono_law.congr <$> mk_congr_law ctx
end
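/-- `match_rule pat r` elaborates the lemma named `r` and specializes it (via `match_rule_head`)
so that its conclusion matches the pattern `pat`. -/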
meta def match_rule (pat : expr) (r : name) : tactic expr :=
do r' ← mk_const r,
t ← infer_type r',
match_rule_head pat [] r' t
meta def find_lemma (pat : expr) : list name → tactic (list expr)
| [] := return []
| (r :: rs) :=
do (cons <$> match_rule pat r <|> pure id) <*> find_lemma rs
meta def match_chaining_rules (ls : list name) (x₀ x₁ : expr) : tactic (list expr) :=
do x' ← to_expr ``(%%x₁ → %%x₀),
r₀ ← find_lemma x' ls,
r₁ ← find_lemma x₁ ls,
return (expr.app <$> r₀ <*> r₁)
meta def find_rule (ls : list name) : mono_law → tactic (list expr)
| (mono_law.assoc (x₀,x₁) (y₀,y₁)) :=
(match_chaining_rules ls x₀ x₁)
<|> (match_chaining_rules ls y₀ y₁)
| (mono_law.congr r) := return [r]
| (mono_law.other p) := find_lemma p ls
universes u v
def apply_rel {α : Sort u} (R : α → α → Sort v) {x y : α}
(x' y' : α)
(h : R x y)
(hx : x = x')
(hy : y = y')
: R x' y' :=
by { rw [← hx,← hy], apply h }
meta def ac_refine (e : expr) : tactic unit :=
refine ``(eq.mp _ %%e) ; ac_refl
meta def one_line (e : expr) : tactic format :=
do lbl ← pp e,
asm ← infer_type e >>= pp,
return format!"\t{asm}\n"
meta def side_conditions (e : expr) : tactic format :=
do let vs := e.list_meta_vars,
ts ← mmap one_line vs.tail,
let r := e.get_app_fn.const_name,
return format!"{r}:\n{format.join ts}"
open monad
/-- tactic-facing function, similar to `interactive.tactic.generalize` with the
exception that it can also generalize meta variables occurring in the goal -/
private meta def monotonicity.generalize' (h : name) (v : expr) (x : name) : tactic (expr × expr) :=
do tgt ← target,
t ← infer_type v,
tgt' ← do {
⟨tgt', _⟩ ← solve_aux tgt (tactic.generalize v x >> target),
to_expr ``(λ y : %%t, Π x, y = x → %%(tgt'.binding_body.lift_vars 0 1))
} <|> to_expr ``(λ y : %%t, Π x, %%v = x → %%tgt),
t ← head_beta (tgt' v) >>= assert h,
swap,
r ← mk_eq_refl v,
solve1 $ tactic.exact (t v r),
prod.mk <$> tactic.intro x <*> tactic.intro h
private meta def hide_meta_vars (tac : list expr → tactic unit) : tactic unit :=
focus1 $
do tgt ← target >>= instantiate_mvars,
tactic.change tgt,
ctx ← local_context,
let vs := tgt.list_meta_vars,
vs' ← mmap (λ v,
do h ← get_unused_name `h,
x ← get_unused_name `x,
prod.snd <$> monotonicity.generalize' h v x) vs,
tac ctx;
vs'.mmap' (try ∘ tactic.subst)
meta def hide_meta_vars' (tac : itactic) : itactic :=
hide_meta_vars $ λ _, tac
end config
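/-- Focus on the meta variable `v` as the only goal and solve it completely with `tac`, restoring
the original goals afterwards. -/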
meta def solve_mvar (v : expr) (tac : tactic unit) : tactic unit :=
do gs ← get_goals,
set_goals [v],
target >>= instantiate_mvars >>= tactic.change,
tac, done,
set_goals $ gs
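/-- `list.minimum_on f xs` returns the elements of `xs` on which `f` attains its minimal value. -/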
def list.minimum_on {α β} [decidable_linear_order β] (f : α → β) : list α → list α
| [] := []
| (x :: xs) := prod.snd $ xs.foldl (λ ⟨k,a⟩ b,
let k' := f b in
if k < k' then (k,a)
else if k' < k then (k', [b])
else (k,b :: a)) (f x, [x])
open format mono_selection
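/-- Try each candidate lemma in `xs` against the goal and commit to the one leaving the fewest
side goals; fail with a diagnostic message if several candidates are tied. -/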
meta def best_match {β} (xs : list expr) (tac : expr → tactic β) : tactic unit :=
do t ← target,
xs ← xs.mmap (λ x,
try_core $ prod.mk x <$> solve_aux t (tac x >> get_goals)),
let xs := xs.filter_map id,
let r := list.minimum_on (list.length ∘ prod.fst ∘ prod.snd) xs,
match r with
| [(_,gs,pr)] := tactic.exact pr >> set_goals gs
| [] := fail "no good match found"
| _ :=
do lmms ← r.mmap (λ ⟨l,gs,_⟩,
do ts ← gs.mmap infer_type,
msg ← ts.mmap pp,
pure $ foldl compose "\n\n" (list.intersperse "\n" $ to_fmt l.get_app_fn.const_name :: msg)),
let msg := foldl compose "" lmms,
fail format!"ambiguous match: {msg}\n\nTip: try asserting a side condition to distinguish between the lemmas"
end
meta def mono_aux (dir : parse side) (cfg : mono_cfg := { mono_cfg . }) :
tactic unit :=
do t ← target >>= instantiate_mvars,
ns ← get_monotonicity_lemmas t dir,
asms ← local_context,
rs ← find_lemma asms t ns,
focus1 $ () <$ best_match rs (λ law, tactic.refine $ to_pexpr law)
/--
- `mono` applies a monotonicity rule.
- `mono*` applies monotonicity rules repetitively.
- `mono with x ≤ y` or `mono with [0 ≤ x,0 ≤ y]` creates an assertion for the listed
propositions. Those help to select the right monotonicity rule.
- `mono left` or `mono right` is useful when proving strict orderings:
for instance, `x + y < w + z` could be broken down into either
- left: `x ≤ w` and `y < z` or
- right: `x < w` and `y ≤ z`
- `mono using [rule1,rule2]` calls `simp [rule1,rule2]` before applying mono.
- The general syntax is `mono '*'? ('with' hyp | 'with' [hyp1,hyp2])? ('using' [hyp1,hyp2])? mono_cfg?`
To use it, first import `tactic.monotonicity`.
Here is an example of mono:
```lean
example (x y z k : ℤ)
(h : 3 ≤ (4 : ℤ))
(h' : z ≤ y) :
(k + 3 + x) - y ≤ (k + 4 + x) - z :=
begin
mono, -- unfold `(-)`, apply add_le_add
{ -- ⊢ k + 3 + x ≤ k + 4 + x
mono, -- apply add_le_add, refl
-- ⊢ k + 3 ≤ k + 4
mono },
{ -- ⊢ -y ≤ -z
mono /- apply neg_le_neg -/ }
end
```
More succinctly, we can prove the same goal as:
```lean
example (x y z k : ℤ)
(h : 3 ≤ (4 : ℤ))
(h' : z ≤ y) :
(k + 3 + x) - y ≤ (k + 4 + x) - z :=
by mono*
```
-/
meta def mono (many : parse (tk "*")?)
(dir : parse side)
(hyps : parse $ tk "with" *> pexpr_list_or_texpr <|> pure [])
(simp_rules : parse $ tk "using" *> simp_arg_list <|> pure [])
(cfg : mono_cfg := { mono_cfg . }) :
tactic unit :=
do hyps ← hyps.mmap (λ p, to_expr p >>= mk_meta_var),
hyps.mmap' (λ pr, do h ← get_unused_name `h, note h none pr),
when (¬ simp_rules.empty) (simp_core { } failed tt simp_rules [] (loc.ns [none])),
if many.is_some
then repeat $ mono_aux dir cfg
else mono_aux dir cfg,
gs ← get_goals,
set_goals $ hyps ++ gs
add_tactic_doc
{ name := "mono",
category := doc_category.tactic,
decl_names := [`tactic.interactive.mono],
tags := ["monotonicity"] }
/--
transforms a goal of the form `f x ≼ f y` into `x ≤ y` using lemmas
marked as `monotonic`.
Special care is taken when `f` is the repeated application of an
associative operator and if the operator is commutative
-/
meta def ac_mono_aux (cfg : mono_cfg := { mono_cfg . }) :
tactic unit :=
hide_meta_vars $ λ asms,
do try `[dunfold has_sub.sub algebra.sub],
tgt ← target >>= instantiate_mvars,
(l,r,id_rs,g) ← ac_monotonicity_goal cfg tgt
<|> fail "monotonic context not found",
ns ← get_monotonicity_lemmas tgt both,
p ← mk_pattern g,
rules ← find_rule asms ns p <|> fail "no applicable rules found",
when (rules = []) (fail "no applicable rules found"),
err ← format.join <$> mmap side_conditions rules,
focus1 $ best_match rules (λ rule, do
t₀ ← mk_meta_var `(Prop),
v₀ ← mk_meta_var t₀,
t₁ ← mk_meta_var `(Prop),
v₁ ← mk_meta_var t₁,
tactic.refine $ ``(apply_rel %%(g.rel_def) %%l %%r %%rule %%v₀ %%v₁),
solve_mvar v₀ (try (any_of id_rs rewrite_target) >>
( done <|>
refl <|>
ac_refl <|>
`[simp only [is_associative.assoc]]) ),
solve_mvar v₁ (try (any_of id_rs rewrite_target) >>
( done <|>
refl <|>
ac_refl <|>
`[simp only [is_associative.assoc]]) ),
n ← num_goals,
iterate_exactly (n-1) (try $ solve1 $ apply_instance <|>
tactic.solve_by_elim { lemmas := some asms }))
open sum nat
/-- `(repeat_until_or_at_most n t u)`: repeat tactic `t` at most `n` times or until `u` succeeds -/
meta def repeat_until_or_at_most : nat → tactic unit → tactic unit → tactic unit
| 0 t _ := fail "too many applications"
| (succ n) t u := u <|> (t >> repeat_until_or_at_most n t u)
meta def repeat_until : tactic unit → tactic unit → tactic unit :=
repeat_until_or_at_most 100000
@[derive _root_.has_reflect, derive _root_.inhabited]
inductive rep_arity : Type
| one | exactly (n : ℕ) | many
meta def repeat_or_not : rep_arity → tactic unit → option (tactic unit) → tactic unit
| rep_arity.one tac none := tac
| rep_arity.many tac none := repeat tac
| (rep_arity.exactly n) tac none := iterate_exactly n tac
| rep_arity.one tac (some until) := tac >> until
| rep_arity.many tac (some until) := repeat_until tac until
| (rep_arity.exactly n) tac (some until) := iterate_exactly n tac >> until
meta def assert_or_rule : lean.parser (pexpr ⊕ pexpr) :=
(tk ":=" *> inl <$> texpr <|> (tk ":" *> inr <$> texpr))
meta def arity : lean.parser rep_arity :=
rep_arity.many <$ tk "*" <|>
rep_arity.exactly <$> (tk "^" *> small_nat) <|>
pure rep_arity.one
/--
`ac_mono` reduces the `f x ⊑ f y`, for some relation `⊑` and a
monotonic function `f` to `x ≺ y`.
`ac_mono*` unwraps monotonic functions until it can't.
`ac_mono^k`, for some literal number `k` applies monotonicity `k`
times.
`ac_mono h`, with `h` a hypothesis, unwraps monotonic functions and
uses `h` to solve the remaining goal. Can be combined with `*` or `^k`:
`ac_mono* h`
`ac_mono : p` asserts `p` and uses it to discharge the goal result
unwrapping a series of monotonic functions. Can be combined with * or
^k: `ac_mono* : p`
In the case where `f` is an associative or commutative operator,
`ac_mono` will consider any possible permutation of its arguments and
use the one the minimizes the difference between the left-hand side
and the right-hand side.
To use it, first import `tactic.monotonicity`.
`ac_mono` can be used as follows:
```lean
example (x y z k m n : ℕ)
(h₀ : z ≥ 0)
(h₁ : x ≤ y) :
(m + x + n) * z + k ≤ z * (y + n + m) + k :=
begin
ac_mono,
-- ⊢ (m + x + n) * z ≤ z * (y + n + m)
ac_mono,
-- ⊢ m + x + n ≤ y + n + m
ac_mono,
end
```
As with `mono*`, `ac_mono*` solves the goal in one go and so does
`ac_mono* h₁`. The latter syntax becomes especially interesting in the
following example:
```lean
example (x y z k m n : ℕ)
(h₀ : z ≥ 0)
(h₁ : m + x + n ≤ y + n + m) :
(m + x + n) * z + k ≤ z * (y + n + m) + k :=
by ac_mono* h₁.
```
By giving `ac_mono` the assumption `h₁`, we are asking it to
stop earlier than it normally would.
-/
meta def ac_mono (rep : parse arity) :
parse assert_or_rule? →
opt_param mono_cfg { mono_cfg . } →
tactic unit
| none opt := focus1 $ repeat_or_not rep (ac_mono_aux opt) none
| (some (inl h)) opt :=
do focus1 $ repeat_or_not rep (ac_mono_aux opt) (some $ done <|> to_expr h >>= ac_refine)
| (some (inr t)) opt :=
do h ← i_to_expr t >>= assert `h,
tactic.swap,
focus1 $ repeat_or_not rep (ac_mono_aux opt) (some $ done <|> ac_refine h)
/-
TODO(Simon): with `ac_mono h` and `ac_mono : p` split the remaining
goal if the provided rule does not solve it completely.
-/
add_tactic_doc
{ name := "ac_mono",
category := doc_category.tactic,
decl_names := [`tactic.interactive.ac_mono],
tags := ["monotonicity"] }
attribute [mono] and.imp or.imp
end tactic.interactive
|
a952510ad7471cbc5dc386ce29a52a68d58bad44
|
d406927ab5617694ec9ea7001f101b7c9e3d9702
|
/src/topology/vector_bundle/basic.lean
|
52b78edd640aefa7622993d5a282594b66d7e7c3
|
[
"Apache-2.0"
] |
permissive
|
alreadydone/mathlib
|
dc0be621c6c8208c581f5170a8216c5ba6721927
|
c982179ec21091d3e102d8a5d9f5fe06c8fafb73
|
refs/heads/master
| 1,685,523,275,196
| 1,670,184,141,000
| 1,670,184,141,000
| 287,574,545
| 0
| 0
|
Apache-2.0
| 1,670,290,714,000
| 1,597,421,623,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 34,780
|
lean
|
/-
Copyright © 2020 Nicolò Cavalleri. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Nicolò Cavalleri, Sebastien Gouezel, Heather Macbeth, Patrick Massot, Floris van Doorn
-/
import analysis.normed_space.bounded_linear_maps
import topology.fiber_bundle.basic
/-!
# Vector bundles
In this file we define (topological) vector bundles.
Let `B` be the base space, let `F` be a normed space over a normed field `R`, and let
`E : B → Type*` be a `fiber_bundle` with fiber `F`, in which, for each `x`, the fiber `E x` is a
topological vector space over `R`.
To have a vector bundle structure on `bundle.total_space E`, one should additionally have the
following properties:
* The bundle trivializations in the trivialization atlas should be continuous linear equivs in the
fibres;
* For any two trivializations `e`, `e'` in the atlas the transition function considered as a map
from `B` into `F →L[R] F` is continuous on `e.base_set ∩ e'.base_set` with respect to the operator
norm topology on `F →L[R] F`.
If these conditions are satisfied, we register the typeclass `vector_bundle R F E`.
We define constructions on vector bundles like pullbacks and direct sums in other files.
## Implementation notes
The implementation choices in the vector bundle definition are discussed in the "Implementation
notes" section of `topology.fiber_bundle.basic`.
## Tags
Vector bundle
-/
noncomputable theory
open bundle set
open_locale classical bundle
variables (R 𝕜 : Type*) {B : Type*} (F : Type*) (E : B → Type*)
section topological_vector_space
variables {B F E} [semiring R]
[topological_space F] [topological_space B]
/-- A mixin class for `pretrivialization`, stating that a pretrivialization is fibrewise linear with
respect to given module structures on its fibres and the model fibre. -/
protected class pretrivialization.is_linear [add_comm_monoid F] [module R F]
[∀ x, add_comm_monoid (E x)] [∀ x, module R (E x)] (e : pretrivialization F (π E)) :
Prop :=
(linear : ∀ b ∈ e.base_set, is_linear_map R (λ x : E b, (e (total_space_mk b x)).2))
namespace pretrivialization
variables {F E} (e : pretrivialization F (π E)) {x : total_space E} {b : B} {y : E b}
lemma linear [add_comm_monoid F] [module R F] [∀ x, add_comm_monoid (E x)] [∀ x, module R (E x)]
[e.is_linear R] {b : B} (hb : b ∈ e.base_set) :
is_linear_map R (λ x : E b, (e (total_space_mk b x)).2) :=
pretrivialization.is_linear.linear b hb
variables [add_comm_monoid F] [module R F] [∀ x, add_comm_monoid (E x)] [∀ x, module R (E x)]
/-- A fiberwise linear inverse to `e`. -/
@[simps] protected def symmₗ (e : pretrivialization F (π E)) [e.is_linear R] (b : B) :
F →ₗ[R] E b :=
begin
refine is_linear_map.mk' (e.symm b) _,
by_cases hb : b ∈ e.base_set,
{ exact (((e.linear R hb).mk' _).inverse (e.symm b) (e.symm_apply_apply_mk hb)
(λ v, congr_arg prod.snd $ e.apply_mk_symm hb v)).is_linear },
{ rw [e.coe_symm_of_not_mem hb], exact (0 : F →ₗ[R] E b).is_linear }
end
/-- A pretrivialization for a vector bundle defines linear equivalences between the
fibers and the model space. -/
@[simps {fully_applied := ff}] def linear_equiv_at (e : pretrivialization F (π E)) [e.is_linear R]
(b : B) (hb : b ∈ e.base_set) :
E b ≃ₗ[R] F :=
{ to_fun := λ y, (e (total_space_mk b y)).2,
inv_fun := e.symm b,
left_inv := e.symm_apply_apply_mk hb,
right_inv := λ v, by simp_rw [e.apply_mk_symm hb v],
map_add' := λ v w, (e.linear R hb).map_add v w,
map_smul' := λ c v, (e.linear R hb).map_smul c v }
/-- A fiberwise linear map equal to `e` on `e.base_set`. -/
protected def linear_map_at (e : pretrivialization F (π E)) [e.is_linear R] (b : B) : E b →ₗ[R] F :=
if hb : b ∈ e.base_set then e.linear_equiv_at R b hb else 0
variables {R}
lemma coe_linear_map_at (e : pretrivialization F (π E)) [e.is_linear R] (b : B) :
⇑(e.linear_map_at R b) = λ y, if b ∈ e.base_set then (e (total_space_mk b y)).2 else 0 :=
by { rw [pretrivialization.linear_map_at], split_ifs; refl }
lemma coe_linear_map_at_of_mem (e : pretrivialization F (π E)) [e.is_linear R] {b : B}
(hb : b ∈ e.base_set) :
⇑(e.linear_map_at R b) = λ y, (e (total_space_mk b y)).2 :=
by simp_rw [coe_linear_map_at, if_pos hb]
lemma linear_map_at_apply (e : pretrivialization F (π E)) [e.is_linear R] {b : B} (y : E b) :
e.linear_map_at R b y = if b ∈ e.base_set then (e (total_space_mk b y)).2 else 0 :=
by rw [coe_linear_map_at]
lemma linear_map_at_def_of_mem (e : pretrivialization F (π E)) [e.is_linear R] {b : B}
(hb : b ∈ e.base_set) :
e.linear_map_at R b = e.linear_equiv_at R b hb :=
dif_pos hb
lemma linear_map_at_def_of_not_mem (e : pretrivialization F (π E)) [e.is_linear R] {b : B}
(hb : b ∉ e.base_set) :
e.linear_map_at R b = 0 :=
dif_neg hb
lemma linear_map_at_eq_zero (e : pretrivialization F (π E)) [e.is_linear R] {b : B}
(hb : b ∉ e.base_set) :
e.linear_map_at R b = 0 :=
dif_neg hb
lemma symmₗ_linear_map_at (e : pretrivialization F (π E)) [e.is_linear R] {b : B}
(hb : b ∈ e.base_set) (y : E b) :
e.symmₗ R b (e.linear_map_at R b y) = y :=
by { rw [e.linear_map_at_def_of_mem hb], exact (e.linear_equiv_at R b hb).left_inv y }
lemma linear_map_at_symmₗ (e : pretrivialization F (π E)) [e.is_linear R] {b : B}
(hb : b ∈ e.base_set) (y : F) :
e.linear_map_at R b (e.symmₗ R b y) = y :=
by { rw [e.linear_map_at_def_of_mem hb], exact (e.linear_equiv_at R b hb).right_inv y }
end pretrivialization
variables (R) [topological_space (total_space E)]
/-- A mixin class for `trivialization`, stating that a trivialization is fibrewise linear with
respect to given module structures on its fibres and the model fibre. -/
protected class trivialization.is_linear [add_comm_monoid F] [module R F]
[∀ x, add_comm_monoid (E x)] [∀ x, module R (E x)] (e : trivialization F (π E)) : Prop :=
(linear : ∀ b ∈ e.base_set, is_linear_map R (λ x : E b, (e (total_space_mk b x)).2))
namespace trivialization
variables (e : trivialization F (π E)) {x : total_space E} {b : B} {y : E b}
protected lemma linear [add_comm_monoid F] [module R F] [∀ x, add_comm_monoid (E x)]
[∀ x, module R (E x)] [e.is_linear R] {b : B} (hb : b ∈ e.base_set) :
is_linear_map R (λ y : E b, (e (total_space_mk b y)).2) :=
trivialization.is_linear.linear b hb
instance to_pretrivialization.is_linear [add_comm_monoid F] [module R F]
[∀ x, add_comm_monoid (E x)] [∀ x, module R (E x)] [e.is_linear R] :
e.to_pretrivialization.is_linear R :=
{ ..(‹_› : e.is_linear R) }
variables [add_comm_monoid F] [module R F] [∀ x, add_comm_monoid (E x)] [∀ x, module R (E x)]
/-- A trivialization for a vector bundle defines linear equivalences between the
fibers and the model space. -/
def linear_equiv_at (e : trivialization F (π E)) [e.is_linear R] (b : B) (hb : b ∈ e.base_set) :
E b ≃ₗ[R] F :=
e.to_pretrivialization.linear_equiv_at R b hb
variables {R}
@[simp]
lemma linear_equiv_at_apply (e : trivialization F (π E)) [e.is_linear R] (b : B)
(hb : b ∈ e.base_set) (v : E b) :
e.linear_equiv_at R b hb v = (e (total_space_mk b v)).2 := rfl
@[simp]
lemma linear_equiv_at_symm_apply (e : trivialization F (π E)) [e.is_linear R] (b : B)
(hb : b ∈ e.base_set) (v : F) :
(e.linear_equiv_at R b hb).symm v = e.symm b v := rfl
variables (R)
/-- A fiberwise linear inverse to `e`. -/
protected def symmₗ (e : trivialization F (π E)) [e.is_linear R] (b : B) : F →ₗ[R] E b :=
e.to_pretrivialization.symmₗ R b
variables {R}
lemma coe_symmₗ (e : trivialization F (π E)) [e.is_linear R] (b : B) : ⇑(e.symmₗ R b) = e.symm b :=
rfl
variables (R)
/-- A fiberwise linear map equal to `e` on `e.base_set`. -/
protected def linear_map_at (e : trivialization F (π E)) [e.is_linear R] (b : B) : E b →ₗ[R] F :=
e.to_pretrivialization.linear_map_at R b
variables {R}
lemma coe_linear_map_at (e : trivialization F (π E)) [e.is_linear R] (b : B) :
⇑(e.linear_map_at R b) = λ y, if b ∈ e.base_set then (e (total_space_mk b y)).2 else 0 :=
e.to_pretrivialization.coe_linear_map_at b
lemma coe_linear_map_at_of_mem (e : trivialization F (π E)) [e.is_linear R] {b : B}
(hb : b ∈ e.base_set) :
⇑(e.linear_map_at R b) = λ y, (e (total_space_mk b y)).2 :=
by simp_rw [coe_linear_map_at, if_pos hb]
lemma linear_map_at_apply (e : trivialization F (π E)) [e.is_linear R] {b : B} (y : E b) :
e.linear_map_at R b y = if b ∈ e.base_set then (e (total_space_mk b y)).2 else 0 :=
by rw [coe_linear_map_at]
lemma linear_map_at_def_of_mem (e : trivialization F (π E)) [e.is_linear R] {b : B}
(hb : b ∈ e.base_set) :
e.linear_map_at R b = e.linear_equiv_at R b hb :=
dif_pos hb
lemma linear_map_at_def_of_not_mem (e : trivialization F (π E)) [e.is_linear R] {b : B}
(hb : b ∉ e.base_set) :
e.linear_map_at R b = 0 :=
dif_neg hb
lemma symmₗ_linear_map_at (e : trivialization F (π E)) [e.is_linear R] {b : B} (hb : b ∈ e.base_set)
(y : E b) :
e.symmₗ R b (e.linear_map_at R b y) = y :=
e.to_pretrivialization.symmₗ_linear_map_at hb y
lemma linear_map_at_symmₗ (e : trivialization F (π E)) [e.is_linear R] {b : B} (hb : b ∈ e.base_set)
(y : F) :
e.linear_map_at R b (e.symmₗ R b y) = y :=
e.to_pretrivialization.linear_map_at_symmₗ hb y
variables (R)
/-- A coordinate change function between two trivializations, as a continuous linear equivalence.
Defined to be the identity when `b` does not lie in the base set of both trivializations. -/
def coord_changeL (e e' : trivialization F (π E)) [e.is_linear R] [e'.is_linear R] (b : B) :
F ≃L[R] F :=
{ continuous_to_fun := begin
by_cases hb : b ∈ e.base_set ∩ e'.base_set,
{ simp_rw [dif_pos hb],
refine (e'.continuous_on.comp_continuous _ _).snd,
exact e.continuous_on_symm.comp_continuous (continuous.prod.mk b)
(λ y, mk_mem_prod hb.1 (mem_univ y)),
exact (λ y, e'.mem_source.mpr hb.2) },
{ rw [dif_neg hb], exact continuous_id }
end,
continuous_inv_fun := begin
by_cases hb : b ∈ e.base_set ∩ e'.base_set,
{ simp_rw [dif_pos hb],
refine (e.continuous_on.comp_continuous _ _).snd,
exact e'.continuous_on_symm.comp_continuous (continuous.prod.mk b)
(λ y, mk_mem_prod hb.2 (mem_univ y)),
exact (λ y, e.mem_source.mpr hb.1) },
{ rw [dif_neg hb], exact continuous_id }
end,
.. if hb : b ∈ e.base_set ∩ e'.base_set then
(e.linear_equiv_at R b (hb.1 : _)).symm.trans (e'.linear_equiv_at R b hb.2)
else linear_equiv.refl R F }
variables {R}
lemma coe_coord_changeL (e e' : trivialization F (π E)) [e.is_linear R] [e'.is_linear R] {b : B}
(hb : b ∈ e.base_set ∩ e'.base_set) :
⇑(coord_changeL R e e' b)
= (e.linear_equiv_at R b hb.1).symm.trans (e'.linear_equiv_at R b hb.2) :=
congr_arg linear_equiv.to_fun (dif_pos hb)
lemma coord_changeL_apply (e e' : trivialization F (π E)) [e.is_linear R] [e'.is_linear R] {b : B}
(hb : b ∈ e.base_set ∩ e'.base_set) (y : F) :
coord_changeL R e e' b y = (e' (total_space_mk b (e.symm b y))).2 :=
congr_arg (λ f, linear_equiv.to_fun f y) (dif_pos hb)
lemma mk_coord_changeL (e e' : trivialization F (π E)) [e.is_linear R] [e'.is_linear R] {b : B}
(hb : b ∈ e.base_set ∩ e'.base_set) (y : F) :
(b, coord_changeL R e e' b y) = e' (total_space_mk b (e.symm b y)) :=
begin
ext,
{ rw [e.mk_symm hb.1 y, e'.coe_fst', e.proj_symm_apply' hb.1],
rw [e.proj_symm_apply' hb.1], exact hb.2 },
{ exact e.coord_changeL_apply e' hb y }
end
/-- A version of `coord_changeL_apply` that fully unfolds `coord_changeL`. The right-hand side is
ugly, but has good definitional properties for specifically defined trivializations. -/
lemma coord_changeL_apply' (e e' : trivialization F (π E)) [e.is_linear R] [e'.is_linear R] {b : B}
(hb : b ∈ e.base_set ∩ e'.base_set) (y : F) :
coord_changeL R e e' b y = (e' (e.to_local_homeomorph.symm (b, y))).2 :=
by rw [e.coord_changeL_apply e' hb, e.mk_symm hb.1]
lemma coord_changeL_symm_apply (e e' : trivialization F (π E)) [e.is_linear R] [e'.is_linear R]
{b : B} (hb : b ∈ e.base_set ∩ e'.base_set) :
⇑(coord_changeL R e e' b).symm
= (e'.linear_equiv_at R b hb.2).symm.trans (e.linear_equiv_at R b hb.1) :=
congr_arg linear_equiv.inv_fun (dif_pos hb)
end trivialization
end topological_vector_space
section
variables [nontrivially_normed_field R] [∀ x, add_comm_monoid (E x)] [∀ x, module R (E x)]
[normed_add_comm_group F] [normed_space R F] [topological_space B]
[topological_space (total_space E)] [∀ x, topological_space (E x)] [fiber_bundle F E]
/-- The space `total_space E` (for `E : B → Type*` such that each `E x` is a topological vector
space) has a topological vector space structure with fiber `F` (denoted with
`vector_bundle R F E`) if around every point there is a fiber bundle trivialization
which is linear in the fibers. -/
class vector_bundle : Prop :=
(trivialization_linear' : ∀ (e : trivialization F (π E)) [mem_trivialization_atlas e],
e.is_linear R)
(continuous_on_coord_change' [] : ∀ (e e' : trivialization F (π E)) [mem_trivialization_atlas e]
[mem_trivialization_atlas e'],
continuous_on
(λ b, by exactI trivialization.coord_changeL R e e' b : B → F →L[R] F) (e.base_set ∩ e'.base_set))
variables {F E}
@[priority 100]
instance trivialization_linear [vector_bundle R F E] (e : trivialization F (π E))
[mem_trivialization_atlas e] :
e.is_linear R :=
vector_bundle.trivialization_linear' e
lemma continuous_on_coord_change [vector_bundle R F E] (e e' : trivialization F (π E))
[he : mem_trivialization_atlas e]
[he' : mem_trivialization_atlas e'] :
continuous_on
(λ b, trivialization.coord_changeL R e e' b : B → F →L[R] F) (e.base_set ∩ e'.base_set) :=
vector_bundle.continuous_on_coord_change' R e e'
namespace trivialization
/-- Forward map of `continuous_linear_equiv_at` (only propositionally equal),
defined everywhere (`0` outside domain). -/
@[simps apply {fully_applied := ff}]
def continuous_linear_map_at (e : trivialization F (π E)) [e.is_linear R] (b : B) :
E b →L[R] F :=
{ to_fun := e.linear_map_at R b, -- given explicitly to help `simps`
cont := begin
dsimp,
rw [e.coe_linear_map_at b],
refine continuous_if_const _ (λ hb, _) (λ _, continuous_zero),
exact continuous_snd.comp (e.to_local_homeomorph.continuous_on.comp_continuous
(fiber_bundle.total_space_mk_inducing F E b).continuous
(λ x, e.mem_source.mpr hb))
end,
.. e.linear_map_at R b }
/-- Backwards map of `continuous_linear_equiv_at`, defined everywhere. -/
@[simps apply {fully_applied := ff}]
def symmL (e : trivialization F (π E)) [e.is_linear R] (b : B) : F →L[R] E b :=
{ to_fun := e.symm b, -- given explicitly to help `simps`
cont := begin
by_cases hb : b ∈ e.base_set,
{ rw (fiber_bundle.total_space_mk_inducing F E b).continuous_iff,
exact e.continuous_on_symm.comp_continuous (continuous_const.prod_mk continuous_id)
(λ x, mk_mem_prod hb (mem_univ x)) },
{ refine continuous_zero.congr (λ x, (e.symm_apply_of_not_mem hb x).symm) },
end,
.. e.symmₗ R b }
variables {R}
lemma symmL_continuous_linear_map_at (e : trivialization F (π E)) [e.is_linear R] {b : B}
(hb : b ∈ e.base_set) (y : E b) :
e.symmL R b (e.continuous_linear_map_at R b y) = y :=
e.symmₗ_linear_map_at hb y
lemma continuous_linear_map_at_symmL (e : trivialization F (π E)) [e.is_linear R] {b : B}
(hb : b ∈ e.base_set) (y : F) :
e.continuous_linear_map_at R b (e.symmL R b y) = y :=
e.linear_map_at_symmₗ hb y
variables (R)
/-- In a vector bundle, the restriction of a trivialization to a single fiber (which is a priori
only linear) is in fact a continuous linear equivalence between that fiber and the model fiber. -/
@[simps apply symm_apply {fully_applied := ff}]
def continuous_linear_equiv_at (e : trivialization F (π E)) [e.is_linear R] (b : B)
(hb : b ∈ e.base_set) : E b ≃L[R] F :=
{ to_fun := λ y, (e (total_space_mk b y)).2, -- given explicitly to help `simps`
inv_fun := e.symm b, -- given explicitly to help `simps`
continuous_to_fun := continuous_snd.comp (e.to_local_homeomorph.continuous_on.comp_continuous
(fiber_bundle.total_space_mk_inducing F E b).continuous
(λ x, e.mem_source.mpr hb)),
continuous_inv_fun := (e.symmL R b).continuous,
.. e.to_pretrivialization.linear_equiv_at R b hb }
variables {R}
lemma coe_continuous_linear_equiv_at_eq (e : trivialization F (π E)) [e.is_linear R] {b : B}
(hb : b ∈ e.base_set) :
(e.continuous_linear_equiv_at R b hb : E b → F) = e.continuous_linear_map_at R b :=
(e.coe_linear_map_at_of_mem hb).symm
lemma symm_continuous_linear_equiv_at_eq (e : trivialization F (π E)) [e.is_linear R] {b : B}
(hb : b ∈ e.base_set) :
((e.continuous_linear_equiv_at R b hb).symm : F → E b) = e.symmL R b :=
rfl
@[simp] lemma continuous_linear_equiv_at_apply' (e : trivialization F (π E)) [e.is_linear R]
(x : total_space E) (hx : x ∈ e.source) :
e.continuous_linear_equiv_at R x.proj (e.mem_source.1 hx) x.2 = (e x).2 := by { cases x, refl }
variables (R)
lemma apply_eq_prod_continuous_linear_equiv_at (e : trivialization F (π E)) [e.is_linear R] (b : B)
(hb : b ∈ e.base_set) (z : E b) :
e.to_local_homeomorph ⟨b, z⟩ = (b, e.continuous_linear_equiv_at R b hb z) :=
begin
ext,
{ refine e.coe_fst _,
rw e.source_eq,
exact hb },
{ simp only [coe_coe, continuous_linear_equiv_at_apply] }
end
variables {R}
lemma symm_apply_eq_mk_continuous_linear_equiv_at_symm (e : trivialization F (π E)) [e.is_linear R]
(b : B) (hb : b ∈ e.base_set) (z : F) :
e.to_local_homeomorph.symm ⟨b, z⟩
= total_space_mk b ((e.continuous_linear_equiv_at R b hb).symm z) :=
begin
have h : (b, z) ∈ e.to_local_homeomorph.target,
{ rw e.target_eq,
exact ⟨hb, mem_univ _⟩ },
apply e.to_local_homeomorph.inj_on (e.to_local_homeomorph.map_target h),
{ simp only [e.source_eq, hb, mem_preimage]},
simp_rw [e.apply_eq_prod_continuous_linear_equiv_at R b hb, e.to_local_homeomorph.right_inv h,
continuous_linear_equiv.apply_symm_apply],
end
lemma comp_continuous_linear_equiv_at_eq_coord_change (e e' : trivialization F (π E))
[e.is_linear R] [e'.is_linear R] {b : B} (hb : b ∈ e.base_set ∩ e'.base_set) :
(e.continuous_linear_equiv_at R b hb.1).symm.trans (e'.continuous_linear_equiv_at R b hb.2)
= coord_changeL R e e' b :=
by { ext v, rw [coord_changeL_apply e e' hb], refl }
end trivialization
include R F
/-! ### Constructing vector bundles -/
variables (R B F)
/-- Analogous construction of `fiber_bundle_core` for vector bundles. This
construction gives a way to construct vector bundles from a structure registering how
trivialization changes act on fibers. -/
structure vector_bundle_core (ι : Type*) :=
(base_set : ι → set B)
(is_open_base_set : ∀ i, is_open (base_set i))
(index_at : B → ι)
(mem_base_set_at : ∀ x, x ∈ base_set (index_at x))
(coord_change : ι → ι → B → (F →L[R] F))
(coord_change_self : ∀ i, ∀ x ∈ base_set i, ∀ v, coord_change i i x v = v)
(continuous_on_coord_change : ∀ i j, continuous_on (coord_change i j) (base_set i ∩ base_set j))
(coord_change_comp : ∀ i j k, ∀ x ∈ (base_set i) ∩ (base_set j) ∩ (base_set k), ∀ v,
(coord_change j k x) (coord_change i j x v) = coord_change i k x v)
/-- The trivial vector bundle core, in which all the changes of coordinates are the
identity. -/
def trivial_vector_bundle_core (ι : Type*) [inhabited ι] :
vector_bundle_core R B F ι :=
{ base_set := λ ι, univ,
is_open_base_set := λ i, is_open_univ,
index_at := default,
mem_base_set_at := λ x, mem_univ x,
coord_change := λ i j x, continuous_linear_map.id R F,
coord_change_self := λ i x hx v, rfl,
coord_change_comp := λ i j k x hx v, rfl,
continuous_on_coord_change := λ i j, continuous_on_const }
instance (ι : Type*) [inhabited ι] : inhabited (vector_bundle_core R B F ι) :=
⟨trivial_vector_bundle_core R B F ι⟩
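-- Illustrative sanity check (not part of the original file): in `trivial_vector_bundle_core`
-- every coordinate change is, by definition, the identity continuous linear map, so the
-- statement below should be provable by `rfl` under the section's instance assumptions.
example {ι : Type*} [inhabited ι] (i j : ι) (b : B) :
  (trivial_vector_bundle_core R B F ι).coord_change i j b = continuous_linear_map.id R F :=
rfl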
namespace vector_bundle_core
variables {R B F} {ι : Type*} (Z : vector_bundle_core R B F ι)
/-- Natural identification to a `fiber_bundle_core`. -/
@[simps (mfld_cfg)] def to_fiber_bundle_core : fiber_bundle_core ι B F :=
{ coord_change := λ i j b, Z.coord_change i j b,
continuous_on_coord_change := λ i j, is_bounded_bilinear_map_apply.continuous.comp_continuous_on
((Z.continuous_on_coord_change i j).prod_map continuous_on_id),
..Z }
instance to_fiber_bundle_core_coe : has_coe (vector_bundle_core R B F ι)
(fiber_bundle_core ι B F) := ⟨to_fiber_bundle_core⟩
include Z
lemma coord_change_linear_comp (i j k : ι) : ∀ x ∈ (Z.base_set i) ∩ (Z.base_set j) ∩ (Z.base_set k),
(Z.coord_change j k x).comp (Z.coord_change i j x) = Z.coord_change i k x :=
λ x hx, by { ext v, exact Z.coord_change_comp i j k x hx v }
/-- The index set of a vector bundle core, as a convenience function for dot notation -/
@[nolint unused_arguments has_nonempty_instance]
def index := ι
/-- The base space of a vector bundle core, as a convenience function for dot notation. -/
@[nolint unused_arguments, reducible]
def base := B
/-- The fiber of a vector bundle core, as a convenience function for dot notation and
typeclass inference -/
@[nolint unused_arguments has_nonempty_instance]
def fiber : B → Type* := Z.to_fiber_bundle_core.fiber
instance topological_space_fiber (x : B) : topological_space (Z.fiber x) :=
by delta_instance vector_bundle_core.fiber
instance add_comm_monoid_fiber : ∀ (x : B), add_comm_monoid (Z.fiber x) :=
by dsimp [vector_bundle_core.fiber]; delta_instance fiber_bundle_core.fiber
instance module_fiber : ∀ (x : B), module R (Z.fiber x) :=
by dsimp [vector_bundle_core.fiber]; delta_instance fiber_bundle_core.fiber
instance add_comm_group_fiber [add_comm_group F] : ∀ (x : B), add_comm_group (Z.fiber x) :=
by dsimp [vector_bundle_core.fiber]; delta_instance fiber_bundle_core.fiber
/-- The projection from the total space of a fiber bundle core, on its base. -/
@[reducible, simp, mfld_simps] def proj : total_space Z.fiber → B := total_space.proj
/-- The total space of the vector bundle, as a convenience function for dot notation.
It is by definition equal to `bundle.total_space Z.fiber`, a.k.a. `Σ x, Z.fiber x` but with a
different name for typeclass inference. -/
@[nolint unused_arguments, reducible]
def total_space := bundle.total_space Z.fiber
/-- Local homeomorphism version of the trivialization change. -/
def triv_change (i j : ι) : local_homeomorph (B × F) (B × F) :=
fiber_bundle_core.triv_change ↑Z i j
@[simp, mfld_simps] lemma mem_triv_change_source (i j : ι) (p : B × F) :
p ∈ (Z.triv_change i j).source ↔ p.1 ∈ Z.base_set i ∩ Z.base_set j :=
fiber_bundle_core.mem_triv_change_source ↑Z i j p
/-- Topological structure on the total space of a vector bundle created from core, designed so
that all the local trivializations are continuous. -/
instance to_topological_space : topological_space Z.total_space :=
Z.to_fiber_bundle_core.to_topological_space
variables (b : B) (a : F)
@[simp, mfld_simps] lemma coe_coord_change (i j : ι) :
Z.to_fiber_bundle_core.coord_change i j b = Z.coord_change i j b := rfl
/-- One of the standard local trivializations of a vector bundle constructed from core, taken by
considering this in particular as a fiber bundle constructed from core. -/
def local_triv (i : ι) : trivialization F (π Z.fiber) :=
by dsimp [vector_bundle_core.total_space, vector_bundle_core.fiber];
exact Z.to_fiber_bundle_core.local_triv i
/-- The standard local trivializations of a vector bundle constructed from core are linear. -/
instance local_triv.is_linear (i : ι) : (Z.local_triv i).is_linear R :=
{ linear := λ x hx, by dsimp [vector_bundle_core.local_triv]; exact
{ map_add := λ v w, by simp only [continuous_linear_map.map_add] with mfld_simps,
map_smul := λ r v, by simp only [continuous_linear_map.map_smul] with mfld_simps} }
variables (i j : ι)
@[simp, mfld_simps] lemma mem_local_triv_source (p : Z.total_space) :
p ∈ (Z.local_triv i).source ↔ p.1 ∈ Z.base_set i :=
by dsimp [vector_bundle_core.fiber]; exact iff.rfl
@[simp, mfld_simps] lemma base_set_at : Z.base_set i = (Z.local_triv i).base_set := rfl
@[simp, mfld_simps] lemma local_triv_apply (p : Z.total_space) :
(Z.local_triv i) p = ⟨p.1, Z.coord_change (Z.index_at p.1) i p.1 p.2⟩ := rfl
@[simp, mfld_simps] lemma mem_local_triv_target (p : B × F) :
p ∈ (Z.local_triv i).target ↔ p.1 ∈ (Z.local_triv i).base_set :=
Z.to_fiber_bundle_core.mem_local_triv_target i p
@[simp, mfld_simps] lemma local_triv_symm_fst (p : B × F) :
(Z.local_triv i).to_local_homeomorph.symm p =
⟨p.1, Z.coord_change i (Z.index_at p.1) p.1 p.2⟩ := rfl
@[simp, mfld_simps] lemma local_triv_symm_apply {b : B} (hb : b ∈ Z.base_set i) (v : F) :
(Z.local_triv i).symm b v = Z.coord_change i (Z.index_at b) b v :=
by apply (Z.local_triv i).symm_apply hb v
@[simp, mfld_simps] lemma local_triv_coord_change_eq {b : B} (hb : b ∈ Z.base_set i ∩ Z.base_set j)
(v : F) :
(Z.local_triv i).coord_changeL R (Z.local_triv j) b v = Z.coord_change i j b v :=
begin
rw [trivialization.coord_changeL_apply', local_triv_symm_fst, local_triv_apply,
coord_change_comp],
exacts [⟨⟨hb.1, Z.mem_base_set_at b⟩, hb.2⟩, hb]
end
/-- Preferred local trivialization of a vector bundle constructed from core, at a given point, as
a bundle trivialization -/
def local_triv_at (b : B) : trivialization F (π Z.fiber) :=
Z.local_triv (Z.index_at b)
@[simp, mfld_simps] lemma local_triv_at_def :
Z.local_triv (Z.index_at b) = Z.local_triv_at b := rfl
@[simp, mfld_simps] lemma mem_source_at : (⟨b, a⟩ : Z.total_space) ∈ (Z.local_triv_at b).source :=
by { rw [local_triv_at, mem_local_triv_source], exact Z.mem_base_set_at b }
@[simp, mfld_simps] lemma local_triv_at_apply (p : Z.total_space) :
((Z.local_triv_at p.1) p) = ⟨p.1, p.2⟩ :=
fiber_bundle_core.local_triv_at_apply Z p
@[simp, mfld_simps] lemma local_triv_at_apply_mk (b : B) (a : F) :
((Z.local_triv_at b) ⟨b, a⟩) = ⟨b, a⟩ :=
Z.local_triv_at_apply _
@[simp, mfld_simps] lemma mem_local_triv_at_base_set :
b ∈ (Z.local_triv_at b).base_set :=
fiber_bundle_core.mem_local_triv_at_base_set Z b
instance fiber_bundle : fiber_bundle F Z.fiber := Z.to_fiber_bundle_core.fiber_bundle
instance vector_bundle : vector_bundle R F Z.fiber :=
{ trivialization_linear' := begin
rintros _ ⟨i, rfl⟩,
apply local_triv.is_linear,
end,
continuous_on_coord_change' := begin
rintros _ _ ⟨i, rfl⟩ ⟨i', rfl⟩,
refine (Z.continuous_on_coord_change i i').congr (λ b hb, _),
ext v,
exact Z.local_triv_coord_change_eq i i' hb v,
end }
/-- The projection on the base of a vector bundle created from core is continuous -/
@[continuity] lemma continuous_proj : continuous Z.proj :=
fiber_bundle_core.continuous_proj Z
/-- The projection on the base of a vector bundle created from core is an open map -/
lemma is_open_map_proj : is_open_map Z.proj :=
fiber_bundle_core.is_open_map_proj Z
end vector_bundle_core
end
/-! ### Vector prebundle -/
section
variables [nontrivially_normed_field R] [∀ x, add_comm_monoid (E x)] [∀ x, module R (E x)]
[normed_add_comm_group F] [normed_space R F] [topological_space B]
open topological_space
open vector_bundle
/-- This structure makes it possible to define a vector bundle when trivializations are given as
local equivalences but there is not yet a topology on the total space or the fibers.
The total space is hence given a topology in such a way that there is a fiber bundle structure for
which the local equivalences are also local homeomorphisms and hence vector bundle trivializations.
The topology on the fibers is induced from the one on the total space.
The field `exists_coord_change` is stated as an existential statement (instead of 3 separate
fields), since it depends on propositional information (namely `e e' ∈ pretrivialization_atlas`).
This makes it inconvenient to explicitly define a `coord_change` function when constructing a
`vector_prebundle`. -/
@[nolint has_nonempty_instance]
structure vector_prebundle :=
(pretrivialization_atlas : set (pretrivialization F (π E)))
(pretrivialization_linear' : ∀ (e : pretrivialization F (π E)) (he : e ∈ pretrivialization_atlas),
e.is_linear R)
(pretrivialization_at : B → pretrivialization F (π E))
(mem_base_pretrivialization_at : ∀ x : B, x ∈ (pretrivialization_at x).base_set)
(pretrivialization_mem_atlas : ∀ x : B, pretrivialization_at x ∈ pretrivialization_atlas)
(exists_coord_change : ∀ (e e' ∈ pretrivialization_atlas), ∃ f : B → F →L[R] F,
continuous_on f (e.base_set ∩ e'.base_set) ∧
∀ (b : B) (hb : b ∈ e.base_set ∩ e'.base_set) (v : F),
f b v = (e' (total_space_mk b (e.symm b v))).2)
namespace vector_prebundle
variables {R E F}
/-- An arbitrarily chosen coordinate change on a `vector_prebundle`, extracted via
`classical.some` from the field `exists_coord_change`. -/
def coord_change (a : vector_prebundle R F E)
{e e' : pretrivialization F (π E)} (he : e ∈ a.pretrivialization_atlas)
(he' : e' ∈ a.pretrivialization_atlas) (b : B) : F →L[R] F :=
classical.some (a.exists_coord_change e he e' he') b
lemma continuous_on_coord_change (a : vector_prebundle R F E)
{e e' : pretrivialization F (π E)} (he : e ∈ a.pretrivialization_atlas)
(he' : e' ∈ a.pretrivialization_atlas) :
continuous_on (a.coord_change he he') (e.base_set ∩ e'.base_set) :=
(classical.some_spec (a.exists_coord_change e he e' he')).1
lemma coord_change_apply (a : vector_prebundle R F E)
{e e' : pretrivialization F (π E)} (he : e ∈ a.pretrivialization_atlas)
(he' : e' ∈ a.pretrivialization_atlas) {b : B} (hb : b ∈ e.base_set ∩ e'.base_set) (v : F) :
a.coord_change he he' b v = (e' (total_space_mk b (e.symm b v))).2 :=
(classical.some_spec (a.exists_coord_change e he e' he')).2 b hb v
lemma mk_coord_change (a : vector_prebundle R F E)
{e e' : pretrivialization F (π E)} (he : e ∈ a.pretrivialization_atlas)
(he' : e' ∈ a.pretrivialization_atlas) {b : B} (hb : b ∈ e.base_set ∩ e'.base_set) (v : F) :
(b, a.coord_change he he' b v) = e' (total_space_mk b (e.symm b v)) :=
begin
ext,
{ rw [e.mk_symm hb.1 v, e'.coe_fst', e.proj_symm_apply' hb.1],
rw [e.proj_symm_apply' hb.1], exact hb.2 },
{ exact a.coord_change_apply he he' hb v }
end
/-- Natural identification of `vector_prebundle` as a `fiber_prebundle`. -/
def to_fiber_prebundle (a : vector_prebundle R F E) :
fiber_prebundle F E :=
{ continuous_triv_change := begin
intros e he e' he',
have := is_bounded_bilinear_map_apply.continuous.comp_continuous_on
((a.continuous_on_coord_change he' he).prod_map continuous_on_id),
have H : e'.to_local_equiv.target ∩ e'.to_local_equiv.symm ⁻¹'
e.to_local_equiv.source = (e'.base_set ∩ e.base_set) ×ˢ univ,
{ rw [e'.target_eq, e.source_eq],
ext ⟨b, f⟩,
simp only [-total_space.proj, and.congr_right_iff, e'.proj_symm_apply', iff_self,
implies_true_iff] with mfld_simps {contextual := tt} },
rw [H],
refine (continuous_on_fst.prod this).congr _,
rintros ⟨b, f⟩ ⟨hb, -⟩,
dsimp only [function.comp, prod.map],
rw [a.mk_coord_change _ _ hb, e'.mk_symm hb.1],
refl,
end,
.. a }
/-- Topology on the total space that will make the prebundle into a bundle. -/
def total_space_topology (a : vector_prebundle R F E) :
topological_space (total_space E) :=
a.to_fiber_prebundle.total_space_topology
/-- Promotion from a `pretrivialization` in the `pretrivialization_atlas` of a
`vector_prebundle` to a `trivialization`. -/
def trivialization_of_mem_pretrivialization_atlas (a : vector_prebundle R F E)
{e : pretrivialization F (π E)} (he : e ∈ a.pretrivialization_atlas) :
@trivialization B F _ _ _ a.total_space_topology (π E) :=
a.to_fiber_prebundle.trivialization_of_mem_pretrivialization_atlas he
lemma linear_of_mem_pretrivialization_atlas (a : vector_prebundle R F E)
{e : pretrivialization F (π E)} (he : e ∈ a.pretrivialization_atlas) :
@trivialization.is_linear R B F _ _ _ _ a.total_space_topology _ _ _ _
(trivialization_of_mem_pretrivialization_atlas a he) :=
{ linear := (a.pretrivialization_linear' e he).linear }
variable (a : vector_prebundle R F E)
lemma mem_trivialization_at_source (b : B) (x : E b) :
total_space_mk b x ∈ (a.pretrivialization_at b).source :=
a.to_fiber_prebundle.mem_trivialization_at_source b x
@[simp] lemma total_space_mk_preimage_source (b : B) :
(total_space_mk b) ⁻¹' (a.pretrivialization_at b).source = univ :=
a.to_fiber_prebundle.total_space_mk_preimage_source b
/-- Topology on the fibers `E b` induced by the map `E b → total_space E`. -/
def fiber_topology (b : B) : topological_space (E b) :=
a.to_fiber_prebundle.fiber_topology b
@[continuity] lemma inducing_total_space_mk (b : B) :
@inducing _ _ (a.fiber_topology b) a.total_space_topology (total_space_mk b) :=
a.to_fiber_prebundle.inducing_total_space_mk b
@[continuity] lemma continuous_total_space_mk (b : B) :
@continuous _ _ (a.fiber_topology b) a.total_space_topology (total_space_mk b) :=
a.to_fiber_prebundle.continuous_total_space_mk b
/-- Make a `fiber_bundle` from a `vector_prebundle`; auxiliary construction for
`vector_prebundle.vector_bundle`. -/
def to_fiber_bundle : @fiber_bundle B F _ _ _ a.total_space_topology a.fiber_topology :=
a.to_fiber_prebundle.to_fiber_bundle
/-- Make a `vector_bundle` from a `vector_prebundle`. Concretely this means
that, given a `vector_prebundle` structure for a sigma-type `E` -- which consists of a
number of "pretrivializations" identifying parts of `E` with product spaces `U × F` -- one
establishes that for the topology constructed on the sigma-type using
`vector_prebundle.total_space_topology`, these "pretrivializations" are actually
"trivializations" (i.e., homeomorphisms with respect to the constructed topology). -/
lemma to_vector_bundle :
@vector_bundle R _ F E _ _ _ _ _ _ a.total_space_topology a.fiber_topology a.to_fiber_bundle :=
{ trivialization_linear' := begin
rintros _ ⟨e, he, rfl⟩,
apply linear_of_mem_pretrivialization_atlas,
end,
continuous_on_coord_change' := begin
rintros _ _ ⟨e, he, rfl⟩ ⟨e', he', rfl⟩,
refine (a.continuous_on_coord_change he he').congr _,
intros b hb,
ext v,
rw [a.coord_change_apply he he' hb v, continuous_linear_equiv.coe_coe,
trivialization.coord_changeL_apply],
exacts [rfl, hb]
end }
end vector_prebundle
end
|
afb1bb06fc9ff6b23494c0c93aae54e5d45b4a47
|
57c233acf9386e610d99ed20ef139c5f97504ba3
|
/src/category_theory/preadditive/yoneda.lean
|
f7ad10ff21e071f7723ec50a24161aa0c98c73c2
|
[
"Apache-2.0"
] |
permissive
|
robertylewis/mathlib
|
3d16e3e6daf5ddde182473e03a1b601d2810952c
|
1d13f5b932f5e40a8308e3840f96fc882fae01f0
|
refs/heads/master
| 1,651,379,945,369
| 1,644,276,960,000
| 1,644,276,960,000
| 98,875,504
| 0
| 0
|
Apache-2.0
| 1,644,253,514,000
| 1,501,495,700,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 4,227
|
lean
|
/-
Copyright (c) 2022 Markus Himmel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Markus Himmel
-/
import category_theory.preadditive.opposite
import algebra.category.Module.basic
import algebra.category.Group.preadditive
/-!
# The Yoneda embedding for preadditive categories
The Yoneda embedding for preadditive categories sends an object `Y` to the presheaf sending an
object `X` to the group of morphisms `X ⟶ Y`. At each point, we get an additional `End Y`-module
structure.
We also show that this presheaf is additive and that it is compatible with the normal Yoneda
embedding in the expected way.
## TODO
* The Yoneda embedding is additive itself
* The Yoneda embedding for preadditive categories relates to the Yoneda embedding for linear
categories.
-/
universes v u
open category_theory.preadditive opposite
namespace category_theory
variables {C : Type u} [category.{v} C] [preadditive C]
/--
The Yoneda embedding for preadditive categories sends an object `Y` to the presheaf sending an
object `X` to the `End Y`-module of morphisms `X ⟶ Y`.
-/
@[simps]
def preadditive_yoneda_obj (Y : C) : Cᵒᵖ ⥤ Module.{v} (End Y) :=
{ obj := λ X, Module.of _ (X.unop ⟶ Y),
map := λ X X' f,
{ to_fun := λ g, f.unop ≫ g,
map_add' := λ g g', comp_add _ _ _ _ _ _,
map_smul' := λ r g, eq.symm $ category.assoc _ _ _ } }
/--
The Yoneda embedding for preadditive categories sends an object `Y` to the presheaf sending an
object `X` to the group of morphisms `X ⟶ Y`. At each point, we get an additional `End Y`-module
structure, see `preadditive_yoneda_obj`.
-/
@[simps]
def preadditive_yoneda : C ⥤ (Cᵒᵖ ⥤ AddCommGroup.{v}) :=
{ obj := λ Y, preadditive_yoneda_obj Y ⋙ forget₂ _ _,
map := λ Y Y' f,
{ app := λ X,
{ to_fun := λ g, g ≫ f,
map_zero' := limits.zero_comp,
map_add' := λ g g', add_comp _ _ _ _ _ _ },
naturality' := λ X X' g, AddCommGroup.ext _ _ _ _ $ λ x, category.assoc _ _ _ },
map_id' := λ X, by { ext, simp },
map_comp' := λ X Y Z f g, by { ext, simp } }
/--
The Yoneda embedding for preadditive categories sends an object `X` to the copresheaf sending an
object `Y` to the `End X`-module of morphisms `X ⟶ Y`.
-/
@[simps]
def preadditive_coyoneda_obj (X : Cᵒᵖ) : C ⥤ Module.{v} (End X) :=
{ obj := λ Y, Module.of _ (unop X ⟶ Y),
map := λ Y Y' f,
{ to_fun := λ g, g ≫ f,
map_add' := λ g g', add_comp _ _ _ _ _ _,
map_smul' := λ r g, category.assoc _ _ _ } }
/--
The Yoneda embedding for preadditive categories sends an object `X` to the copresheaf sending an
object `Y` to the group of morphisms `X ⟶ Y`. At each point, we get an additional `End X`-module
structure, see `preadditive_coyoneda_obj`.
-/
@[simps]
def preadditive_coyoneda : Cᵒᵖ ⥤ (C ⥤ AddCommGroup.{v}) :=
{ obj := λ X, preadditive_coyoneda_obj X ⋙ forget₂ _ _,
map := λ X X' f,
{ app := λ Y,
{ to_fun := λ g, f.unop ≫ g,
map_zero' := limits.comp_zero,
map_add' := λ g g', comp_add _ _ _ _ _ _ },
naturality' := λ Y Y' g, AddCommGroup.ext _ _ _ _ $ λ x, eq.symm $ category.assoc _ _ _ },
map_id' := λ X, by { ext, simp },
map_comp' := λ X Y Z f g, by { ext, simp } }
instance additive_yoneda_obj (X : C) : functor.additive (preadditive_yoneda_obj X) := {}
instance additive_yoneda_obj' (X : C) : functor.additive (preadditive_yoneda.obj X) := {}
instance additive_coyoneda_obj (X : Cᵒᵖ) : functor.additive (preadditive_coyoneda_obj X) := {}
instance additive_coyoneda_obj' (X : Cᵒᵖ) : functor.additive (preadditive_coyoneda.obj X) := {}
/--
Composing the preadditive yoneda embedding with the forgetful functor yields the regular
Yoneda embedding.
-/
@[simp] lemma whiskering_preadditive_yoneda : preadditive_yoneda ⋙
(whiskering_right Cᵒᵖ AddCommGroup (Type v)).obj (forget AddCommGroup) = yoneda :=
rfl
/--
Composing the preadditive yoneda embedding with the forgetful functor yields the regular
Yoneda embedding.
-/
@[simp] lemma whiskering_preadditive_coyoneda : preadditive_coyoneda ⋙
(whiskering_right C AddCommGroup (Type v)).obj (forget AddCommGroup) = coyoneda :=
rfl
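-- Illustrative sanity checks (not part of the original file): since the two compatibility
-- lemmas above are proved by `rfl`, the corresponding equalities at the level of objects
-- should also hold definitionally.
example (Y : C) (X : Cᵒᵖ) :
  (preadditive_yoneda.obj Y ⋙ forget AddCommGroup).obj X = (yoneda.obj Y).obj X := rfl
example (X : Cᵒᵖ) (Y : C) :
  (preadditive_coyoneda.obj X ⋙ forget AddCommGroup).obj Y = (coyoneda.obj X).obj Y := rfl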
end category_theory
|
a947e1e503ca7efcdeb978f0e587b1af7ddc17c7
|
4d2583807a5ac6caaffd3d7a5f646d61ca85d532
|
/test/lint_coe_t.lean
|
ebe242ceeab98f1b482938238086d128c37daf3c
|
[
"Apache-2.0"
] |
permissive
|
AntoineChambert-Loir/mathlib
|
64aabb896129885f12296a799818061bc90da1ff
|
07be904260ab6e36a5769680b6012f03a4727134
|
refs/heads/master
| 1,693,187,631,771
| 1,636,719,886,000
| 1,636,719,886,000
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Lean
| false
| false
| 1,083
|
lean
|
import tactic.lint
open tactic
-- bad, because every iteration of tc search will loop, generating nested quotients
section
local attribute [instance]
def a_to_quot {α} (R : setoid α) : has_coe α (quotient R) := ⟨quotient.mk⟩
run_cmd do
d ← get_decl ``a_to_quot,
some _ ← linter.has_coe_variable.test d,
d ← get_decl ``coe_trans,
some s ← fails_quickly 3000 d,
guard $ "maximum class-instance resolution depth has been reached".is_prefix_of s
end
-- good, because the term gets smaller in every iteration
noncomputable instance quot_to_a {α} (R : setoid α) : has_coe (quotient R) α :=
⟨λ q, quot.rec_on q (λ a, classical.choice ⟨a⟩) (by cc)⟩
run_cmd do
decl ← get_decl ``quot_to_a,
-- linter does not complain
none ← linter.has_coe_variable.test decl,
skip
-- bad, because it introduces a metavariable
section
local attribute [instance]
def int_to_a {α} [inhabited α] : has_coe ℤ α := ⟨λ _, default _⟩
run_cmd do
decl ← get_decl ``int_to_a,
-- linter does not complain
some _ ← linter.has_coe_variable.test decl,
skip
end
|
7892f303825d0b23894a91760f68e896af7296d0
|
e00ea76a720126cf9f6d732ad6216b5b824d20a7
|
/src/order/lexicographic.lean
|
6ab8d7e8a0442a218f80427e210008366b6ab399
|
[
"Apache-2.0"
] |
permissive
|
vaibhavkarve/mathlib
|
a574aaf68c0a431a47fa82ce0637f0f769826bfe
|
17f8340912468f49bdc30acdb9a9fa02eeb0473a
|
refs/heads/master
| 1,621,263,802,637
| 1,585,399,588,000
| 1,585,399,588,000
| 250,833,447
| 0
| 0
|
Apache-2.0
| 1,585,410,341,000
| 1,585,410,341,000
| null |
UTF-8
|
Lean
| false
| false
| 8,630
|
lean
|
/-
Copyright (c) 2019 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Minchao Wu
Lexicographic preorder / partial_order / linear_order / decidable_linear_order,
for pairs and dependent pairs.
-/
import tactic.basic
import algebra.order
universes u v
def lex (α : Type u) (β : Type v) := α × β
variables {α : Type u} {β : Type v}
instance [decidable_eq α] [decidable_eq β] : decidable_eq (lex α β) :=
prod.decidable_eq
instance [inhabited α] [inhabited β] : inhabited (lex α β) :=
prod.inhabited
/-- Dictionary / lexicographic ordering on pairs. -/
instance lex_has_le [preorder α] [preorder β] : has_le (lex α β) :=
{ le := prod.lex (<) (≤) }
instance lex_has_lt [preorder α] [preorder β] : has_lt (lex α β) :=
{ lt := prod.lex (<) (<) }
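-- Illustrative sanity checks (not part of the original file): in the dictionary order the
-- first components are compared strictly first, and ties are broken by the second components.
-- The `left`/`right` tactics pick the corresponding constructor of `prod.lex`.
example : ((1, 5) : lex ℕ ℕ) ≤ (2, 3) := by { left, exact dec_trivial }
example : ((1, 5) : lex ℕ ℕ) ≤ (1, 7) := by { right, exact dec_trivial }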
/-- Dictionary / lexicographic preorder for pairs. -/
instance lex_preorder [preorder α] [preorder β] : preorder (lex α β) :=
{ le_refl := λ ⟨l, r⟩, by { right, apply le_refl },
le_trans :=
begin
rintros ⟨a₁, b₁⟩ ⟨a₂, b₂⟩ ⟨a₃, b₃⟩ ⟨h₁l, h₁r⟩ ⟨h₂l, h₂r⟩,
{ left, apply lt_trans, repeat { assumption } },
{ left, assumption },
{ left, assumption },
{ right, apply le_trans, repeat { assumption } }
end,
lt_iff_le_not_le :=
begin
rintros ⟨a₁, b₁⟩ ⟨a₂, b₂⟩,
split,
{ rintros (⟨_, _, _, _, hlt⟩ | ⟨_, _, _, hlt⟩),
{ split,
{ left, assumption },
{ rintro ⟨l,r⟩,
{ apply lt_asymm hlt, assumption },
{ apply lt_irrefl _ hlt } } },
{ split,
{ right, rw lt_iff_le_not_le at hlt, exact hlt.1 },
{ rintro ⟨l,r⟩,
{ apply lt_irrefl a₁, assumption },
{ rw lt_iff_le_not_le at hlt, apply hlt.2, assumption } } } },
{ rintros ⟨⟨h₁ll, h₁lr⟩, h₂r⟩,
{ left, assumption },
{ right, rw lt_iff_le_not_le, split,
{ assumption },
{ intro h, apply h₂r, right, exact h } } }
end,
.. lex_has_le,
.. lex_has_lt }
/-- Dictionary / lexicographic partial_order for pairs. -/
instance lex_partial_order [partial_order α] [partial_order β] : partial_order (lex α β) :=
{ le_antisymm :=
begin
rintros ⟨a₁, b₁⟩ ⟨a₂, b₂⟩ (⟨_, _, _, _, hlt₁⟩ | ⟨_, _, _, hlt₁⟩) (⟨_, _, _, _, hlt₂⟩ | ⟨_, _, _, hlt₂⟩),
{ exfalso, exact lt_irrefl a₁ (lt_trans hlt₁ hlt₂) },
{ exfalso, exact lt_irrefl a₁ hlt₁ },
{ exfalso, exact lt_irrefl a₁ hlt₂ },
{ have := le_antisymm hlt₁ hlt₂, simp [this] }
end
.. lex_preorder }
/-- Dictionary / lexicographic linear_order for pairs. -/
instance lex_linear_order [linear_order α] [linear_order β] : linear_order (lex α β) :=
{ le_total :=
begin
rintros ⟨a₁, b₁⟩ ⟨a₂, b₂⟩,
rcases le_total a₁ a₂ with ha | ha;
cases lt_or_eq_of_le ha with a_lt a_eq,
-- Deal with the two goals with a₁ ≠ a₂
{ left, left, exact a_lt },
swap,
{ right, left, exact a_lt },
-- Now deal with the two goals with a₁ = a₂
all_goals { subst a_eq, rcases le_total b₁ b₂ with hb | hb },
{ left, right, exact hb },
{ right, right, exact hb },
{ left, right, exact hb },
{ right, right, exact hb },
end
.. lex_partial_order }.
/-- Dictionary / lexicographic decidable_linear_order for pairs. -/
instance lex_decidable_linear_order [decidable_linear_order α] [decidable_linear_order β] :
decidable_linear_order (lex α β) :=
{ decidable_le :=
begin
rintros ⟨a₁, b₁⟩ ⟨a₂, b₂⟩,
rcases decidable_linear_order.decidable_le α a₁ a₂ with a_lt | a_le,
{ -- a₂ < a₁
left, rw not_le at a_lt, rintro ⟨l, r⟩,
{ apply lt_irrefl a₂, apply lt_trans, repeat { assumption } },
{ apply lt_irrefl a₁, assumption } },
{ -- a₁ ≤ a₂
by_cases h : a₁ = a₂,
{ rw h,
rcases decidable_linear_order.decidable_le _ b₁ b₂ with b_lt | b_le,
{ -- b₂ < b₁
left, rw not_le at b_lt, rintro ⟨l, r⟩,
{ apply lt_irrefl a₂, assumption },
{ apply lt_irrefl b₂, apply lt_of_lt_of_le, repeat { assumption } } },
-- b₁ ≤ b₂
{ right, right, assumption } },
-- a₁ < a₂
{ right, left, apply lt_of_le_of_ne, repeat { assumption } } }
end,
.. lex_linear_order }
variables {Z : α → Type v}
/--
Dictionary / lexicographic ordering on dependent pairs.
The 'pointwise' partial order `prod.has_le` doesn't make
sense for dependent pairs, so it's safe to mark these as
instances here.
-/
instance dlex_has_le [preorder α] [∀ a, preorder (Z a)] : has_le (Σ' a, Z a) :=
{ le := psigma.lex (<) (λ a, (≤)) }
instance dlex_has_lt [preorder α] [∀ a, preorder (Z a)] : has_lt (Σ' a, Z a) :=
{ lt := psigma.lex (<) (λ a, (<)) }
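-- Illustrative sanity check (not part of the original file): with a constant fiber the
-- dependent lexicographic order behaves like the non-dependent one; `left` picks the
-- `psigma.lex` constructor that compares the first components.
example : (⟨1, 5⟩ : Σ' n : ℕ, ℕ) ≤ ⟨2, 3⟩ := by { left, exact dec_trivial }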
/-- Dictionary / lexicographic preorder on dependent pairs. -/
instance dlex_preorder [preorder α] [∀ a, preorder (Z a)] : preorder (Σ' a, Z a) :=
{ le_refl := λ ⟨l, r⟩, by { right, apply le_refl },
le_trans :=
begin
rintros ⟨a₁, b₁⟩ ⟨a₂, b₂⟩ ⟨a₃, b₃⟩ ⟨h₁l, h₁r⟩ ⟨h₂l, h₂r⟩,
{ left, apply lt_trans, repeat { assumption } },
{ left, assumption },
{ left, assumption },
{ right, apply le_trans, repeat { assumption } }
end,
lt_iff_le_not_le :=
begin
rintros ⟨a₁, b₁⟩ ⟨a₂, b₂⟩,
split,
{ rintros (⟨_, _, _, _, hlt⟩ | ⟨_, _, _, hlt⟩),
{ split,
{ left, assumption },
{ rintro ⟨l,r⟩,
{ apply lt_asymm hlt, assumption },
{ apply lt_irrefl _ hlt } } },
{ split,
{ right, rw lt_iff_le_not_le at hlt, exact hlt.1 },
{ rintro ⟨l,r⟩,
{ apply lt_irrefl a₁, assumption },
{ rw lt_iff_le_not_le at hlt, apply hlt.2, assumption } } } },
{ rintros ⟨⟨h₁ll, h₁lr⟩, h₂r⟩,
{ left, assumption },
{ right, rw lt_iff_le_not_le, split,
{ assumption },
{ intro h, apply h₂r, right, exact h } } }
end,
.. dlex_has_le,
.. dlex_has_lt }
/-- Dictionary / lexicographic partial_order for dependent pairs. -/
instance dlex_partial_order [partial_order α] [∀ a, partial_order (Z a)] : partial_order (Σ' a, Z a) :=
{ le_antisymm :=
begin
rintros ⟨a₁, b₁⟩ ⟨a₂, b₂⟩ (⟨_, _, _, _, hlt₁⟩ | ⟨_, _, _, hlt₁⟩) (⟨_, _, _, _, hlt₂⟩ | ⟨_, _, _, hlt₂⟩),
{ exfalso, exact lt_irrefl a₁ (lt_trans hlt₁ hlt₂) },
{ exfalso, exact lt_irrefl a₁ hlt₁ },
{ exfalso, exact lt_irrefl a₁ hlt₂ },
{ have := le_antisymm hlt₁ hlt₂, simp [this] }
end
.. dlex_preorder }
/-- Dictionary / lexicographic linear_order for dependent pairs. -/
instance dlex_linear_order [linear_order α] [∀ a, linear_order (Z a)] : linear_order (Σ' a, Z a) :=
{ le_total :=
begin
rintros ⟨a₁, b₁⟩ ⟨a₂, b₂⟩,
rcases le_total a₁ a₂ with ha | ha;
cases lt_or_eq_of_le ha with a_lt a_eq,
-- Deal with the two goals with a₁ ≠ a₂
{ left, left, exact a_lt },
swap,
{ right, left, exact a_lt },
-- Now deal with the two goals with a₁ = a₂
all_goals { subst a_eq, rcases le_total b₁ b₂ with hb | hb },
{ left, right, exact hb },
{ right, right, exact hb },
{ left, right, exact hb },
{ right, right, exact hb },
end
.. dlex_partial_order }.
/-- Dictionary / lexicographic decidable_linear_order for dependent pairs. -/
instance dlex_decidable_linear_order [decidable_linear_order α] [∀ a, decidable_linear_order (Z a)] :
decidable_linear_order (Σ' a, Z a) :=
{ decidable_le :=
begin
rintros ⟨a₁, b₁⟩ ⟨a₂, b₂⟩,
rcases decidable_linear_order.decidable_le α a₁ a₂ with a_lt | a_le,
{ -- a₂ < a₁
left, rw not_le at a_lt, rintro ⟨l, r⟩,
{ apply lt_irrefl a₂, apply lt_trans, repeat { assumption } },
{ apply lt_irrefl a₁, assumption } },
{ -- a₁ ≤ a₂
by_cases h : a₁ = a₂,
{ subst h,
rcases decidable_linear_order.decidable_le _ b₁ b₂ with b_lt | b_le,
{ -- b₂ < b₁
left, rw not_le at b_lt, rintro ⟨l, r⟩,
{ apply lt_irrefl a₁, assumption },
{ apply lt_irrefl b₂, apply lt_of_lt_of_le, repeat { assumption } } },
-- b₁ ≤ b₂
{ right, right, assumption } },
-- a₁ < a₂
{ right, left, apply lt_of_le_of_ne, repeat { assumption } } }
end,
.. dlex_linear_order }
|
8d36df106a00ba5569bfc3d592aae58491a4d786
|
9dc8cecdf3c4634764a18254e94d43da07142918
|
/src/analysis/fourier.lean
|
8c568d14f027dfc2f603ba0ccb02768728c51841
|
[
"Apache-2.0"
] |
permissive
|
jcommelin/mathlib
|
d8456447c36c176e14d96d9e76f39841f69d2d9b
|
ee8279351a2e434c2852345c51b728d22af5a156
|
refs/heads/master
| 1,664,782,136,488
| 1,663,638,983,000
| 1,663,638,983,000
| 132,563,656
| 0
| 0
|
Apache-2.0
| 1,663,599,929,000
| 1,525,760,539,000
|
Lean
|
UTF-8
|
Lean
| false
| false
| 10,700
|
lean
|
/-
Copyright (c) 2021 Heather Macbeth. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Heather Macbeth
-/
import analysis.complex.circle
import analysis.inner_product_space.l2_space
import measure_theory.function.continuous_map_dense
import measure_theory.function.l2_space
import measure_theory.measure.haar
import measure_theory.group.integration
import topology.metric_space.emetric_paracompact
import topology.continuous_function.stone_weierstrass
/-!
# Fourier analysis on the circle
This file contains basic results on Fourier series.
## Main definitions
* `haar_circle`, Haar measure on the circle, normalized to have total measure `1`
* instances `measure_space`, `is_probability_measure` for the circle with respect to this measure
* for `n : ℤ`, `fourier n` is the monomial `λ z, z ^ n`, bundled as a continuous map from `circle`
to `ℂ`
* for `n : ℤ` and `p : ℝ≥0∞`, `fourier_Lp p n` is an abbreviation for the monomial `fourier n`
considered as an element of the Lᵖ-space `Lp ℂ p haar_circle`, via the embedding
`continuous_map.to_Lp`
* `fourier_series` is the canonical isometric isomorphism from `Lp ℂ 2 haar_circle` to `ℓ²(ℤ, ℂ)`
induced by taking Fourier series
## Main statements
The theorem `span_fourier_closure_eq_top` states that the span of the monomials `fourier n` is
dense in `C(circle, ℂ)`, i.e. that its `submodule.topological_closure` is `⊤`. This follows from
the Stone-Weierstrass theorem after checking that it is a subalgebra, closed under conjugation, and
separates points.
The theorem `span_fourier_Lp_closure_eq_top` states that for `1 ≤ p < ∞` the span of the monomials
`fourier_Lp` is dense in `Lp ℂ p haar_circle`, i.e. that its `submodule.topological_closure` is
`⊤`. This follows from the previous theorem using general theory on approximation of Lᵖ functions
by continuous functions.
The theorem `orthonormal_fourier` states that the monomials `fourier_Lp 2 n` form an orthonormal
set (in the L² space of the circle).
The last two results together provide that the functions `fourier_Lp 2 n` form a Hilbert basis for
L²; this is named as `fourier_series`.
Parseval's identity, `tsum_sq_fourier_series_repr`, is a direct consequence of the construction of
this Hilbert basis.
-/
noncomputable theory
open_locale ennreal complex_conjugate classical
open topological_space continuous_map measure_theory measure_theory.measure algebra submodule set
/-! ### Choice of measure on the circle -/
section haar_circle
/-! We make the circle into a measure space, using the Haar measure normalized to have total
measure 1. -/
instance : measurable_space circle := borel circle
instance : borel_space circle := ⟨rfl⟩
/-- Haar measure on the circle, normalized to have total measure 1. -/
@[derive is_haar_measure]
def haar_circle : measure circle := haar_measure ⊤
instance : is_probability_measure haar_circle := ⟨haar_measure_self⟩
instance : measure_space circle :=
{ volume := haar_circle,
.. circle.measurable_space }
end haar_circle
/-! ### Monomials on the circle -/
section monomials
/-- The family of monomials `λ z, z ^ n`, parametrized by `n : ℤ` and considered as bundled
continuous maps from `circle` to `ℂ`. -/
@[simps] def fourier (n : ℤ) : C(circle, ℂ) :=
{ to_fun := λ z, z ^ n,
continuous_to_fun := continuous_subtype_coe.zpow₀ n $ λ z, or.inl (ne_zero_of_mem_circle z) }
@[simp] lemma fourier_zero {z : circle} : fourier 0 z = 1 := rfl
@[simp] lemma fourier_neg {n : ℤ} {z : circle} : fourier (-n) z = conj (fourier n z) :=
by simp [← coe_inv_circle_eq_conj z]
@[simp] lemma fourier_add {m n : ℤ} {z : circle} :
fourier (m + n) z = (fourier m z) * (fourier n z) :=
by simp [zpow_add₀ (ne_zero_of_mem_circle z)]
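-- Illustrative sanity check (not part of the original file): `fourier 1` should be the
-- inclusion of the circle into `ℂ`; this is expected to follow from the `simp` lemmas
-- `fourier_apply` (generated by `@[simps]` above) and `zpow_one`.
example (z : circle) : fourier 1 z = z := by simp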
/-- The subalgebra of `C(circle, ℂ)` generated by `z ^ n` for `n ∈ ℤ`; equivalently, polynomials in
`z` and `conj z`. -/
def fourier_subalgebra : subalgebra ℂ C(circle, ℂ) := algebra.adjoin ℂ (range fourier)
/-- The subalgebra of `C(circle, ℂ)` generated by `z ^ n` for `n ∈ ℤ` is in fact the linear span of
these functions. -/
lemma fourier_subalgebra_coe : fourier_subalgebra.to_submodule = span ℂ (range fourier) :=
begin
apply adjoin_eq_span_of_subset,
refine subset.trans _ submodule.subset_span,
intros x hx,
apply submonoid.closure_induction hx (λ _, id) ⟨0, rfl⟩,
rintros _ _ ⟨m, rfl⟩ ⟨n, rfl⟩,
refine ⟨m + n, _⟩,
ext1 z,
exact fourier_add,
end
/-- The subalgebra of `C(circle, ℂ)` generated by `z ^ n` for `n ∈ ℤ` separates points. -/
lemma fourier_subalgebra_separates_points : fourier_subalgebra.separates_points :=
begin
intros x y hxy,
refine ⟨_, ⟨fourier 1, _, rfl⟩, _⟩,
{ exact subset_adjoin ⟨1, rfl⟩ },
{ simp [hxy] }
end
/-- The subalgebra of `C(circle, ℂ)` generated by `z ^ n` for `n ∈ ℤ` is invariant under complex
conjugation. -/
lemma fourier_subalgebra_conj_invariant :
conj_invariant_subalgebra (fourier_subalgebra.restrict_scalars ℝ) :=
begin
rintros _ ⟨f, hf, rfl⟩,
change _ ∈ fourier_subalgebra,
change _ ∈ fourier_subalgebra at hf,
apply adjoin_induction hf,
{ rintros _ ⟨n, rfl⟩,
suffices : fourier (-n) ∈ fourier_subalgebra,
{ convert this,
ext1,
simp },
exact subset_adjoin ⟨-n, rfl⟩ },
{ intros c,
exact fourier_subalgebra.algebra_map_mem (conj c) },
{ intros f g hf hg,
convert fourier_subalgebra.add_mem hf hg,
exact alg_hom.map_add _ f g, },
{ intros f g hf hg,
convert fourier_subalgebra.mul_mem hf hg,
exact alg_hom.map_mul _ f g, }
end
/-- The subalgebra of `C(circle, ℂ)` generated by `z ^ n` for `n ∈ ℤ` is dense. -/
lemma fourier_subalgebra_closure_eq_top : fourier_subalgebra.topological_closure = ⊤ :=
continuous_map.subalgebra_is_R_or_C_topological_closure_eq_top_of_separates_points
fourier_subalgebra
fourier_subalgebra_separates_points
fourier_subalgebra_conj_invariant
/-- The linear span of the monomials `z ^ n` is dense in `C(circle, ℂ)`. -/
lemma span_fourier_closure_eq_top : (span ℂ (range fourier)).topological_closure = ⊤ :=
begin
rw ← fourier_subalgebra_coe,
exact congr_arg subalgebra.to_submodule fourier_subalgebra_closure_eq_top,
end
/-- The family of monomials `λ z, z ^ n`, parametrized by `n : ℤ` and considered as elements of
the `Lp` space of functions on `circle` taking values in `ℂ`. -/
abbreviation fourier_Lp (p : ℝ≥0∞) [fact (1 ≤ p)] (n : ℤ) : Lp ℂ p haar_circle :=
to_Lp p haar_circle ℂ (fourier n)
lemma coe_fn_fourier_Lp (p : ℝ≥0∞) [fact (1 ≤ p)] (n : ℤ) :
⇑(fourier_Lp p n) =ᵐ[haar_circle] fourier n :=
coe_fn_to_Lp haar_circle (fourier n)
/-- For each `1 ≤ p < ∞`, the linear span of the monomials `z ^ n` is dense in
`Lp ℂ p haar_circle`. -/
lemma span_fourier_Lp_closure_eq_top {p : ℝ≥0∞} [fact (1 ≤ p)] (hp : p ≠ ∞) :
(span ℂ (range (fourier_Lp p))).topological_closure = ⊤ :=
begin
convert (continuous_map.to_Lp_dense_range ℂ hp haar_circle ℂ).topological_closure_map_submodule
span_fourier_closure_eq_top,
rw [map_span, range_comp],
simp
end
/-- For `n ≠ 0`, a rotation by `n⁻¹ * real.pi` negates the monomial `z ^ n`. -/
lemma fourier_add_half_inv_index {n : ℤ} (hn : n ≠ 0) (z : circle) :
fourier n ((exp_map_circle (n⁻¹ * real.pi) * z)) = - fourier n z :=
begin
have : ↑n * ((↑n)⁻¹ * ↑real.pi * complex.I) = ↑real.pi * complex.I,
{ have : (n:ℂ) ≠ 0 := by exact_mod_cast hn,
field_simp,
ring },
simp [mul_zpow, ← complex.exp_int_mul, complex.exp_pi_mul_I, this]
end
/-- The monomials `z ^ n` are an orthonormal set with respect to Haar measure on the circle. -/
lemma orthonormal_fourier : orthonormal ℂ (fourier_Lp 2) :=
begin
rw orthonormal_iff_ite,
intros i j,
rw continuous_map.inner_to_Lp haar_circle (fourier i) (fourier j),
split_ifs,
{ simp [h, is_probability_measure.measure_univ, ← fourier_neg, ← fourier_add, -fourier_apply] },
simp only [← fourier_add, ← fourier_neg],
have hij : -i + j ≠ 0,
{ rw add_comm,
exact sub_ne_zero.mpr (ne.symm h) },
exact integral_eq_zero_of_mul_left_eq_neg (fourier_add_half_inv_index hij)
end
end monomials
section fourier
/-- We define `fourier_series` to be a `ℤ`-indexed Hilbert basis for `Lp ℂ 2 haar_circle`, which by
definition is an isometric isomorphism from `Lp ℂ 2 haar_circle` to `ℓ²(ℤ, ℂ)`. -/
def fourier_series : hilbert_basis ℤ ℂ (Lp ℂ 2 haar_circle) :=
hilbert_basis.mk orthonormal_fourier (span_fourier_Lp_closure_eq_top (by norm_num)).ge
/-- The elements of the Hilbert basis `fourier_series` for `Lp ℂ 2 haar_circle` are the functions
`fourier_Lp 2`, the monomials `λ z, z ^ n` on the circle considered as elements of `L2`. -/
@[simp] lemma coe_fourier_series : ⇑fourier_series = fourier_Lp 2 := hilbert_basis.coe_mk _ _
/-- Under the isometric isomorphism `fourier_series` from `Lp ℂ 2 haar_circle` to `ℓ²(ℤ, ℂ)`, the
`i`-th coefficient is the integral over the circle of `λ t, t ^ (-i) * f t`. -/
lemma fourier_series_repr (f : Lp ℂ 2 haar_circle) (i : ℤ) :
fourier_series.repr f i = ∫ t : circle, t ^ (-i) * f t ∂ haar_circle :=
begin
transitivity ∫ t : circle, conj ((fourier_Lp 2 i : circle → ℂ) t) * f t ∂ haar_circle,
{ simp [fourier_series.repr_apply_apply f i, measure_theory.L2.inner_def] },
apply integral_congr_ae,
filter_upwards [coe_fn_fourier_Lp 2 i] with _ ht,
rw [ht, ← fourier_neg],
simp [-fourier_neg]
end
/-- The Fourier series of an `L2` function `f` sums to `f`, in the `L2` topology on the circle. -/
lemma has_sum_fourier_series (f : Lp ℂ 2 haar_circle) :
has_sum (λ i, fourier_series.repr f i • fourier_Lp 2 i) f :=
by simpa using hilbert_basis.has_sum_repr fourier_series f
/-- **Parseval's identity**: the sum of the squared norms of the Fourier coefficients equals the
`L2` norm of the function. -/
lemma tsum_sq_fourier_series_repr (f : Lp ℂ 2 haar_circle) :
∑' i : ℤ, ∥fourier_series.repr f i∥ ^ 2 = ∫ t : circle, ∥f t∥ ^ 2 ∂ haar_circle :=
begin
have H₁ : ∥fourier_series.repr f∥ ^ 2 = ∑' i, ∥fourier_series.repr f i∥ ^ 2,
{ exact_mod_cast lp.norm_rpow_eq_tsum _ (fourier_series.repr f),
norm_num },
have H₂ : ∥fourier_series.repr f∥ ^ 2 = ∥f∥ ^ 2 := by simp,
have H₃ := congr_arg is_R_or_C.re (@L2.inner_def circle ℂ ℂ _ _ _ _ f f),
rw ← integral_re at H₃,
{ simp only [← norm_sq_eq_inner] at H₃,
rw [← H₁, H₂],
exact H₃ },
{ exact L2.integrable_inner f f },
end
end fourier
|